linux/drivers/net/ethernet/broadcom/genet/bcmgenet.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Broadcom GENET (Gigabit Ethernet) controller driver
   4 *
   5 * Copyright (c) 2014-2020 Broadcom
   6 */
   7
   8#define pr_fmt(fmt)                             "bcmgenet: " fmt
   9
  10#include <linux/acpi.h>
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/sched.h>
  14#include <linux/types.h>
  15#include <linux/fcntl.h>
  16#include <linux/interrupt.h>
  17#include <linux/string.h>
  18#include <linux/if_ether.h>
  19#include <linux/init.h>
  20#include <linux/errno.h>
  21#include <linux/delay.h>
  22#include <linux/platform_device.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/pm.h>
  25#include <linux/clk.h>
  26#include <net/arp.h>
  27
  28#include <linux/mii.h>
  29#include <linux/ethtool.h>
  30#include <linux/netdevice.h>
  31#include <linux/inetdevice.h>
  32#include <linux/etherdevice.h>
  33#include <linux/skbuff.h>
  34#include <linux/in.h>
  35#include <linux/ip.h>
  36#include <linux/ipv6.h>
  37#include <linux/phy.h>
  38#include <linux/platform_data/bcmgenet.h>
  39
  40#include <asm/unaligned.h>
  41
  42#include "bcmgenet.h"
  43
  44/* Maximum number of hardware queues, downsized if needed */
  45#define GENET_MAX_MQ_CNT        4
  46
  47/* Default highest priority queue for multi queue support */
  48#define GENET_Q0_PRIORITY       0
  49
  50#define GENET_Q16_RX_BD_CNT     \
  51        (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
  52#define GENET_Q16_TX_BD_CNT     \
  53        (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
  54
  55#define RX_BUF_LENGTH           2048
  56#define SKB_ALIGNMENT           32
  57
  58/* Tx/Rx DMA register offset, skip 256 descriptors */
  59#define WORDS_PER_BD(p)         (p->hw_params->words_per_bd)
  60#define DMA_DESC_SIZE           (WORDS_PER_BD(priv) * sizeof(u32))
  61
  62#define GENET_TDMA_REG_OFF      (priv->hw_params->tdma_offset + \
  63                                TOTAL_DESC * DMA_DESC_SIZE)
  64
  65#define GENET_RDMA_REG_OFF      (priv->hw_params->rdma_offset + \
  66                                TOTAL_DESC * DMA_DESC_SIZE)
  67
  68/* Forward declarations */
  69static void bcmgenet_set_rx_mode(struct net_device *dev);
  70
  71static inline void bcmgenet_writel(u32 value, void __iomem *offset)
  72{
  73        /* MIPS chips strapped for BE will automagically configure the
  74         * peripheral registers for CPU-native byte order.
  75         */
  76        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  77                __raw_writel(value, offset);
  78        else
  79                writel_relaxed(value, offset);
  80}
  81
  82static inline u32 bcmgenet_readl(void __iomem *offset)
  83{
  84        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
  85                return __raw_readl(offset);
  86        else
  87                return readl_relaxed(offset);
  88}
  89
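/* Descriptor helpers (a descriptive note, derived from the accessors below):
 * each Rx/Tx buffer descriptor exposes a length/status word and a DMA
 * address (a LO word plus an optional HI word on 40-bit capable parts) as
 * memory-mapped registers.
 */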
  90static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
  91                                             void __iomem *d, u32 value)
  92{
  93        bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
  94}
  95
  96static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
  97                                    void __iomem *d,
  98                                    dma_addr_t addr)
  99{
 100        bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
 101
  102        /* Register writes to the GISB bus can take a couple hundred
  103         * nanoseconds and are done for each packet, so save these expensive
  104         * writes unless the platform is explicitly configured for 64-bit/LPAE.
 105         */
 106#ifdef CONFIG_PHYS_ADDR_T_64BIT
 107        if (priv->hw_params->flags & GENET_HAS_40BITS)
 108                bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
 109#endif
 110}
 111
 112/* Combined address + length/status setter */
 113static inline void dmadesc_set(struct bcmgenet_priv *priv,
 114                               void __iomem *d, dma_addr_t addr, u32 val)
 115{
 116        dmadesc_set_addr(priv, d, addr);
 117        dmadesc_set_length_status(priv, d, val);
 118}
 119
 120static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
 121                                          void __iomem *d)
 122{
 123        dma_addr_t addr;
 124
 125        addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
 126
  127        /* Register reads from the GISB bus can take a couple hundred
  128         * nanoseconds and are done for each packet, so skip this expensive
  129         * read unless the platform is explicitly configured for 64-bit/LPAE.
 130         */
 131#ifdef CONFIG_PHYS_ADDR_T_64BIT
 132        if (priv->hw_params->flags & GENET_HAS_40BITS)
 133                addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
 134#endif
 135        return addr;
 136}
 137
 138#define GENET_VER_FMT   "%1d.%1d EPHY: 0x%04x"
 139
 140#define GENET_MSG_DEFAULT       (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
 141                                NETIF_MSG_LINK)
 142
 143static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
 144{
 145        if (GENET_IS_V1(priv))
 146                return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
 147        else
 148                return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
 149}
 150
 151static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
 152{
 153        if (GENET_IS_V1(priv))
 154                bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
 155        else
 156                bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
 157}
 158
  159/* These helpers are defined to deal with the register map change
  160 * between GENET1.1 and GENET2. Only those currently being used
  161 * by the driver are defined.
 162 */
 163static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
 164{
 165        if (GENET_IS_V1(priv))
 166                return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
 167        else
 168                return bcmgenet_readl(priv->base +
 169                                      priv->hw_params->tbuf_offset + TBUF_CTRL);
 170}
 171
 172static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
 173{
 174        if (GENET_IS_V1(priv))
 175                bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
 176        else
 177                bcmgenet_writel(val, priv->base +
 178                                priv->hw_params->tbuf_offset + TBUF_CTRL);
 179}
 180
 181static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
 182{
 183        if (GENET_IS_V1(priv))
 184                return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
 185        else
 186                return bcmgenet_readl(priv->base +
 187                                      priv->hw_params->tbuf_offset + TBUF_BP_MC);
 188}
 189
 190static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
 191{
 192        if (GENET_IS_V1(priv))
 193                bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
 194        else
 195                bcmgenet_writel(val, priv->base +
 196                                priv->hw_params->tbuf_offset + TBUF_BP_MC);
 197}
 198
 199/* RX/TX DMA register accessors */
 200enum dma_reg {
 201        DMA_RING_CFG = 0,
 202        DMA_CTRL,
 203        DMA_STATUS,
 204        DMA_SCB_BURST_SIZE,
 205        DMA_ARB_CTRL,
 206        DMA_PRIORITY_0,
 207        DMA_PRIORITY_1,
 208        DMA_PRIORITY_2,
 209        DMA_INDEX2RING_0,
 210        DMA_INDEX2RING_1,
 211        DMA_INDEX2RING_2,
 212        DMA_INDEX2RING_3,
 213        DMA_INDEX2RING_4,
 214        DMA_INDEX2RING_5,
 215        DMA_INDEX2RING_6,
 216        DMA_INDEX2RING_7,
 217        DMA_RING0_TIMEOUT,
 218        DMA_RING1_TIMEOUT,
 219        DMA_RING2_TIMEOUT,
 220        DMA_RING3_TIMEOUT,
 221        DMA_RING4_TIMEOUT,
 222        DMA_RING5_TIMEOUT,
 223        DMA_RING6_TIMEOUT,
 224        DMA_RING7_TIMEOUT,
 225        DMA_RING8_TIMEOUT,
 226        DMA_RING9_TIMEOUT,
 227        DMA_RING10_TIMEOUT,
 228        DMA_RING11_TIMEOUT,
 229        DMA_RING12_TIMEOUT,
 230        DMA_RING13_TIMEOUT,
 231        DMA_RING14_TIMEOUT,
 232        DMA_RING15_TIMEOUT,
 233        DMA_RING16_TIMEOUT,
 234};
 235
 236static const u8 bcmgenet_dma_regs_v3plus[] = {
 237        [DMA_RING_CFG]          = 0x00,
 238        [DMA_CTRL]              = 0x04,
 239        [DMA_STATUS]            = 0x08,
 240        [DMA_SCB_BURST_SIZE]    = 0x0C,
 241        [DMA_ARB_CTRL]          = 0x2C,
 242        [DMA_PRIORITY_0]        = 0x30,
 243        [DMA_PRIORITY_1]        = 0x34,
 244        [DMA_PRIORITY_2]        = 0x38,
 245        [DMA_RING0_TIMEOUT]     = 0x2C,
 246        [DMA_RING1_TIMEOUT]     = 0x30,
 247        [DMA_RING2_TIMEOUT]     = 0x34,
 248        [DMA_RING3_TIMEOUT]     = 0x38,
 249        [DMA_RING4_TIMEOUT]     = 0x3c,
 250        [DMA_RING5_TIMEOUT]     = 0x40,
 251        [DMA_RING6_TIMEOUT]     = 0x44,
 252        [DMA_RING7_TIMEOUT]     = 0x48,
 253        [DMA_RING8_TIMEOUT]     = 0x4c,
 254        [DMA_RING9_TIMEOUT]     = 0x50,
 255        [DMA_RING10_TIMEOUT]    = 0x54,
 256        [DMA_RING11_TIMEOUT]    = 0x58,
 257        [DMA_RING12_TIMEOUT]    = 0x5c,
 258        [DMA_RING13_TIMEOUT]    = 0x60,
 259        [DMA_RING14_TIMEOUT]    = 0x64,
 260        [DMA_RING15_TIMEOUT]    = 0x68,
 261        [DMA_RING16_TIMEOUT]    = 0x6C,
 262        [DMA_INDEX2RING_0]      = 0x70,
 263        [DMA_INDEX2RING_1]      = 0x74,
 264        [DMA_INDEX2RING_2]      = 0x78,
 265        [DMA_INDEX2RING_3]      = 0x7C,
 266        [DMA_INDEX2RING_4]      = 0x80,
 267        [DMA_INDEX2RING_5]      = 0x84,
 268        [DMA_INDEX2RING_6]      = 0x88,
 269        [DMA_INDEX2RING_7]      = 0x8C,
 270};
 271
 272static const u8 bcmgenet_dma_regs_v2[] = {
 273        [DMA_RING_CFG]          = 0x00,
 274        [DMA_CTRL]              = 0x04,
 275        [DMA_STATUS]            = 0x08,
 276        [DMA_SCB_BURST_SIZE]    = 0x0C,
 277        [DMA_ARB_CTRL]          = 0x30,
 278        [DMA_PRIORITY_0]        = 0x34,
 279        [DMA_PRIORITY_1]        = 0x38,
 280        [DMA_PRIORITY_2]        = 0x3C,
 281        [DMA_RING0_TIMEOUT]     = 0x2C,
 282        [DMA_RING1_TIMEOUT]     = 0x30,
 283        [DMA_RING2_TIMEOUT]     = 0x34,
 284        [DMA_RING3_TIMEOUT]     = 0x38,
 285        [DMA_RING4_TIMEOUT]     = 0x3c,
 286        [DMA_RING5_TIMEOUT]     = 0x40,
 287        [DMA_RING6_TIMEOUT]     = 0x44,
 288        [DMA_RING7_TIMEOUT]     = 0x48,
 289        [DMA_RING8_TIMEOUT]     = 0x4c,
 290        [DMA_RING9_TIMEOUT]     = 0x50,
 291        [DMA_RING10_TIMEOUT]    = 0x54,
 292        [DMA_RING11_TIMEOUT]    = 0x58,
 293        [DMA_RING12_TIMEOUT]    = 0x5c,
 294        [DMA_RING13_TIMEOUT]    = 0x60,
 295        [DMA_RING14_TIMEOUT]    = 0x64,
 296        [DMA_RING15_TIMEOUT]    = 0x68,
 297        [DMA_RING16_TIMEOUT]    = 0x6C,
 298};
 299
 300static const u8 bcmgenet_dma_regs_v1[] = {
 301        [DMA_CTRL]              = 0x00,
 302        [DMA_STATUS]            = 0x04,
 303        [DMA_SCB_BURST_SIZE]    = 0x0C,
 304        [DMA_ARB_CTRL]          = 0x30,
 305        [DMA_PRIORITY_0]        = 0x34,
 306        [DMA_PRIORITY_1]        = 0x38,
 307        [DMA_PRIORITY_2]        = 0x3C,
 308        [DMA_RING0_TIMEOUT]     = 0x2C,
 309        [DMA_RING1_TIMEOUT]     = 0x30,
 310        [DMA_RING2_TIMEOUT]     = 0x34,
 311        [DMA_RING3_TIMEOUT]     = 0x38,
 312        [DMA_RING4_TIMEOUT]     = 0x3c,
 313        [DMA_RING5_TIMEOUT]     = 0x40,
 314        [DMA_RING6_TIMEOUT]     = 0x44,
 315        [DMA_RING7_TIMEOUT]     = 0x48,
 316        [DMA_RING8_TIMEOUT]     = 0x4c,
 317        [DMA_RING9_TIMEOUT]     = 0x50,
 318        [DMA_RING10_TIMEOUT]    = 0x54,
 319        [DMA_RING11_TIMEOUT]    = 0x58,
 320        [DMA_RING12_TIMEOUT]    = 0x5c,
 321        [DMA_RING13_TIMEOUT]    = 0x60,
 322        [DMA_RING14_TIMEOUT]    = 0x64,
 323        [DMA_RING15_TIMEOUT]    = 0x68,
 324        [DMA_RING16_TIMEOUT]    = 0x6C,
 325};
 326
 327/* Set at runtime once bcmgenet version is known */
 328static const u8 *bcmgenet_dma_regs;
 329
 330static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
 331{
 332        return netdev_priv(dev_get_drvdata(dev));
 333}
 334
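/* Descriptive note on the layout assumed by the accessors below: the global
 * TDMA/RDMA registers follow the per-ring register file (DMA_RINGS_SIZE
 * bytes), which itself sits after the descriptor area skipped by
 * GENET_TDMA_REG_OFF / GENET_RDMA_REG_OFF.
 */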
 335static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
 336                                      enum dma_reg r)
 337{
 338        return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
 339                              DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
 340}
 341
 342static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
 343                                        u32 val, enum dma_reg r)
 344{
 345        bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
 346                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
 347}
 348
 349static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
 350                                      enum dma_reg r)
 351{
 352        return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
 353                              DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
 354}
 355
 356static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
 357                                        u32 val, enum dma_reg r)
 358{
 359        bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
 360                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
 361}
 362
  363/* RDMA/TDMA ring registers and accessors.
  364 * We merge the common fields and prefix with TDMA_/RDMA_ the registers
  365 * that have a different meaning depending on the direction.
 366 */
 367enum dma_ring_reg {
 368        TDMA_READ_PTR = 0,
 369        RDMA_WRITE_PTR = TDMA_READ_PTR,
 370        TDMA_READ_PTR_HI,
 371        RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
 372        TDMA_CONS_INDEX,
 373        RDMA_PROD_INDEX = TDMA_CONS_INDEX,
 374        TDMA_PROD_INDEX,
 375        RDMA_CONS_INDEX = TDMA_PROD_INDEX,
 376        DMA_RING_BUF_SIZE,
 377        DMA_START_ADDR,
 378        DMA_START_ADDR_HI,
 379        DMA_END_ADDR,
 380        DMA_END_ADDR_HI,
 381        DMA_MBUF_DONE_THRESH,
 382        TDMA_FLOW_PERIOD,
 383        RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
 384        TDMA_WRITE_PTR,
 385        RDMA_READ_PTR = TDMA_WRITE_PTR,
 386        TDMA_WRITE_PTR_HI,
 387        RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
 388};
 389
  390/* GENET v4 supports 40-bit pointer addressing.
  391 * The LO and HI word parts are kept contiguous, which
  392 * shifts the offsets of the other registers relative
  393 * to earlier versions.
 394 */
 395static const u8 genet_dma_ring_regs_v4[] = {
 396        [TDMA_READ_PTR]                 = 0x00,
 397        [TDMA_READ_PTR_HI]              = 0x04,
 398        [TDMA_CONS_INDEX]               = 0x08,
 399        [TDMA_PROD_INDEX]               = 0x0C,
 400        [DMA_RING_BUF_SIZE]             = 0x10,
 401        [DMA_START_ADDR]                = 0x14,
 402        [DMA_START_ADDR_HI]             = 0x18,
 403        [DMA_END_ADDR]                  = 0x1C,
 404        [DMA_END_ADDR_HI]               = 0x20,
 405        [DMA_MBUF_DONE_THRESH]          = 0x24,
 406        [TDMA_FLOW_PERIOD]              = 0x28,
 407        [TDMA_WRITE_PTR]                = 0x2C,
 408        [TDMA_WRITE_PTR_HI]             = 0x30,
 409};
 410
 411static const u8 genet_dma_ring_regs_v123[] = {
 412        [TDMA_READ_PTR]                 = 0x00,
 413        [TDMA_CONS_INDEX]               = 0x04,
 414        [TDMA_PROD_INDEX]               = 0x08,
 415        [DMA_RING_BUF_SIZE]             = 0x0C,
 416        [DMA_START_ADDR]                = 0x10,
 417        [DMA_END_ADDR]                  = 0x14,
 418        [DMA_MBUF_DONE_THRESH]          = 0x18,
 419        [TDMA_FLOW_PERIOD]              = 0x1C,
 420        [TDMA_WRITE_PTR]                = 0x20,
 421};
 422
 423/* Set at runtime once GENET version is known */
 424static const u8 *genet_dma_ring_regs;
 425
 426static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
 427                                           unsigned int ring,
 428                                           enum dma_ring_reg r)
 429{
 430        return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
 431                              (DMA_RING_SIZE * ring) +
 432                              genet_dma_ring_regs[r]);
 433}
 434
 435static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
 436                                             unsigned int ring, u32 val,
 437                                             enum dma_ring_reg r)
 438{
 439        bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
 440                        (DMA_RING_SIZE * ring) +
 441                        genet_dma_ring_regs[r]);
 442}
 443
 444static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
 445                                           unsigned int ring,
 446                                           enum dma_ring_reg r)
 447{
 448        return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
 449                              (DMA_RING_SIZE * ring) +
 450                              genet_dma_ring_regs[r]);
 451}
 452
 453static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 454                                             unsigned int ring, u32 val,
 455                                             enum dma_ring_reg r)
 456{
 457        bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
 458                        (DMA_RING_SIZE * ring) +
 459                        genet_dma_ring_regs[r]);
 460}
 461
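/* Hardware Filter Block (HFB) enable helpers, as a descriptive note: filters
 * are enabled/disabled through a pair of 32-bit enable words at
 * HFB_FLT_ENABLE_V3PLUS, and the block as a whole is gated by the
 * RBUF_HFB_EN bit in HFB_CTRL.
 */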
 462static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
 463{
 464        u32 offset;
 465        u32 reg;
 466
 467        offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
 468        reg = bcmgenet_hfb_reg_readl(priv, offset);
 469        reg |= (1 << (f_index % 32));
 470        bcmgenet_hfb_reg_writel(priv, reg, offset);
 471        reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
 472        reg |= RBUF_HFB_EN;
 473        bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
 474}
 475
 476static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
 477{
 478        u32 offset, reg, reg1;
 479
 480        offset = HFB_FLT_ENABLE_V3PLUS;
 481        reg = bcmgenet_hfb_reg_readl(priv, offset);
 482        reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
  483        if (f_index < 32) {
 484                reg1 &= ~(1 << (f_index % 32));
 485                bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
 486        } else {
 487                reg &= ~(1 << (f_index % 32));
 488                bcmgenet_hfb_reg_writel(priv, reg, offset);
 489        }
 490        if (!reg && !reg1) {
 491                reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
 492                reg &= ~RBUF_HFB_EN;
 493                bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
 494        }
 495}
 496
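/* Each DMA_INDEX2RING register packs eight 4-bit Rx queue indices, so filter
 * f_index is steered through register f_index / 8, nibble f_index % 8.
 */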
 497static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
 498                                                     u32 f_index, u32 rx_queue)
 499{
 500        u32 offset;
 501        u32 reg;
 502
 503        offset = f_index / 8;
 504        reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
 505        reg &= ~(0xF << (4 * (f_index % 8)));
 506        reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
 507        bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
 508}
 509
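/* Filter lengths are packed four per 32-bit word, one byte each, with the
 * words indexed from the highest-numbered filter downwards.
 */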
 510static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
 511                                           u32 f_index, u32 f_length)
 512{
 513        u32 offset;
 514        u32 reg;
 515
 516        offset = HFB_FLT_LEN_V3PLUS +
 517                 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
 518                 sizeof(u32);
 519        reg = bcmgenet_hfb_reg_readl(priv, offset);
 520        reg &= ~(0xFF << (8 * (f_index % 4)));
 521        reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
 522        bcmgenet_hfb_reg_writel(priv, reg, offset);
 523}
 524
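/* The HFB matches data with per-nibble granularity, so only mask bytes of
 * 0x00, 0x0f, 0xf0 or 0xff can be programmed into a filter.
 */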
 525static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
 526{
 527        while (size) {
 528                switch (*(unsigned char *)mask++) {
 529                case 0x00:
 530                case 0x0f:
 531                case 0xf0:
 532                case 0xff:
 533                        size--;
 534                        continue;
 535                default:
 536                        return -EINVAL;
 537                }
 538        }
 539
 540        return 0;
 541}
 542
 543#define VALIDATE_MASK(x) \
 544        bcmgenet_hfb_validate_mask(&(x), sizeof(x))
 545
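/* Write (value, mask) bytes into a filter's data RAM. Each 32-bit filter word
 * carries two data bytes (even offsets in bits 15:8, odd offsets in bits 7:0)
 * plus per-nibble match-enable bits in bits 19:16.
 */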
 546static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
 547                                    u32 offset, void *val, void *mask,
 548                                    size_t size)
 549{
 550        u32 index, tmp;
 551
 552        index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
 553        tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
 554
 555        while (size--) {
 556                if (offset++ & 1) {
 557                        tmp &= ~0x300FF;
 558                        tmp |= (*(unsigned char *)val++);
 559                        switch ((*(unsigned char *)mask++)) {
 560                        case 0xFF:
 561                                tmp |= 0x30000;
 562                                break;
 563                        case 0xF0:
 564                                tmp |= 0x20000;
 565                                break;
 566                        case 0x0F:
 567                                tmp |= 0x10000;
 568                                break;
 569                        }
 570                        bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
 571                        if (size)
 572                                tmp = bcmgenet_hfb_readl(priv,
 573                                                         index * sizeof(u32));
 574                } else {
 575                        tmp &= ~0xCFF00;
 576                        tmp |= (*(unsigned char *)val++) << 8;
 577                        switch ((*(unsigned char *)mask++)) {
 578                        case 0xFF:
 579                                tmp |= 0xC0000;
 580                                break;
 581                        case 0xF0:
 582                                tmp |= 0x80000;
 583                                break;
 584                        case 0x0F:
 585                                tmp |= 0x40000;
 586                                break;
 587                        }
 588                        if (!size)
 589                                bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
 590                }
 591        }
 592
 593        return 0;
 594}
 595
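/* Translate an ethtool rx-flow-spec into an HFB filter. Match offsets are
 * relative to the start of the Ethernet header, shifted by VLAN_HLEN when a
 * VLAN tag is part of the rule.
 */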
 596static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 597                                             struct bcmgenet_rxnfc_rule *rule)
 598{
 599        struct ethtool_rx_flow_spec *fs = &rule->fs;
 600        u32 offset = 0, f_length = 0, f;
 601        u8 val_8, mask_8;
 602        __be16 val_16;
 603        u16 mask_16;
 604        size_t size;
 605
 606        f = fs->location;
 607        if (fs->flow_type & FLOW_MAC_EXT) {
 608                bcmgenet_hfb_insert_data(priv, f, 0,
 609                                         &fs->h_ext.h_dest, &fs->m_ext.h_dest,
 610                                         sizeof(fs->h_ext.h_dest));
 611        }
 612
 613        if (fs->flow_type & FLOW_EXT) {
 614                if (fs->m_ext.vlan_etype ||
 615                    fs->m_ext.vlan_tci) {
 616                        bcmgenet_hfb_insert_data(priv, f, 12,
 617                                                 &fs->h_ext.vlan_etype,
 618                                                 &fs->m_ext.vlan_etype,
 619                                                 sizeof(fs->h_ext.vlan_etype));
 620                        bcmgenet_hfb_insert_data(priv, f, 14,
 621                                                 &fs->h_ext.vlan_tci,
 622                                                 &fs->m_ext.vlan_tci,
 623                                                 sizeof(fs->h_ext.vlan_tci));
 624                        offset += VLAN_HLEN;
 625                        f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
 626                }
 627        }
 628
 629        switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 630        case ETHER_FLOW:
 631                f_length += DIV_ROUND_UP(ETH_HLEN, 2);
 632                bcmgenet_hfb_insert_data(priv, f, 0,
 633                                         &fs->h_u.ether_spec.h_dest,
 634                                         &fs->m_u.ether_spec.h_dest,
 635                                         sizeof(fs->h_u.ether_spec.h_dest));
 636                bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
 637                                         &fs->h_u.ether_spec.h_source,
 638                                         &fs->m_u.ether_spec.h_source,
 639                                         sizeof(fs->h_u.ether_spec.h_source));
 640                bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
 641                                         &fs->h_u.ether_spec.h_proto,
 642                                         &fs->m_u.ether_spec.h_proto,
 643                                         sizeof(fs->h_u.ether_spec.h_proto));
 644                break;
 645        case IP_USER_FLOW:
 646                f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
 647                /* Specify IP Ether Type */
 648                val_16 = htons(ETH_P_IP);
 649                mask_16 = 0xFFFF;
 650                bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
 651                                         &val_16, &mask_16, sizeof(val_16));
 652                bcmgenet_hfb_insert_data(priv, f, 15 + offset,
 653                                         &fs->h_u.usr_ip4_spec.tos,
 654                                         &fs->m_u.usr_ip4_spec.tos,
 655                                         sizeof(fs->h_u.usr_ip4_spec.tos));
 656                bcmgenet_hfb_insert_data(priv, f, 23 + offset,
 657                                         &fs->h_u.usr_ip4_spec.proto,
 658                                         &fs->m_u.usr_ip4_spec.proto,
 659                                         sizeof(fs->h_u.usr_ip4_spec.proto));
 660                bcmgenet_hfb_insert_data(priv, f, 26 + offset,
 661                                         &fs->h_u.usr_ip4_spec.ip4src,
 662                                         &fs->m_u.usr_ip4_spec.ip4src,
 663                                         sizeof(fs->h_u.usr_ip4_spec.ip4src));
 664                bcmgenet_hfb_insert_data(priv, f, 30 + offset,
 665                                         &fs->h_u.usr_ip4_spec.ip4dst,
 666                                         &fs->m_u.usr_ip4_spec.ip4dst,
 667                                         sizeof(fs->h_u.usr_ip4_spec.ip4dst));
 668                if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
 669                        break;
 670
 671                /* Only supports 20 byte IPv4 header */
 672                val_8 = 0x45;
 673                mask_8 = 0xFF;
 674                bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
 675                                         &val_8, &mask_8,
 676                                         sizeof(val_8));
 677                size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
 678                bcmgenet_hfb_insert_data(priv, f,
 679                                         ETH_HLEN + 20 + offset,
 680                                         &fs->h_u.usr_ip4_spec.l4_4_bytes,
 681                                         &fs->m_u.usr_ip4_spec.l4_4_bytes,
 682                                         size);
 683                f_length += DIV_ROUND_UP(size, 2);
 684                break;
 685        }
 686
 687        bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
 688        if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
  689                /* Ring 0 flows can be handled by the default descriptor ring;
  690                 * we'll map them to ring 0, but don't enable the filter.
 691                 */
 692                bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
 693                rule->state = BCMGENET_RXNFC_STATE_DISABLED;
 694        } else {
 695                /* Other Rx rings are direct mapped here */
 696                bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
 697                                                         fs->ring_cookie);
 698                bcmgenet_hfb_enable_filter(priv, f);
 699                rule->state = BCMGENET_RXNFC_STATE_ENABLED;
 700        }
 701}
 702
  703/* bcmgenet_hfb_clear_filter
  704 *
  705 * Zero out the data words of a single Hardware Filter Block filter.
  706 */
 707static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
 708{
 709        u32 base, i;
 710
 711        base = f_index * priv->hw_params->hfb_filter_size;
 712        for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
 713                bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
 714}
 715
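/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */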
 716static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
 717{
 718        u32 i;
 719
 720        if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
 721                return;
 722
 723        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
 724        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
 725        bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
 726
 727        for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
 728                bcmgenet_rdma_writel(priv, 0x0, i);
 729
 730        for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
 731                bcmgenet_hfb_reg_writel(priv, 0x0,
 732                                        HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
 733
 734        for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
 735                bcmgenet_hfb_clear_filter(priv, i);
 736}
 737
 738static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
 739{
 740        int i;
 741
 742        INIT_LIST_HEAD(&priv->rxnfc_list);
 743        if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
 744                return;
 745
 746        for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
 747                INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
 748                priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
 749        }
 750
 751        bcmgenet_hfb_clear(priv);
 752}
 753
 754static int bcmgenet_begin(struct net_device *dev)
 755{
 756        struct bcmgenet_priv *priv = netdev_priv(dev);
 757
 758        /* Turn on the clock */
 759        return clk_prepare_enable(priv->clk);
 760}
 761
 762static void bcmgenet_complete(struct net_device *dev)
 763{
 764        struct bcmgenet_priv *priv = netdev_priv(dev);
 765
 766        /* Turn off the clock */
 767        clk_disable_unprepare(priv->clk);
 768}
 769
 770static int bcmgenet_get_link_ksettings(struct net_device *dev,
 771                                       struct ethtool_link_ksettings *cmd)
 772{
 773        if (!netif_running(dev))
 774                return -EINVAL;
 775
 776        if (!dev->phydev)
 777                return -ENODEV;
 778
 779        phy_ethtool_ksettings_get(dev->phydev, cmd);
 780
 781        return 0;
 782}
 783
 784static int bcmgenet_set_link_ksettings(struct net_device *dev,
 785                                       const struct ethtool_link_ksettings *cmd)
 786{
 787        if (!netif_running(dev))
 788                return -EINVAL;
 789
 790        if (!dev->phydev)
 791                return -ENODEV;
 792
 793        return phy_ethtool_ksettings_set(dev->phydev, cmd);
 794}
 795
 796static int bcmgenet_set_features(struct net_device *dev,
 797                                 netdev_features_t features)
 798{
 799        struct bcmgenet_priv *priv = netdev_priv(dev);
 800        u32 reg;
 801        int ret;
 802
 803        ret = clk_prepare_enable(priv->clk);
 804        if (ret)
 805                return ret;
 806
  807        /* Make sure we reflect the value of CMD_CRC_FWD */
 808        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
 809        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 810
 811        clk_disable_unprepare(priv->clk);
 812
 813        return ret;
 814}
 815
 816static u32 bcmgenet_get_msglevel(struct net_device *dev)
 817{
 818        struct bcmgenet_priv *priv = netdev_priv(dev);
 819
 820        return priv->msg_enable;
 821}
 822
 823static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
 824{
 825        struct bcmgenet_priv *priv = netdev_priv(dev);
 826
 827        priv->msg_enable = level;
 828}
 829
 830static int bcmgenet_get_coalesce(struct net_device *dev,
 831                                 struct ethtool_coalesce *ec)
 832{
 833        struct bcmgenet_priv *priv = netdev_priv(dev);
 834        struct bcmgenet_rx_ring *ring;
 835        unsigned int i;
 836
 837        ec->tx_max_coalesced_frames =
 838                bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
 839                                         DMA_MBUF_DONE_THRESH);
 840        ec->rx_max_coalesced_frames =
 841                bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
 842                                         DMA_MBUF_DONE_THRESH);
 843        ec->rx_coalesce_usecs =
 844                bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
 845
 846        for (i = 0; i < priv->hw_params->rx_queues; i++) {
 847                ring = &priv->rx_rings[i];
 848                ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
 849        }
 850        ring = &priv->rx_rings[DESC_INDEX];
 851        ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
 852
 853        return 0;
 854}
 855
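/* Program one Rx ring's interrupt coalescing: the packet threshold goes into
 * DMA_MBUF_DONE_THRESH and the timeout is converted from microseconds into
 * units of ~8.192 us (the 125 MHz system clock divided by 1024).
 */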
 856static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
 857                                     u32 usecs, u32 pkts)
 858{
 859        struct bcmgenet_priv *priv = ring->priv;
 860        unsigned int i = ring->index;
 861        u32 reg;
 862
 863        bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
 864
 865        reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
 866        reg &= ~DMA_TIMEOUT_MASK;
 867        reg |= DIV_ROUND_UP(usecs * 1000, 8192);
 868        bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
 869}
 870
 871static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
 872                                          struct ethtool_coalesce *ec)
 873{
 874        struct dim_cq_moder moder;
 875        u32 usecs, pkts;
 876
 877        ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
 878        ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
 879        usecs = ring->rx_coalesce_usecs;
 880        pkts = ring->rx_max_coalesced_frames;
 881
 882        if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
 883                moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
 884                usecs = moder.usec;
 885                pkts = moder.pkts;
 886        }
 887
 888        ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
 889        bcmgenet_set_rx_coalesce(ring, usecs, pkts);
 890}
 891
 892static int bcmgenet_set_coalesce(struct net_device *dev,
 893                                 struct ethtool_coalesce *ec)
 894{
 895        struct bcmgenet_priv *priv = netdev_priv(dev);
 896        unsigned int i;
 897
  898        /* Base system clock is 125 MHz; the DMA timeout is this reference
  899         * clock divided by 1024, which yields roughly 8.192 us. Our maximum
  900         * value has to fit in the DMA_TIMEOUT_MASK (16 bits).
 901         */
 902        if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
 903            ec->tx_max_coalesced_frames == 0 ||
 904            ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
 905            ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
 906                return -EINVAL;
 907
 908        if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
 909                return -EINVAL;
 910
 911        /* GENET TDMA hardware does not support a configurable timeout, but will
 912         * always generate an interrupt either after MBDONE packets have been
 913         * transmitted, or when the ring is empty.
 914         */
 915
 916        /* Program all TX queues with the same values, as there is no
 917         * ethtool knob to do coalescing on a per-queue basis
 918         */
 919        for (i = 0; i < priv->hw_params->tx_queues; i++)
 920                bcmgenet_tdma_ring_writel(priv, i,
 921                                          ec->tx_max_coalesced_frames,
 922                                          DMA_MBUF_DONE_THRESH);
 923        bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
 924                                  ec->tx_max_coalesced_frames,
 925                                  DMA_MBUF_DONE_THRESH);
 926
 927        for (i = 0; i < priv->hw_params->rx_queues; i++)
 928                bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
 929        bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
 930
 931        return 0;
 932}
 933
 934/* standard ethtool support functions. */
 935enum bcmgenet_stat_type {
 936        BCMGENET_STAT_NETDEV = -1,
 937        BCMGENET_STAT_MIB_RX,
 938        BCMGENET_STAT_MIB_TX,
 939        BCMGENET_STAT_RUNT,
 940        BCMGENET_STAT_MISC,
 941        BCMGENET_STAT_SOFT,
 942};
 943
 944struct bcmgenet_stats {
 945        char stat_string[ETH_GSTRING_LEN];
 946        int stat_sizeof;
 947        int stat_offset;
 948        enum bcmgenet_stat_type type;
 949        /* reg offset from UMAC base for misc counters */
 950        u16 reg_offset;
 951};
 952
 953#define STAT_NETDEV(m) { \
 954        .stat_string = __stringify(m), \
 955        .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
 956        .stat_offset = offsetof(struct net_device_stats, m), \
 957        .type = BCMGENET_STAT_NETDEV, \
 958}
 959
 960#define STAT_GENET_MIB(str, m, _type) { \
 961        .stat_string = str, \
 962        .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
 963        .stat_offset = offsetof(struct bcmgenet_priv, m), \
 964        .type = _type, \
 965}
 966
 967#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 968#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 969#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
 970#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 971
 972#define STAT_GENET_MISC(str, m, offset) { \
 973        .stat_string = str, \
 974        .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
 975        .stat_offset = offsetof(struct bcmgenet_priv, m), \
 976        .type = BCMGENET_STAT_MISC, \
 977        .reg_offset = offset, \
 978}
 979
 980#define STAT_GENET_Q(num) \
 981        STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
 982                        tx_rings[num].packets), \
 983        STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
 984                        tx_rings[num].bytes), \
 985        STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
 986                        rx_rings[num].bytes),    \
 987        STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
 988                        rx_rings[num].packets), \
 989        STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
 990                        rx_rings[num].errors), \
 991        STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
 992                        rx_rings[num].dropped)
 993
  994/* There is a 0xC gap between the end of the RX stats and the beginning of the
  995 * TX stats, and again between the end of the TX stats and the RX RUNT stats.
  996 */
 997#define BCMGENET_STAT_OFFSET    0xc
 998
 999/* Hardware counters must be kept in sync because the order/offset
1000 * is important here (order in structure declaration = order in hardware)
1001 */
1002static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
1003        /* general stats */
1004        STAT_NETDEV(rx_packets),
1005        STAT_NETDEV(tx_packets),
1006        STAT_NETDEV(rx_bytes),
1007        STAT_NETDEV(tx_bytes),
1008        STAT_NETDEV(rx_errors),
1009        STAT_NETDEV(tx_errors),
1010        STAT_NETDEV(rx_dropped),
1011        STAT_NETDEV(tx_dropped),
1012        STAT_NETDEV(multicast),
1013        /* UniMAC RSV counters */
1014        STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
1015        STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
1016        STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
1017        STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
1018        STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
1019        STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
1020        STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
1021        STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
1022        STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
1023        STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
1024        STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
1025        STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
1026        STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
1027        STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
1028        STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
1029        STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
1030        STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
1031        STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
1032        STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
1033        STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
1034        STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
1035        STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
1036        STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
1037        STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
1038        STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
1039        STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
1040        STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
1041        STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
1042        STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
1043        /* UniMAC TSV counters */
1044        STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
1045        STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
1046        STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
1047        STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
1048        STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
1049        STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
1050        STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
1051        STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
1052        STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
1053        STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
1054        STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
1055        STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
1056        STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
1057        STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
1058        STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
1059        STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
1060        STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
1061        STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
1062        STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
1063        STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
1064        STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
1065        STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
1066        STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
1067        STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
1068        STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
1069        STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
1070        STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
1071        STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
1072        STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
1073        /* UniMAC RUNT counters */
1074        STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
1075        STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
1076        STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
1077        STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
1078        /* Misc UniMAC counters */
1079        STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
1080                        UMAC_RBUF_OVFL_CNT_V1),
1081        STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
1082                        UMAC_RBUF_ERR_CNT_V1),
1083        STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
1084        STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
1085        STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
1086        STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
1087        STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
1088        STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
1089                            mib.tx_realloc_tsb_failed),
 1090        /* Per-queue (TX and RX ring) statistics */
1091        STAT_GENET_Q(0),
1092        STAT_GENET_Q(1),
1093        STAT_GENET_Q(2),
1094        STAT_GENET_Q(3),
1095        STAT_GENET_Q(16),
1096};
1097
1098#define BCMGENET_STATS_LEN      ARRAY_SIZE(bcmgenet_gstrings_stats)
1099
1100static void bcmgenet_get_drvinfo(struct net_device *dev,
1101                                 struct ethtool_drvinfo *info)
1102{
1103        strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
1104}
1105
1106static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
1107{
1108        switch (string_set) {
1109        case ETH_SS_STATS:
1110                return BCMGENET_STATS_LEN;
1111        default:
1112                return -EOPNOTSUPP;
1113        }
1114}
1115
1116static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
1117                                 u8 *data)
1118{
1119        int i;
1120
1121        switch (stringset) {
1122        case ETH_SS_STATS:
1123                for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1124                        memcpy(data + i * ETH_GSTRING_LEN,
1125                               bcmgenet_gstrings_stats[i].stat_string,
1126                               ETH_GSTRING_LEN);
1127                }
1128                break;
1129        }
1130}
1131
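/* The miscellaneous UniMAC counters moved out of the UMAC register block on
 * GENET v2 and later; translate the v1 offset to the matching RBUF register
 * and clear the counter if it has overflowed.
 */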
1132static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
1133{
1134        u16 new_offset;
1135        u32 val;
1136
1137        switch (offset) {
1138        case UMAC_RBUF_OVFL_CNT_V1:
1139                if (GENET_IS_V2(priv))
1140                        new_offset = RBUF_OVFL_CNT_V2;
1141                else
1142                        new_offset = RBUF_OVFL_CNT_V3PLUS;
1143
1144                val = bcmgenet_rbuf_readl(priv, new_offset);
1145                /* clear if overflowed */
1146                if (val == ~0)
1147                        bcmgenet_rbuf_writel(priv, 0, new_offset);
1148                break;
1149        case UMAC_RBUF_ERR_CNT_V1:
1150                if (GENET_IS_V2(priv))
1151                        new_offset = RBUF_ERR_CNT_V2;
1152                else
1153                        new_offset = RBUF_ERR_CNT_V3PLUS;
1154
1155                val = bcmgenet_rbuf_readl(priv, new_offset);
1156                /* clear if overflowed */
1157                if (val == ~0)
1158                        bcmgenet_rbuf_writel(priv, 0, new_offset);
1159                break;
1160        default:
1161                val = bcmgenet_umac_readl(priv, offset);
1162                /* clear if overflowed */
1163                if (val == ~0)
1164                        bcmgenet_umac_writel(priv, 0, offset);
1165                break;
1166        }
1167
1168        return val;
1169}
1170
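/* Walk bcmgenet_gstrings_stats in declaration order; the accumulated size of
 * the hardware-backed entries is the running offset into the MIB block, with
 * the extra 0xC gaps applied before the TX and RUNT sections (netdev and
 * software stats are skipped).
 */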
1171static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
1172{
1173        int i, j = 0;
1174
1175        for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1176                const struct bcmgenet_stats *s;
1177                u8 offset = 0;
1178                u32 val = 0;
1179                char *p;
1180
1181                s = &bcmgenet_gstrings_stats[i];
1182                switch (s->type) {
1183                case BCMGENET_STAT_NETDEV:
1184                case BCMGENET_STAT_SOFT:
1185                        continue;
1186                case BCMGENET_STAT_RUNT:
1187                        offset += BCMGENET_STAT_OFFSET;
1188                        fallthrough;
1189                case BCMGENET_STAT_MIB_TX:
1190                        offset += BCMGENET_STAT_OFFSET;
1191                        fallthrough;
1192                case BCMGENET_STAT_MIB_RX:
1193                        val = bcmgenet_umac_readl(priv,
1194                                                  UMAC_MIB_START + j + offset);
1195                        offset = 0;     /* Reset Offset */
1196                        break;
1197                case BCMGENET_STAT_MISC:
1198                        if (GENET_IS_V1(priv)) {
1199                                val = bcmgenet_umac_readl(priv, s->reg_offset);
1200                                /* clear if overflowed */
1201                                if (val == ~0)
1202                                        bcmgenet_umac_writel(priv, 0,
1203                                                             s->reg_offset);
1204                        } else {
1205                                val = bcmgenet_update_stat_misc(priv,
1206                                                                s->reg_offset);
1207                        }
1208                        break;
1209                }
1210
1211                j += s->stat_sizeof;
1212                p = (char *)priv + s->stat_offset;
1213                *(u32 *)p = val;
1214        }
1215}
1216
1217static void bcmgenet_get_ethtool_stats(struct net_device *dev,
1218                                       struct ethtool_stats *stats,
1219                                       u64 *data)
1220{
1221        struct bcmgenet_priv *priv = netdev_priv(dev);
1222        int i;
1223
1224        if (netif_running(dev))
1225                bcmgenet_update_mib_counters(priv);
1226
1227        dev->netdev_ops->ndo_get_stats(dev);
1228
1229        for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1230                const struct bcmgenet_stats *s;
1231                char *p;
1232
1233                s = &bcmgenet_gstrings_stats[i];
1234                if (s->type == BCMGENET_STAT_NETDEV)
1235                        p = (char *)&dev->stats;
1236                else
1237                        p = (char *)priv;
1238                p += s->stat_offset;
1239                if (sizeof(unsigned long) != sizeof(u32) &&
1240                    s->stat_sizeof == sizeof(unsigned long))
1241                        data[i] = *(unsigned long *)p;
1242                else
1243                        data[i] = *(u32 *)p;
1244        }
1245}
1246
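/* Toggle Energy Efficient Ethernet: the EEE enable bits live in the UMAC,
 * TBUF and RBUF blocks, and the dedicated EEE clock is gated on and off
 * around the register updates.
 */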
1247static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
1248{
1249        struct bcmgenet_priv *priv = netdev_priv(dev);
1250        u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
1251        u32 reg;
1252
1253        if (enable && !priv->clk_eee_enabled) {
1254                clk_prepare_enable(priv->clk_eee);
1255                priv->clk_eee_enabled = true;
1256        }
1257
1258        reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
1259        if (enable)
1260                reg |= EEE_EN;
1261        else
1262                reg &= ~EEE_EN;
1263        bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
1264
 1265        /* Enable EEE and switch to a 27 MHz clock automatically */
1266        reg = bcmgenet_readl(priv->base + off);
1267        if (enable)
1268                reg |= TBUF_EEE_EN | TBUF_PM_EN;
1269        else
1270                reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
1271        bcmgenet_writel(reg, priv->base + off);
1272
 1273        /* Do the same thing for RBUF */
1274        reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
1275        if (enable)
1276                reg |= RBUF_EEE_EN | RBUF_PM_EN;
1277        else
1278                reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
1279        bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
1280
1281        if (!enable && priv->clk_eee_enabled) {
1282                clk_disable_unprepare(priv->clk_eee);
1283                priv->clk_eee_enabled = false;
1284        }
1285
1286        priv->eee.eee_enabled = enable;
1287        priv->eee.eee_active = enable;
1288}
1289
1290static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
1291{
1292        struct bcmgenet_priv *priv = netdev_priv(dev);
1293        struct ethtool_eee *p = &priv->eee;
1294
1295        if (GENET_IS_V1(priv))
1296                return -EOPNOTSUPP;
1297
1298        if (!dev->phydev)
1299                return -ENODEV;
1300
1301        e->eee_enabled = p->eee_enabled;
1302        e->eee_active = p->eee_active;
1303        e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
1304
1305        return phy_ethtool_get_eee(dev->phydev, e);
1306}
1307
1308static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
1309{
1310        struct bcmgenet_priv *priv = netdev_priv(dev);
1311        struct ethtool_eee *p = &priv->eee;
1312        int ret = 0;
1313
1314        if (GENET_IS_V1(priv))
1315                return -EOPNOTSUPP;
1316
1317        if (!dev->phydev)
1318                return -ENODEV;
1319
1320        p->eee_enabled = e->eee_enabled;
1321
1322        if (!p->eee_enabled) {
1323                bcmgenet_eee_enable_set(dev, false);
1324        } else {
1325                ret = phy_init_eee(dev->phydev, 0);
1326                if (ret) {
1327                        netif_err(priv, hw, dev, "EEE initialization failed\n");
1328                        return ret;
1329                }
1330
1331                bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
1332                bcmgenet_eee_enable_set(dev, true);
1333        }
1334
1335        return phy_ethtool_set_eee(dev->phydev, e);
1336}
1337
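/* Reject rxnfc rules that the HFB cannot express: out-of-range locations,
 * unsupported flow types, user-defined data, or masks that are not
 * representable with per-nibble granularity (see VALIDATE_MASK).
 */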
1338static int bcmgenet_validate_flow(struct net_device *dev,
1339                                  struct ethtool_rxnfc *cmd)
1340{
1341        struct ethtool_usrip4_spec *l4_mask;
1342        struct ethhdr *eth_mask;
1343
1344        if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
1345                netdev_err(dev, "rxnfc: Invalid location (%d)\n",
1346                           cmd->fs.location);
1347                return -EINVAL;
1348        }
1349
1350        switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1351        case IP_USER_FLOW:
1352                l4_mask = &cmd->fs.m_u.usr_ip4_spec;
 1353                /* don't allow a mask which isn't valid */
1354                if (VALIDATE_MASK(l4_mask->ip4src) ||
1355                    VALIDATE_MASK(l4_mask->ip4dst) ||
1356                    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
1357                    VALIDATE_MASK(l4_mask->proto) ||
1358                    VALIDATE_MASK(l4_mask->ip_ver) ||
1359                    VALIDATE_MASK(l4_mask->tos)) {
1360                        netdev_err(dev, "rxnfc: Unsupported mask\n");
1361                        return -EINVAL;
1362                }
1363                break;
1364        case ETHER_FLOW:
1365                eth_mask = &cmd->fs.m_u.ether_spec;
 1366                /* don't allow a mask which isn't valid */
1367                if (VALIDATE_MASK(eth_mask->h_dest) ||
1368                    VALIDATE_MASK(eth_mask->h_source) ||
1369                    VALIDATE_MASK(eth_mask->h_proto)) {
1370                        netdev_err(dev, "rxnfc: Unsupported mask\n");
1371                        return -EINVAL;
1372                }
1373                break;
1374        default:
1375                netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
1376                           cmd->fs.flow_type);
1377                return -EINVAL;
1378        }
1379
1380        if ((cmd->fs.flow_type & FLOW_EXT)) {
 1381                /* don't allow a mask which isn't valid */
1382                if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
1383                    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
1384                        netdev_err(dev, "rxnfc: Unsupported mask\n");
1385                        return -EINVAL;
1386                }
1387                if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
1388                        netdev_err(dev, "rxnfc: user-def not supported\n");
1389                        return -EINVAL;
1390                }
1391        }
1392
1393        if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
1394                /* don't allow mask which isn't valid */
1395                if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
1396                        netdev_err(dev, "rxnfc: Unsupported mask\n");
1397                        return -EINVAL;
1398                }
1399        }
1400
1401        return 0;
1402}
1403
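/* Install (or replace) the HFB rule described by @cmd->fs at its requested
 * location and remember it on priv->rxnfc_list.  For illustration only, a
 * rule of this shape is typically configured from user space with something
 * like (exact ethtool syntax may differ between versions):
 *
 *   ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action 1
 *
 * which arrives here as an ETHER_FLOW spec steering matches to Rx queue 1.
 */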
1404static int bcmgenet_insert_flow(struct net_device *dev,
1405                                struct ethtool_rxnfc *cmd)
1406{
1407        struct bcmgenet_priv *priv = netdev_priv(dev);
1408        struct bcmgenet_rxnfc_rule *loc_rule;
1409        int err;
1410
1411        if (priv->hw_params->hfb_filter_size < 128) {
1412                netdev_err(dev, "rxnfc: Not supported by this device\n");
1413                return -EINVAL;
1414        }
1415
1416        if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
1417            cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
1418                netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
1419                           cmd->fs.ring_cookie);
1420                return -EINVAL;
1421        }
1422
1423        err = bcmgenet_validate_flow(dev, cmd);
1424        if (err)
1425                return err;
1426
1427        loc_rule = &priv->rxnfc_rules[cmd->fs.location];
1428        if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1429                bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
1430        if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1431                list_del(&loc_rule->list);
1432                bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
1433        }
1434        loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1435        memcpy(&loc_rule->fs, &cmd->fs,
1436               sizeof(struct ethtool_rx_flow_spec));
1437
1438        bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
1439
1440        list_add_tail(&loc_rule->list, &priv->rxnfc_list);
1441
1442        return 0;
1443}
1444
1445static int bcmgenet_delete_flow(struct net_device *dev,
1446                                struct ethtool_rxnfc *cmd)
1447{
1448        struct bcmgenet_priv *priv = netdev_priv(dev);
1449        struct bcmgenet_rxnfc_rule *rule;
1450        int err = 0;
1451
1452        if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1453                return -EINVAL;
1454
1455        rule = &priv->rxnfc_rules[cmd->fs.location];
1456        if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
1457                err = -ENOENT;
1458                goto out;
1459        }
1460
1461        if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1462                bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
1463        if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1464                list_del(&rule->list);
1465                bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
1466        }
1467        rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1468        memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
1469
1470out:
1471        return err;
1472}
1473
1474static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1475{
1476        struct bcmgenet_priv *priv = netdev_priv(dev);
1477        int err = 0;
1478
1479        switch (cmd->cmd) {
1480        case ETHTOOL_SRXCLSRLINS:
1481                err = bcmgenet_insert_flow(dev, cmd);
1482                break;
1483        case ETHTOOL_SRXCLSRLDEL:
1484                err = bcmgenet_delete_flow(dev, cmd);
1485                break;
1486        default:
1487                netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
1488                            cmd->cmd);
1489                return -EINVAL;
1490        }
1491
1492        return err;
1493}
1494
1495static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
1496                             int loc)
1497{
1498        struct bcmgenet_priv *priv = netdev_priv(dev);
1499        struct bcmgenet_rxnfc_rule *rule;
1500        int err = 0;
1501
1502        if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
1503                return -EINVAL;
1504
1505        rule = &priv->rxnfc_rules[loc];
1506        if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
1507                err = -ENOENT;
1508        else
1509                memcpy(&cmd->fs, &rule->fs,
1510                       sizeof(struct ethtool_rx_flow_spec));
1511
1512        return err;
1513}
1514
1515static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
1516{
1517        struct list_head *pos;
1518        int res = 0;
1519
1520        list_for_each(pos, &priv->rxnfc_list)
1521                res++;
1522
1523        return res;
1524}
1525
1526static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1527                              u32 *rule_locs)
1528{
1529        struct bcmgenet_priv *priv = netdev_priv(dev);
1530        struct bcmgenet_rxnfc_rule *rule;
1531        int err = 0;
1532        int i = 0;
1533
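        /* ETHTOOL_GRXRINGS reports the number of Rx queues (at least one),
         * ETHTOOL_GRXCLSRLCNT the number of installed rules and the table
         * size, ETHTOOL_GRXCLSRULE a single rule by location, and
         * ETHTOOL_GRXCLSRLALL the locations of every installed rule.
         */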
1534        switch (cmd->cmd) {
1535        case ETHTOOL_GRXRINGS:
1536                cmd->data = priv->hw_params->rx_queues ?: 1;
1537                break;
1538        case ETHTOOL_GRXCLSRLCNT:
1539                cmd->rule_cnt = bcmgenet_get_num_flows(priv);
1540                cmd->data = MAX_NUM_OF_FS_RULES;
1541                break;
1542        case ETHTOOL_GRXCLSRULE:
1543                err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
1544                break;
1545        case ETHTOOL_GRXCLSRLALL:
1546                list_for_each_entry(rule, &priv->rxnfc_list, list)
1547                        if (i < cmd->rule_cnt)
1548                                rule_locs[i++] = rule->fs.location;
1549                cmd->rule_cnt = i;
1550                cmd->data = MAX_NUM_OF_FS_RULES;
1551                break;
1552        default:
1553                err = -EOPNOTSUPP;
1554                break;
1555        }
1556
1557        return err;
1558}
1559
1560/* standard ethtool support functions. */
1561static const struct ethtool_ops bcmgenet_ethtool_ops = {
1562        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1563                                     ETHTOOL_COALESCE_MAX_FRAMES |
1564                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
1565        .begin                  = bcmgenet_begin,
1566        .complete               = bcmgenet_complete,
1567        .get_strings            = bcmgenet_get_strings,
1568        .get_sset_count         = bcmgenet_get_sset_count,
1569        .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
1570        .get_drvinfo            = bcmgenet_get_drvinfo,
1571        .get_link               = ethtool_op_get_link,
1572        .get_msglevel           = bcmgenet_get_msglevel,
1573        .set_msglevel           = bcmgenet_set_msglevel,
1574        .get_wol                = bcmgenet_get_wol,
1575        .set_wol                = bcmgenet_set_wol,
1576        .get_eee                = bcmgenet_get_eee,
1577        .set_eee                = bcmgenet_set_eee,
1578        .nway_reset             = phy_ethtool_nway_reset,
1579        .get_coalesce           = bcmgenet_get_coalesce,
1580        .set_coalesce           = bcmgenet_set_coalesce,
1581        .get_link_ksettings     = bcmgenet_get_link_ksettings,
1582        .set_link_ksettings     = bcmgenet_set_link_ksettings,
1583        .get_ts_info            = ethtool_op_get_ts_info,
1584        .get_rxnfc              = bcmgenet_get_rxnfc,
1585        .set_rxnfc              = bcmgenet_set_rxnfc,
1586};
1587
1588/* Power down the unimac, based on mode. */
1589static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1590                                enum bcmgenet_power_mode mode)
1591{
1592        int ret = 0;
1593        u32 reg;
1594
1595        switch (mode) {
1596        case GENET_POWER_CABLE_SENSE:
1597                phy_detach(priv->dev->phydev);
1598                break;
1599
1600        case GENET_POWER_WOL_MAGIC:
1601                ret = bcmgenet_wol_power_down_cfg(priv, mode);
1602                break;
1603
1604        case GENET_POWER_PASSIVE:
1605                /* Power down LED */
1606                if (priv->hw_params->flags & GENET_HAS_EXT) {
1607                        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1608                        if (GENET_IS_V5(priv))
1609                                reg |= EXT_PWR_DOWN_PHY_EN |
1610                                       EXT_PWR_DOWN_PHY_RD |
1611                                       EXT_PWR_DOWN_PHY_SD |
1612                                       EXT_PWR_DOWN_PHY_RX |
1613                                       EXT_PWR_DOWN_PHY_TX |
1614                                       EXT_IDDQ_GLBL_PWR;
1615                        else
1616                                reg |= EXT_PWR_DOWN_PHY;
1617
1618                        reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1619                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1620
1621                        bcmgenet_phy_power_set(priv->dev, false);
1622                }
1623                break;
1624        default:
1625                break;
1626        }
1627
1628        return ret;
1629}
1630
1631static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1632                              enum bcmgenet_power_mode mode)
1633{
1634        u32 reg;
1635
1636        if (!(priv->hw_params->flags & GENET_HAS_EXT))
1637                return;
1638
1639        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1640
1641        switch (mode) {
1642        case GENET_POWER_PASSIVE:
1643                reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
1644                         EXT_ENERGY_DET_MASK);
1645                if (GENET_IS_V5(priv)) {
1646                        reg &= ~(EXT_PWR_DOWN_PHY_EN |
1647                                 EXT_PWR_DOWN_PHY_RD |
1648                                 EXT_PWR_DOWN_PHY_SD |
1649                                 EXT_PWR_DOWN_PHY_RX |
1650                                 EXT_PWR_DOWN_PHY_TX |
1651                                 EXT_IDDQ_GLBL_PWR);
1652                        reg |=   EXT_PHY_RESET;
1653                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1654                        mdelay(1);
1655
1656                        reg &=  ~EXT_PHY_RESET;
1657                } else {
1658                        reg &= ~EXT_PWR_DOWN_PHY;
1659                        reg |= EXT_PWR_DN_EN_LD;
1660                }
1661                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1662                bcmgenet_phy_power_set(priv->dev, true);
1663                break;
1664
1665        case GENET_POWER_CABLE_SENSE:
1666                /* enable APD */
1667                if (!GENET_IS_V5(priv)) {
1668                        reg |= EXT_PWR_DN_EN_LD;
1669                        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1670                }
1671                break;
1672        case GENET_POWER_WOL_MAGIC:
1673                bcmgenet_wol_power_up_cfg(priv, mode);
1674                return;
1675        default:
1676                break;
1677        }
1678}
1679
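/* Claim the next free Tx control block for @ring and advance the local
 * write pointer, wrapping from end_ptr back to cb_ptr.  The returned
 * pointer is &ring->cbs[write_ptr - cb_ptr] at entry, i.e. the CB that
 * corresponds to the descriptor the caller is about to fill.
 */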
1680static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1681                                         struct bcmgenet_tx_ring *ring)
1682{
1683        struct enet_cb *tx_cb_ptr;
1684
1685        tx_cb_ptr = ring->cbs;
1686        tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1687
1688        /* Advancing local write pointer */
1689        if (ring->write_ptr == ring->end_ptr)
1690                ring->write_ptr = ring->cb_ptr;
1691        else
1692                ring->write_ptr++;
1693
1694        return tx_cb_ptr;
1695}
1696
1697static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
1698                                         struct bcmgenet_tx_ring *ring)
1699{
1700        struct enet_cb *tx_cb_ptr;
1701
1702        tx_cb_ptr = ring->cbs;
1703        tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1704
1705        /* Rewinding local write pointer */
1706        if (ring->write_ptr == ring->cb_ptr)
1707                ring->write_ptr = ring->end_ptr;
1708        else
1709                ring->write_ptr--;
1710
1711        return tx_cb_ptr;
1712}
1713
1714static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
1715{
1716        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1717                                 INTRL2_CPU_MASK_SET);
1718}
1719
1720static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
1721{
1722        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1723                                 INTRL2_CPU_MASK_CLEAR);
1724}
1725
1726static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1727{
1728        bcmgenet_intrl2_1_writel(ring->priv,
1729                                 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1730                                 INTRL2_CPU_MASK_SET);
1731}
1732
1733static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1734{
1735        bcmgenet_intrl2_1_writel(ring->priv,
1736                                 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1737                                 INTRL2_CPU_MASK_CLEAR);
1738}
1739
1740static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
1741{
1742        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1743                                 INTRL2_CPU_MASK_SET);
1744}
1745
1746static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
1747{
1748        bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1749                                 INTRL2_CPU_MASK_CLEAR);
1750}
1751
1752static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1753{
1754        bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1755                                 INTRL2_CPU_MASK_CLEAR);
1756}
1757
1758static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1759{
1760        bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1761                                 INTRL2_CPU_MASK_SET);
1762}
1763
1764/* Simple helper to free a transmit control block's resources
1765 * Returns an skb when the last transmit control block associated with the
1766 * skb is freed.  The skb should be freed by the caller if necessary.
1767 */
1768static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1769                                           struct enet_cb *cb)
1770{
1771        struct sk_buff *skb;
1772
1773        skb = cb->skb;
1774
1775        if (skb) {
1776                cb->skb = NULL;
1777                if (cb == GENET_CB(skb)->first_cb)
1778                        dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1779                                         dma_unmap_len(cb, dma_len),
1780                                         DMA_TO_DEVICE);
1781                else
1782                        dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1783                                       dma_unmap_len(cb, dma_len),
1784                                       DMA_TO_DEVICE);
1785                dma_unmap_addr_set(cb, dma_addr, 0);
1786
1787                if (cb == GENET_CB(skb)->last_cb)
1788                        return skb;
1789
1790        } else if (dma_unmap_addr(cb, dma_addr)) {
1791                dma_unmap_page(dev,
1792                               dma_unmap_addr(cb, dma_addr),
1793                               dma_unmap_len(cb, dma_len),
1794                               DMA_TO_DEVICE);
1795                dma_unmap_addr_set(cb, dma_addr, 0);
1796        }
1797
1798        return NULL;
1799}
1800
1801/* Simple helper to free a receive control block's resources */
1802static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1803                                           struct enet_cb *cb)
1804{
1805        struct sk_buff *skb;
1806
1807        skb = cb->skb;
1808        cb->skb = NULL;
1809
1810        if (dma_unmap_addr(cb, dma_addr)) {
1811                dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1812                                 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1813                dma_unmap_addr_set(cb, dma_addr, 0);
1814        }
1815
1816        return skb;
1817}
1818
1819/* Unlocked version of the reclaim routine; caller must hold ring->lock */
1820static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1821                                          struct bcmgenet_tx_ring *ring)
1822{
1823        struct bcmgenet_priv *priv = netdev_priv(dev);
1824        unsigned int txbds_processed = 0;
1825        unsigned int bytes_compl = 0;
1826        unsigned int pkts_compl = 0;
1827        unsigned int txbds_ready;
1828        unsigned int c_index;
1829        struct sk_buff *skb;
1830
1831        /* Clear status before servicing to reduce spurious interrupts */
1832        if (ring->index == DESC_INDEX)
1833                bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
1834                                         INTRL2_CPU_CLEAR);
1835        else
1836                bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1837                                         INTRL2_CPU_CLEAR);
1838
1839        /* Compute how many buffers have been transmitted since the last call */
1840        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1841                & DMA_C_INDEX_MASK;
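        /* The consumer index is a free-running counter, so the masked
         * subtraction below remains correct across wrap-around.  For
         * example, with a 16-bit index, an old c_index of 0xfffe and a new
         * c_index of 0x0002 yield 4 buffers ready to reclaim.
         */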
1842        txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1843
1844        netif_dbg(priv, tx_done, dev,
1845                  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1846                  __func__, ring->index, ring->c_index, c_index, txbds_ready);
1847
1848        /* Reclaim transmitted buffers */
1849        while (txbds_processed < txbds_ready) {
1850                skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1851                                          &priv->tx_cbs[ring->clean_ptr]);
1852                if (skb) {
1853                        pkts_compl++;
1854                        bytes_compl += GENET_CB(skb)->bytes_sent;
1855                        dev_consume_skb_any(skb);
1856                }
1857
1858                txbds_processed++;
1859                if (likely(ring->clean_ptr < ring->end_ptr))
1860                        ring->clean_ptr++;
1861                else
1862                        ring->clean_ptr = ring->cb_ptr;
1863        }
1864
1865        ring->free_bds += txbds_processed;
1866        ring->c_index = c_index;
1867
1868        ring->packets += pkts_compl;
1869        ring->bytes += bytes_compl;
1870
1871        netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1872                                  pkts_compl, bytes_compl);
1873
1874        return txbds_processed;
1875}
1876
1877static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1878                                struct bcmgenet_tx_ring *ring)
1879{
1880        unsigned int released;
1881
1882        spin_lock_bh(&ring->lock);
1883        released = __bcmgenet_tx_reclaim(dev, ring);
1884        spin_unlock_bh(&ring->lock);
1885
1886        return released;
1887}
1888
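/* Tx NAPI poll handler: reclaim completed descriptors under the ring lock,
 * wake the queue if enough descriptors were freed, and once no work is left
 * complete NAPI and re-enable the ring interrupt.  Returning the full budget
 * keeps the ring scheduled for another poll pass.
 */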
1889static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1890{
1891        struct bcmgenet_tx_ring *ring =
1892                container_of(napi, struct bcmgenet_tx_ring, napi);
1893        unsigned int work_done = 0;
1894        struct netdev_queue *txq;
1895
1896        spin_lock(&ring->lock);
1897        work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1898        if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1899                txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1900                netif_tx_wake_queue(txq);
1901        }
1902        spin_unlock(&ring->lock);
1903
1904        if (work_done == 0) {
1905                napi_complete(napi);
1906                ring->int_enable(ring);
1907
1908                return 0;
1909        }
1910
1911        return budget;
1912}
1913
1914static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1915{
1916        struct bcmgenet_priv *priv = netdev_priv(dev);
1917        int i;
1918
1919        if (netif_is_multiqueue(dev)) {
1920                for (i = 0; i < priv->hw_params->tx_queues; i++)
1921                        bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1922        }
1923
1924        bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1925}
1926
1927/* Reallocate the SKB to put enough headroom in front of it and insert
1928 * the transmit checksum offsets in the 64B Transmit Status Block (TSB)
1929 */
1930static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
1931                                        struct sk_buff *skb)
1932{
1933        struct bcmgenet_priv *priv = netdev_priv(dev);
1934        struct status_64 *status = NULL;
1935        struct sk_buff *new_skb;
1936        u16 offset;
1937        u8 ip_proto;
1938        __be16 ip_ver;
1939        u32 tx_csum_info;
1940
1941        if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1942                /* If 64 byte status block enabled, must make sure skb has
1943                 * enough headroom for us to insert 64B status block.
1944                 */
1945                new_skb = skb_realloc_headroom(skb, sizeof(*status));
1946                if (!new_skb) {
1947                        dev_kfree_skb_any(skb);
1948                        priv->mib.tx_realloc_tsb_failed++;
1949                        dev->stats.tx_dropped++;
1950                        return NULL;
1951                }
1952                dev_consume_skb_any(skb);
1953                skb = new_skb;
1954                priv->mib.tx_realloc_tsb++;
1955        }
1956
1957        skb_push(skb, sizeof(*status));
1958        status = (struct status_64 *)skb->data;
1959
1960        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1961                ip_ver = skb->protocol;
1962                switch (ip_ver) {
1963                case htons(ETH_P_IP):
1964                        ip_proto = ip_hdr(skb)->protocol;
1965                        break;
1966                case htons(ETH_P_IPV6):
1967                        ip_proto = ipv6_hdr(skb)->nexthdr;
1968                        break;
1969                default:
1970                        /* don't use UDP flag */
1971                        ip_proto = 0;
1972                        break;
1973                }
1974
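                /* Worked example (values for illustration only): for an
                 * IPv4/TCP packet with no IP options, the checksum start
                 * sits 34 bytes past the Ethernet header (14B MAC + 20B IP)
                 * once the 64B TSB is excluded, and csum_offset is 16, so
                 * tx_csum_info packs start 34, write offset 50 and the
                 * STATUS_TX_CSUM_LV flag.
                 */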
1975                offset = skb_checksum_start_offset(skb) - sizeof(*status);
1976                tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1977                                (offset + skb->csum_offset) |
1978                                STATUS_TX_CSUM_LV;
1979
1980                /* Set the special UDP flag for UDP */
1981                if (ip_proto == IPPROTO_UDP)
1982                        tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
1983
1984                status->tx_csum_info = tx_csum_info;
1985        }
1986
1987        return skb;
1988}
1989
1990static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1991{
1992        struct bcmgenet_priv *priv = netdev_priv(dev);
1993        struct device *kdev = &priv->pdev->dev;
1994        struct bcmgenet_tx_ring *ring = NULL;
1995        struct enet_cb *tx_cb_ptr;
1996        struct netdev_queue *txq;
1997        int nr_frags, index;
1998        dma_addr_t mapping;
1999        unsigned int size;
2000        skb_frag_t *frag;
2001        u32 len_stat;
2002        int ret;
2003        int i;
2004
2005        index = skb_get_queue_mapping(skb);
2006        /* Mapping strategy:
2007         * queue_mapping = 0, unclassified, packet transmitted through ring 16
2008         * queue_mapping = 1, goes to ring 0. (highest priority queue)
2009         * queue_mapping = 2, goes to ring 1.
2010         * queue_mapping = 3, goes to ring 2.
2011         * queue_mapping = 4, goes to ring 3.
2012         */
2013        if (index == 0)
2014                index = DESC_INDEX;
2015        else
2016                index -= 1;
2017
2018        ring = &priv->tx_rings[index];
2019        txq = netdev_get_tx_queue(dev, ring->queue);
2020
2021        nr_frags = skb_shinfo(skb)->nr_frags;
2022
2023        spin_lock(&ring->lock);
2024        if (ring->free_bds <= (nr_frags + 1)) {
2025                if (!netif_tx_queue_stopped(txq)) {
2026                        netif_tx_stop_queue(txq);
2027                        netdev_err(dev,
2028                                   "%s: tx ring %d full when queue %d awake\n",
2029                                   __func__, index, ring->queue);
2030                }
2031                ret = NETDEV_TX_BUSY;
2032                goto out;
2033        }
2034
2035        /* Retain how many bytes will be sent on the wire, without TSB inserted
2036         * by transmit checksum offload
2037         */
2038        GENET_CB(skb)->bytes_sent = skb->len;
2039
2040        /* add the Transmit Status Block */
2041        skb = bcmgenet_add_tsb(dev, skb);
2042        if (!skb) {
2043                ret = NETDEV_TX_OK;
2044                goto out;
2045        }
2046
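        /* Map the linear head and every page fragment to its own Tx
         * descriptor: the first descriptor is tagged SOP (and carries the
         * checksum-offload request when applicable), the last one EOP.
         */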
2047        for (i = 0; i <= nr_frags; i++) {
2048                tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
2049
2050                BUG_ON(!tx_cb_ptr);
2051
2052                if (!i) {
2053                        /* Transmit single SKB or head of fragment list */
2054                        GENET_CB(skb)->first_cb = tx_cb_ptr;
2055                        size = skb_headlen(skb);
2056                        mapping = dma_map_single(kdev, skb->data, size,
2057                                                 DMA_TO_DEVICE);
2058                } else {
2059                        /* xmit fragment */
2060                        frag = &skb_shinfo(skb)->frags[i - 1];
2061                        size = skb_frag_size(frag);
2062                        mapping = skb_frag_dma_map(kdev, frag, 0, size,
2063                                                   DMA_TO_DEVICE);
2064                }
2065
2066                ret = dma_mapping_error(kdev, mapping);
2067                if (ret) {
2068                        priv->mib.tx_dma_failed++;
2069                        netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
2070                        ret = NETDEV_TX_OK;
2071                        goto out_unmap_frags;
2072                }
2073                dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
2074                dma_unmap_len_set(tx_cb_ptr, dma_len, size);
2075
2076                tx_cb_ptr->skb = skb;
2077
2078                len_stat = (size << DMA_BUFLENGTH_SHIFT) |
2079                           (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
2080
2081                /* Note: if we ever change from DMA_TX_APPEND_CRC below we
2082                 * will need to restore software padding of "runt" packets
2083                 */
2084                if (!i) {
2085                        len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
2086                        if (skb->ip_summed == CHECKSUM_PARTIAL)
2087                                len_stat |= DMA_TX_DO_CSUM;
2088                }
2089                if (i == nr_frags)
2090                        len_stat |= DMA_EOP;
2091
2092                dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
2093        }
2094
2095        GENET_CB(skb)->last_cb = tx_cb_ptr;
2096        skb_tx_timestamp(skb);
2097
2098        /* Decrement total BD count and advance our write pointer */
2099        ring->free_bds -= nr_frags + 1;
2100        ring->prod_index += nr_frags + 1;
2101        ring->prod_index &= DMA_P_INDEX_MASK;
2102
2103        netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
2104
2105        if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2106                netif_tx_stop_queue(txq);
2107
2108        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
2109                /* Packets are ready, update producer index */
2110                bcmgenet_tdma_ring_writel(priv, ring->index,
2111                                          ring->prod_index, TDMA_PROD_INDEX);
2112out:
2113        spin_unlock(&ring->lock);
2114
2115        return ret;
2116
2117out_unmap_frags:
2118        /* Back up for failed control block mapping */
2119        bcmgenet_put_txcb(priv, ring);
2120
2121        /* Unmap successfully mapped control blocks */
2122        while (i-- > 0) {
2123                tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
2124                bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
2125        }
2126
2127        dev_kfree_skb(skb);
2128        goto out;
2129}
2130
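/* Allocate and DMA-map a fresh Rx skb, install it on the ring at @cb and
 * hand the previously installed (now unmapped) skb back to the caller.
 * Returns NULL on allocation or mapping failure, in which case the old skb
 * is left in place on the ring.
 */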
2131static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
2132                                          struct enet_cb *cb)
2133{
2134        struct device *kdev = &priv->pdev->dev;
2135        struct sk_buff *skb;
2136        struct sk_buff *rx_skb;
2137        dma_addr_t mapping;
2138
2139        /* Allocate a new Rx skb */
2140        skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
2141                                 GFP_ATOMIC | __GFP_NOWARN);
2142        if (!skb) {
2143                priv->mib.alloc_rx_buff_failed++;
2144                netif_err(priv, rx_err, priv->dev,
2145                          "%s: Rx skb allocation failed\n", __func__);
2146                return NULL;
2147        }
2148
2149        /* DMA-map the new Rx skb */
2150        mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
2151                                 DMA_FROM_DEVICE);
2152        if (dma_mapping_error(kdev, mapping)) {
2153                priv->mib.rx_dma_failed++;
2154                dev_kfree_skb_any(skb);
2155                netif_err(priv, rx_err, priv->dev,
2156                          "%s: Rx skb DMA mapping failed\n", __func__);
2157                return NULL;
2158        }
2159
2160        /* Grab the current Rx skb from the ring and DMA-unmap it */
2161        rx_skb = bcmgenet_free_rx_cb(kdev, cb);
2162
2163        /* Put the new Rx skb on the ring */
2164        cb->skb = skb;
2165        dma_unmap_addr_set(cb, dma_addr, mapping);
2166        dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
2167        dmadesc_set_addr(priv, cb->bd_addr, mapping);
2168
2169        /* Return the current Rx skb to caller */
2170        return rx_skb;
2171}
2172
2173/* bcmgenet_desc_rx - descriptor based rx process.
2174 * this could be called from bottom half, or from NAPI polling method.
2175 */
2176static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2177                                     unsigned int budget)
2178{
2179        struct bcmgenet_priv *priv = ring->priv;
2180        struct net_device *dev = priv->dev;
2181        struct enet_cb *cb;
2182        struct sk_buff *skb;
2183        u32 dma_length_status;
2184        unsigned long dma_flag;
2185        int len;
2186        unsigned int rxpktprocessed = 0, rxpkttoprocess;
2187        unsigned int bytes_processed = 0;
2188        unsigned int p_index, mask;
2189        unsigned int discards;
2190
2191        /* Clear status before servicing to reduce spurious interrupts */
2192        if (ring->index == DESC_INDEX) {
2193                bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
2194                                         INTRL2_CPU_CLEAR);
2195        } else {
2196                mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2197                bcmgenet_intrl2_1_writel(priv,
2198                                         mask,
2199                                         INTRL2_CPU_CLEAR);
2200        }
2201
2202        p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2203
2204        discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
2205                   DMA_P_INDEX_DISCARD_CNT_MASK;
2206        if (discards > ring->old_discards) {
2207                discards = discards - ring->old_discards;
2208                ring->errors += discards;
2209                ring->old_discards += discards;
2210
2211                /* Clear HW register when we reach 75% of maximum 0xFFFF */
2212                if (ring->old_discards >= 0xC000) {
2213                        ring->old_discards = 0;
2214                        bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2215                                                  RDMA_PROD_INDEX);
2216                }
2217        }
2218
2219        p_index &= DMA_P_INDEX_MASK;
2220        rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
2221
2222        netif_dbg(priv, rx_status, dev,
2223                  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2224
2225        while ((rxpktprocessed < rxpkttoprocess) &&
2226               (rxpktprocessed < budget)) {
2227                struct status_64 *status;
2228                __be16 rx_csum;
2229
2230                cb = &priv->rx_cbs[ring->read_ptr];
2231                skb = bcmgenet_rx_refill(priv, cb);
2232
2233                if (unlikely(!skb)) {
2234                        ring->dropped++;
2235                        goto next;
2236                }
2237
2238                status = (struct status_64 *)skb->data;
2239                dma_length_status = status->length_status;
2240                if (dev->features & NETIF_F_RXCSUM) {
2241                        rx_csum = (__force __be16)(status->rx_csum & 0xffff);
2242                        skb->csum = (__force __wsum)ntohs(rx_csum);
2243                        skb->ip_summed = CHECKSUM_COMPLETE;
2244                }
2245
2246                /* DMA flags and length are still valid no matter how
2247                 * we got the Receive Status Vector (64B RSB or register)
2248                 */
2249                dma_flag = dma_length_status & 0xffff;
2250                len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
2251
2252                netif_dbg(priv, rx_status, dev,
2253                          "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
2254                          __func__, p_index, ring->c_index,
2255                          ring->read_ptr, dma_length_status);
2256
2257                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
2258                        netif_err(priv, rx_status, dev,
2259                                  "dropping fragmented packet!\n");
2260                        ring->errors++;
2261                        dev_kfree_skb_any(skb);
2262                        goto next;
2263                }
2264
2265                /* report errors */
2266                if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
2267                                                DMA_RX_OV |
2268                                                DMA_RX_NO |
2269                                                DMA_RX_LG |
2270                                                DMA_RX_RXER))) {
2271                        netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
2272                                  (unsigned int)dma_flag);
2273                        if (dma_flag & DMA_RX_CRC_ERROR)
2274                                dev->stats.rx_crc_errors++;
2275                        if (dma_flag & DMA_RX_OV)
2276                                dev->stats.rx_over_errors++;
2277                        if (dma_flag & DMA_RX_NO)
2278                                dev->stats.rx_frame_errors++;
2279                        if (dma_flag & DMA_RX_LG)
2280                                dev->stats.rx_length_errors++;
2281                        dev->stats.rx_errors++;
2282                        dev_kfree_skb_any(skb);
2283                        goto next;
2284                } /* error packet */
2285
2286                skb_put(skb, len);
2287
2288                /* strip the 64B RSB and 2B IP-alignment pad (66 bytes total) */
2289                skb_pull(skb, 66);
2290                len -= 66;
2291
2292                if (priv->crc_fwd_en) {
2293                        skb_trim(skb, len - ETH_FCS_LEN);
2294                        len -= ETH_FCS_LEN;
2295                }
2296
2297                bytes_processed += len;
2298
2299                /* Finish setting up the received SKB and send it to the kernel */
2300                skb->protocol = eth_type_trans(skb, priv->dev);
2301                ring->packets++;
2302                ring->bytes += len;
2303                if (dma_flag & DMA_RX_MULT)
2304                        dev->stats.multicast++;
2305
2306                /* Notify kernel */
2307                napi_gro_receive(&ring->napi, skb);
2308                netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2309
2310next:
2311                rxpktprocessed++;
2312                if (likely(ring->read_ptr < ring->end_ptr))
2313                        ring->read_ptr++;
2314                else
2315                        ring->read_ptr = ring->cb_ptr;
2316
2317                ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2318                bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2319        }
2320
2321        ring->dim.bytes = bytes_processed;
2322        ring->dim.packets = rxpktprocessed;
2323
2324        return rxpktprocessed;
2325}
2326
2327/* Rx NAPI polling method */
2328static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
2329{
2330        struct bcmgenet_rx_ring *ring = container_of(napi,
2331                        struct bcmgenet_rx_ring, napi);
2332        struct dim_sample dim_sample = {};
2333        unsigned int work_done;
2334
2335        work_done = bcmgenet_desc_rx(ring, budget);
2336
2337        if (work_done < budget) {
2338                napi_complete_done(napi, work_done);
2339                ring->int_enable(ring);
2340        }
2341
2342        if (ring->dim.use_dim) {
2343                dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2344                                  ring->dim.bytes, &dim_sample);
2345                net_dim(&ring->dim.dim, dim_sample);
2346        }
2347
2348        return work_done;
2349}
2350
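/* Deferred DIM worker: apply the Rx interrupt moderation profile that
 * net_dim selected from the byte/packet samples fed in by bcmgenet_rx_poll,
 * then re-arm the algorithm for the next measurement window.
 */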
2351static void bcmgenet_dim_work(struct work_struct *work)
2352{
2353        struct dim *dim = container_of(work, struct dim, work);
2354        struct bcmgenet_net_dim *ndim =
2355                        container_of(dim, struct bcmgenet_net_dim, dim);
2356        struct bcmgenet_rx_ring *ring =
2357                        container_of(ndim, struct bcmgenet_rx_ring, dim);
2358        struct dim_cq_moder cur_profile =
2359                        net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2360
2361        bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2362        dim->state = DIM_START_MEASURE;
2363}
2364
2365/* Assign an skb to every Rx DMA descriptor in the ring. */
2366static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
2367                                     struct bcmgenet_rx_ring *ring)
2368{
2369        struct enet_cb *cb;
2370        struct sk_buff *skb;
2371        int i;
2372
2373        netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2374
2375        /* loop over each buffer that needs an skb assigned */
2376        for (i = 0; i < ring->size; i++) {
2377                cb = ring->cbs + i;
2378                skb = bcmgenet_rx_refill(priv, cb);
2379                if (skb)
2380                        dev_consume_skb_any(skb);
2381                if (!cb->skb)
2382                        return -ENOMEM;
2383        }
2384
2385        return 0;
2386}
2387
2388static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
2389{
2390        struct sk_buff *skb;
2391        struct enet_cb *cb;
2392        int i;
2393
2394        for (i = 0; i < priv->num_rx_bds; i++) {
2395                cb = &priv->rx_cbs[i];
2396
2397                skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2398                if (skb)
2399                        dev_consume_skb_any(skb);
2400        }
2401}
2402
2403static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
2404{
2405        u32 reg;
2406
2407        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2408        if (reg & CMD_SW_RESET)
2409                return;
2410        if (enable)
2411                reg |= mask;
2412        else
2413                reg &= ~mask;
2414        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2415
2416        /* UniMAC stops on a packet boundary, wait for a full-size packet
2417         * to be processed
2418         */
2419        if (enable == 0)
2420                usleep_range(1000, 2000);
2421}
2422
2423static void reset_umac(struct bcmgenet_priv *priv)
2424{
2425        /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
2426        bcmgenet_rbuf_ctrl_set(priv, 0);
2427        udelay(10);
2428
2429        /* issue soft reset and disable MAC while updating its registers */
2430        bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
2431        udelay(2);
2432}
2433
2434static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
2435{
2436        /* Mask all interrupts. */
2437        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2438        bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2439        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2440        bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2441}
2442
2443static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
2444{
2445        u32 int0_enable = 0;
2446
2447        /* Monitor cable plug/unplugged event for internal PHY, external PHY
2448         * and MoCA PHY
2449         */
2450        if (priv->internal_phy) {
2451                int0_enable |= UMAC_IRQ_LINK_EVENT;
2452                if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
2453                        int0_enable |= UMAC_IRQ_PHY_DET_R;
2454        } else if (priv->ext_phy) {
2455                int0_enable |= UMAC_IRQ_LINK_EVENT;
2456        } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2457                if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
2458                        int0_enable |= UMAC_IRQ_LINK_EVENT;
2459        }
2460        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2461}
2462
2463static void init_umac(struct bcmgenet_priv *priv)
2464{
2465        struct device *kdev = &priv->pdev->dev;
2466        u32 reg;
2467        u32 int0_enable = 0;
2468
2469        dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2470
2471        reset_umac(priv);
2472
2473        /* clear tx/rx counter */
2474        bcmgenet_umac_writel(priv,
2475                             MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
2476                             UMAC_MIB_CTRL);
2477        bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2478
2479        bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2480
2481        /* init tx registers, enable TSB */
2482        reg = bcmgenet_tbuf_ctrl_get(priv);
2483        reg |= TBUF_64B_EN;
2484        bcmgenet_tbuf_ctrl_set(priv, reg);
2485
2486        /* init rx registers, enable ip header optimization and RSB */
2487        reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
2488        reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
2489        bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2490
2491        /* enable rx checksumming */
2492        reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
2493        reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
2494        /* If UniMAC forwards CRC, we need to skip over it to get
2495         * a valid CHK bit to be set in the per-packet status word
2496         */
2497        if (priv->crc_fwd_en)
2498                reg |= RBUF_SKIP_FCS;
2499        else
2500                reg &= ~RBUF_SKIP_FCS;
2501        bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
2502
2503        if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
2504                bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2505
2506        bcmgenet_intr_disable(priv);
2507
2508        /* Configure backpressure vectors for MoCA */
2509        if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2510                reg = bcmgenet_bp_mc_get(priv);
2511                reg |= BIT(priv->hw_params->bp_in_en_shift);
2512
2513                /* bp_mask: back pressure mask */
2514                if (netif_is_multiqueue(priv->dev))
2515                        reg |= priv->hw_params->bp_in_mask;
2516                else
2517                        reg &= ~priv->hw_params->bp_in_mask;
2518                bcmgenet_bp_mc_set(priv, reg);
2519        }
2520
2521        /* Enable MDIO interrupts on GENET v3+ */
2522        if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
2523                int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2524
2525        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2526
2527        dev_dbg(kdev, "done init umac\n");
2528}
2529
2530static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2531                              void (*cb)(struct work_struct *work))
2532{
2533        struct bcmgenet_net_dim *dim = &ring->dim;
2534
2535        INIT_WORK(&dim->dim.work, cb);
2536        dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2537        dim->event_ctr = 0;
2538        dim->packets = 0;
2539        dim->bytes = 0;
2540}
2541
2542static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2543{
2544        struct bcmgenet_net_dim *dim = &ring->dim;
2545        struct dim_cq_moder moder;
2546        u32 usecs, pkts;
2547
2548        usecs = ring->rx_coalesce_usecs;
2549        pkts = ring->rx_max_coalesced_frames;
2550
2551        /* If DIM was enabled, re-apply default parameters */
2552        if (dim->use_dim) {
2553                moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2554                usecs = moder.usec;
2555                pkts = moder.pkts;
2556        }
2557
2558        bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2559}
2560
2561/* Initialize a Tx ring along with corresponding hardware registers */
2562static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2563                                  unsigned int index, unsigned int size,
2564                                  unsigned int start_ptr, unsigned int end_ptr)
2565{
2566        struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2567        u32 words_per_bd = WORDS_PER_BD(priv);
2568        u32 flow_period_val = 0;
2569
2570        spin_lock_init(&ring->lock);
2571        ring->priv = priv;
2572        ring->index = index;
2573        if (index == DESC_INDEX) {
2574                ring->queue = 0;
2575                ring->int_enable = bcmgenet_tx_ring16_int_enable;
2576                ring->int_disable = bcmgenet_tx_ring16_int_disable;
2577        } else {
2578                ring->queue = index + 1;
2579                ring->int_enable = bcmgenet_tx_ring_int_enable;
2580                ring->int_disable = bcmgenet_tx_ring_int_disable;
2581        }
2582        ring->cbs = priv->tx_cbs + start_ptr;
2583        ring->size = size;
2584        ring->clean_ptr = start_ptr;
2585        ring->c_index = 0;
2586        ring->free_bds = size;
2587        ring->write_ptr = start_ptr;
2588        ring->cb_ptr = start_ptr;
2589        ring->end_ptr = end_ptr - 1;
2590        ring->prod_index = 0;
2591
2592        /* Set flow period for ring != 16 */
2593        if (index != DESC_INDEX)
2594                flow_period_val = ENET_MAX_MTU_SIZE << 16;
2595
2596        bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2597        bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2598        bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2599        /* Disable rate control for now */
2600        bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
2601                                  TDMA_FLOW_PERIOD);
2602        bcmgenet_tdma_ring_writel(priv, index,
2603                                  ((size << DMA_RING_SIZE_SHIFT) |
2604                                   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2605
2606        /* Set start and end address, read and write pointers */
2607        bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2608                                  DMA_START_ADDR);
2609        bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2610                                  TDMA_READ_PTR);
2611        bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2612                                  TDMA_WRITE_PTR);
2613        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2614                                  DMA_END_ADDR);
2615
2616        /* Initialize Tx NAPI */
2617        netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
2618                          NAPI_POLL_WEIGHT);
2619}
2620
2621/* Initialize a RDMA ring */
2622static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
2623                                 unsigned int index, unsigned int size,
2624                                 unsigned int start_ptr, unsigned int end_ptr)
2625{
2626        struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2627        u32 words_per_bd = WORDS_PER_BD(priv);
2628        int ret;
2629
2630        ring->priv = priv;
2631        ring->index = index;
2632        if (index == DESC_INDEX) {
2633                ring->int_enable = bcmgenet_rx_ring16_int_enable;
2634                ring->int_disable = bcmgenet_rx_ring16_int_disable;
2635        } else {
2636                ring->int_enable = bcmgenet_rx_ring_int_enable;
2637                ring->int_disable = bcmgenet_rx_ring_int_disable;
2638        }
2639        ring->cbs = priv->rx_cbs + start_ptr;
2640        ring->size = size;
2641        ring->c_index = 0;
2642        ring->read_ptr = start_ptr;
2643        ring->cb_ptr = start_ptr;
2644        ring->end_ptr = end_ptr - 1;
2645
2646        ret = bcmgenet_alloc_rx_buffers(priv, ring);
2647        if (ret)
2648                return ret;
2649
2650        bcmgenet_init_dim(ring, bcmgenet_dim_work);
2651        bcmgenet_init_rx_coalesce(ring);
2652
2653        /* Initialize Rx NAPI */
2654        netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
2655                       NAPI_POLL_WEIGHT);
2656
2657        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2658        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2659        bcmgenet_rdma_ring_writel(priv, index,
2660                                  ((size << DMA_RING_SIZE_SHIFT) |
2661                                   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2662        bcmgenet_rdma_ring_writel(priv, index,
2663                                  (DMA_FC_THRESH_LO <<
2664                                   DMA_XOFF_THRESHOLD_SHIFT) |
2665                                   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2666
2667        /* Set start and end address, read and write pointers */
2668        bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2669                                  DMA_START_ADDR);
2670        bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2671                                  RDMA_READ_PTR);
2672        bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2673                                  RDMA_WRITE_PTR);
2674        bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2675                                  DMA_END_ADDR);
2676
2677        return ret;
2678}
2679
2680static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2681{
2682        unsigned int i;
2683        struct bcmgenet_tx_ring *ring;
2684
2685        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2686                ring = &priv->tx_rings[i];
2687                napi_enable(&ring->napi);
2688                ring->int_enable(ring);
2689        }
2690
2691        ring = &priv->tx_rings[DESC_INDEX];
2692        napi_enable(&ring->napi);
2693        ring->int_enable(ring);
2694}
2695
2696static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2697{
2698        unsigned int i;
2699        struct bcmgenet_tx_ring *ring;
2700
2701        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2702                ring = &priv->tx_rings[i];
2703                napi_disable(&ring->napi);
2704        }
2705
2706        ring = &priv->tx_rings[DESC_INDEX];
2707        napi_disable(&ring->napi);
2708}
2709
2710static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2711{
2712        unsigned int i;
2713        struct bcmgenet_tx_ring *ring;
2714
2715        for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2716                ring = &priv->tx_rings[i];
2717                netif_napi_del(&ring->napi);
2718        }
2719
2720        ring = &priv->tx_rings[DESC_INDEX];
2721        netif_napi_del(&ring->napi);
2722}
2723
2724/* Initialize Tx queues
2725 *
2726 * Queues 0-3 are priority-based, each with 32 descriptors,
2727 * with queue 0 being the highest priority queue.
2728 *
2729 * Queue 16 is the default Tx queue with
2730 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2731 *
2732 * The transmit control block pool is then partitioned as follows:
2733 * - Tx queue 0 uses tx_cbs[0..31]
2734 * - Tx queue 1 uses tx_cbs[32..63]
2735 * - Tx queue 2 uses tx_cbs[64..95]
2736 * - Tx queue 3 uses tx_cbs[96..127]
2737 * - Tx queue 16 uses tx_cbs[128..255]
2738 */
2739static void bcmgenet_init_tx_queues(struct net_device *dev)
2740{
2741        struct bcmgenet_priv *priv = netdev_priv(dev);
2742        u32 i, dma_enable;
2743        u32 dma_ctrl, ring_cfg;
2744        u32 dma_priority[3] = {0, 0, 0};
2745
2746        dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2747        dma_enable = dma_ctrl & DMA_EN;
2748        dma_ctrl &= ~DMA_EN;
2749        bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2750
2751        dma_ctrl = 0;
2752        ring_cfg = 0;
2753
2754        /* Enable strict priority arbiter mode */
2755        bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2756
2757        /* Initialize Tx priority queues */
2758        for (i = 0; i < priv->hw_params->tx_queues; i++) {
2759                bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2760                                      i * priv->hw_params->tx_bds_per_q,
2761                                      (i + 1) * priv->hw_params->tx_bds_per_q);
2762                ring_cfg |= (1 << i);
2763                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2764                dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2765                        ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2766        }
2767
2768        /* Initialize Tx default queue 16 */
2769        bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2770                              priv->hw_params->tx_queues *
2771                              priv->hw_params->tx_bds_per_q,
2772                              TOTAL_DESC);
2773        ring_cfg |= (1 << DESC_INDEX);
2774        dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2775        dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2776                ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2777                 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2778
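        /* With GENET_Q0_PRIORITY == 0, the code above gives ring i priority
         * i and the default ring 16 priority tx_queues (i.e. the lowest);
         * DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() spread those values
         * across the three DMA_PRIORITY_* registers written below.
         */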
2779        /* Set Tx queue priorities */
2780        bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2781        bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2782        bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2783
2784        /* Enable Tx queues */
2785        bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2786
2787        /* Enable Tx DMA */
2788        if (dma_enable)
2789                dma_ctrl |= DMA_EN;
2790        bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2791}
2792
2793static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2794{
2795        unsigned int i;
2796        struct bcmgenet_rx_ring *ring;
2797
2798        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2799                ring = &priv->rx_rings[i];
2800                napi_enable(&ring->napi);
2801                ring->int_enable(ring);
2802        }
2803
2804        ring = &priv->rx_rings[DESC_INDEX];
2805        napi_enable(&ring->napi);
2806        ring->int_enable(ring);
2807}
2808
2809static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2810{
2811        unsigned int i;
2812        struct bcmgenet_rx_ring *ring;
2813
2814        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2815                ring = &priv->rx_rings[i];
2816                napi_disable(&ring->napi);
2817                cancel_work_sync(&ring->dim.dim.work);
2818        }
2819
2820        ring = &priv->rx_rings[DESC_INDEX];
2821        napi_disable(&ring->napi);
2822        cancel_work_sync(&ring->dim.dim.work);
2823}
2824
2825static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2826{
2827        unsigned int i;
2828        struct bcmgenet_rx_ring *ring;
2829
2830        for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2831                ring = &priv->rx_rings[i];
2832                netif_napi_del(&ring->napi);
2833        }
2834
2835        ring = &priv->rx_rings[DESC_INDEX];
2836        netif_napi_del(&ring->napi);
2837}
2838
2839/* Initialize Rx queues
2840 *
2841 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2842 * used to direct traffic to these queues.
2843 *
2844 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2845 */
2846static int bcmgenet_init_rx_queues(struct net_device *dev)
2847{
2848        struct bcmgenet_priv *priv = netdev_priv(dev);
2849        u32 i;
2850        u32 dma_enable;
2851        u32 dma_ctrl;
2852        u32 ring_cfg;
2853        int ret;
2854
2855        dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2856        dma_enable = dma_ctrl & DMA_EN;
2857        dma_ctrl &= ~DMA_EN;
2858        bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2859
2860        dma_ctrl = 0;
2861        ring_cfg = 0;
2862
2863        /* Initialize Rx priority queues */
2864        for (i = 0; i < priv->hw_params->rx_queues; i++) {
2865                ret = bcmgenet_init_rx_ring(priv, i,
2866                                            priv->hw_params->rx_bds_per_q,
2867                                            i * priv->hw_params->rx_bds_per_q,
2868                                            (i + 1) *
2869                                            priv->hw_params->rx_bds_per_q);
2870                if (ret)
2871                        return ret;
2872
2873                ring_cfg |= (1 << i);
2874                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2875        }
2876
2877        /* Initialize Rx default queue 16 */
2878        ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2879                                    priv->hw_params->rx_queues *
2880                                    priv->hw_params->rx_bds_per_q,
2881                                    TOTAL_DESC);
2882        if (ret)
2883                return ret;
2884
2885        ring_cfg |= (1 << DESC_INDEX);
2886        dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2887
2888        /* Enable rings */
2889        bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2890
2891        /* Enable the ring buffers and re-enable Rx DMA if it was enabled */
2892        if (dma_enable)
2893                dma_ctrl |= DMA_EN;
2894        bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2895
2896        return 0;
2897}
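
    /* Worked example: in the hw_params table below, rx_queues is 0 on every
     * GENET version, so the priority-queue loop above does not run and
     * GENET_Q16_RX_BD_CNT evaluates to TOTAL_DESC (256); the default Rx
     * queue 16 then owns every Rx descriptor and every entry of rx_cbs[].
     */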
2898
2899static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2900{
2901        int ret = 0;
2902        int timeout = 0;
2903        u32 reg;
2904        u32 dma_ctrl;
2905        int i;
2906
2907        /* Disable TDMA to stop adding more frames to the TX DMA */
2908        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2909        reg &= ~DMA_EN;
2910        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2911
2912        /* Check TDMA status register to confirm TDMA is disabled */
2913        while (timeout++ < DMA_TIMEOUT_VAL) {
2914                reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2915                if (reg & DMA_DISABLED)
2916                        break;
2917
2918                udelay(1);
2919        }
2920
2921        if (timeout > DMA_TIMEOUT_VAL) {
2922                netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2923                ret = -ETIMEDOUT;
2924        }
2925
2926        /* Wait 10ms for packet drain in both tx and rx dma */
2927        usleep_range(10000, 20000);
2928
2929        /* Disable RDMA */
2930        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2931        reg &= ~DMA_EN;
2932        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2933
2934        timeout = 0;
2935        /* Check RDMA status register to confirm RDMA is disabled */
2936        while (timeout++ < DMA_TIMEOUT_VAL) {
2937                reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2938                if (reg & DMA_DISABLED)
2939                        break;
2940
2941                udelay(1);
2942        }
2943
2944        if (timeout > DMA_TIMEOUT_VAL) {
2945                netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2946                ret = -ETIMEDOUT;
2947        }
2948
2949        dma_ctrl = 0;
2950        for (i = 0; i < priv->hw_params->rx_queues; i++)
2951                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2952        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2953        reg &= ~dma_ctrl;
2954        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2955
2956        dma_ctrl = 0;
2957        for (i = 0; i < priv->hw_params->tx_queues; i++)
2958                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2959        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2960        reg &= ~dma_ctrl;
2961        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2962
2963        return ret;
2964}
2965
2966static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2967{
2968        struct netdev_queue *txq;
2969        int i;
2970
2971        bcmgenet_fini_rx_napi(priv);
2972        bcmgenet_fini_tx_napi(priv);
2973
2974        for (i = 0; i < priv->num_tx_bds; i++)
2975                dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
2976                                                  priv->tx_cbs + i));
2977
2978        for (i = 0; i < priv->hw_params->tx_queues; i++) {
2979                txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
2980                netdev_tx_reset_queue(txq);
2981        }
2982
2983        txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
2984        netdev_tx_reset_queue(txq);
2985
2986        bcmgenet_free_rx_buffers(priv);
2987        kfree(priv->rx_cbs);
2988        kfree(priv->tx_cbs);
2989}
2990
2991/* bcmgenet_init_dma: Initialize Tx/Rx DMA rings and control block pools */
2992static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2993{
2994        int ret;
2995        unsigned int i;
2996        struct enet_cb *cb;
2997
2998        netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2999
3000        /* Initialize common Rx ring structures */
3001        priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
3002        priv->num_rx_bds = TOTAL_DESC;
3003        priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
3004                               GFP_KERNEL);
3005        if (!priv->rx_cbs)
3006                return -ENOMEM;
3007
3008        for (i = 0; i < priv->num_rx_bds; i++) {
3009                cb = priv->rx_cbs + i;
3010                cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
3011        }
3012
3013        /* Initialize common TX ring structures */
3014        priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
3015        priv->num_tx_bds = TOTAL_DESC;
3016        priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
3017                               GFP_KERNEL);
3018        if (!priv->tx_cbs) {
3019                kfree(priv->rx_cbs);
3020                return -ENOMEM;
3021        }
3022
3023        for (i = 0; i < priv->num_tx_bds; i++) {
3024                cb = priv->tx_cbs + i;
3025                cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
3026        }
3027
3028        /* Init rDma */
3029        bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
3030                             DMA_SCB_BURST_SIZE);
3031
3032        /* Initialize Rx queues */
3033        ret = bcmgenet_init_rx_queues(priv->dev);
3034        if (ret) {
3035                netdev_err(priv->dev, "failed to initialize Rx queues\n");
3036                bcmgenet_free_rx_buffers(priv);
3037                kfree(priv->rx_cbs);
3038                kfree(priv->tx_cbs);
3039                return ret;
3040        }
3041
3042        /* Init tDma */
3043        bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
3044                             DMA_SCB_BURST_SIZE);
3045
3046        /* Initialize Tx queues */
3047        bcmgenet_init_tx_queues(priv->dev);
3048
3049        return 0;
3050}
3051
3052/* Interrupt bottom half */
3053static void bcmgenet_irq_task(struct work_struct *work)
3054{
3055        unsigned int status;
3056        struct bcmgenet_priv *priv = container_of(
3057                        work, struct bcmgenet_priv, bcmgenet_irq_work);
3058
3059        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
3060
3061        spin_lock_irq(&priv->lock);
3062        status = priv->irq0_stat;
3063        priv->irq0_stat = 0;
3064        spin_unlock_irq(&priv->lock);
3065
3066        if (status & UMAC_IRQ_PHY_DET_R &&
3067            priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
3068                phy_init_hw(priv->dev->phydev);
3069                genphy_config_aneg(priv->dev->phydev);
3070        }
3071
3072        /* Link UP/DOWN event */
3073        if (status & UMAC_IRQ_LINK_EVENT)
3074                phy_mac_interrupt(priv->dev->phydev);
3075
3076}
3077
3078/* bcmgenet_isr1: handle Rx and Tx priority queues */
3079static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
3080{
3081        struct bcmgenet_priv *priv = dev_id;
3082        struct bcmgenet_rx_ring *rx_ring;
3083        struct bcmgenet_tx_ring *tx_ring;
3084        unsigned int index, status;
3085
3086        /* Read irq status */
3087        status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
3088                ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3089
3090        /* clear interrupts */
3091        bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
3092
3093        netif_dbg(priv, intr, priv->dev,
3094                  "%s: IRQ=0x%x\n", __func__, status);
3095
3096        /* Check Rx priority queue interrupts */
3097        for (index = 0; index < priv->hw_params->rx_queues; index++) {
3098                if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
3099                        continue;
3100
3101                rx_ring = &priv->rx_rings[index];
3102                rx_ring->dim.event_ctr++;
3103
3104                if (likely(napi_schedule_prep(&rx_ring->napi))) {
3105                        rx_ring->int_disable(rx_ring);
3106                        __napi_schedule_irqoff(&rx_ring->napi);
3107                }
3108        }
3109
3110        /* Check Tx priority queue interrupts */
3111        for (index = 0; index < priv->hw_params->tx_queues; index++) {
3112                if (!(status & BIT(index)))
3113                        continue;
3114
3115                tx_ring = &priv->tx_rings[index];
3116
3117                if (likely(napi_schedule_prep(&tx_ring->napi))) {
3118                        tx_ring->int_disable(tx_ring);
3119                        __napi_schedule_irqoff(&tx_ring->napi);
3120                }
3121        }
3122
3123        return IRQ_HANDLED;
3124}
3125
3126/* bcmgenet_isr0: handle the Rx and Tx default queues and other interrupts */
3127static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
3128{
3129        struct bcmgenet_priv *priv = dev_id;
3130        struct bcmgenet_rx_ring *rx_ring;
3131        struct bcmgenet_tx_ring *tx_ring;
3132        unsigned int status;
3133        unsigned long flags;
3134
3135        /* Read irq status */
3136        status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
3137                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3138
3139        /* clear interrupts */
3140        bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
3141
3142        netif_dbg(priv, intr, priv->dev,
3143                  "IRQ=0x%x\n", status);
3144
3145        if (status & UMAC_IRQ_RXDMA_DONE) {
3146                rx_ring = &priv->rx_rings[DESC_INDEX];
3147                rx_ring->dim.event_ctr++;
3148
3149                if (likely(napi_schedule_prep(&rx_ring->napi))) {
3150                        rx_ring->int_disable(rx_ring);
3151                        __napi_schedule_irqoff(&rx_ring->napi);
3152                }
3153        }
3154
3155        if (status & UMAC_IRQ_TXDMA_DONE) {
3156                tx_ring = &priv->tx_rings[DESC_INDEX];
3157
3158                if (likely(napi_schedule_prep(&tx_ring->napi))) {
3159                        tx_ring->int_disable(tx_ring);
3160                        __napi_schedule_irqoff(&tx_ring->napi);
3161                }
3162        }
3163
3164        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
3165                status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
3166                wake_up(&priv->wq);
3167        }
3168
3169        /* All other interrupts of interest are handled in the bottom half */
3170        status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
3171        if (status) {
3172                /* Save irq status for bottom-half processing. */
3173                spin_lock_irqsave(&priv->lock, flags);
3174                priv->irq0_stat |= status;
3175                spin_unlock_irqrestore(&priv->lock, flags);
3176
3177                schedule_work(&priv->bcmgenet_irq_work);
3178        }
3179
3180        return IRQ_HANDLED;
3181}
3182
3183static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
3184{
3185        /* Acknowledge the interrupt */
3186        return IRQ_HANDLED;
3187}
3188
3189#ifdef CONFIG_NET_POLL_CONTROLLER
3190static void bcmgenet_poll_controller(struct net_device *dev)
3191{
3192        struct bcmgenet_priv *priv = netdev_priv(dev);
3193
3194        /* Invoke the main RX/TX interrupt handler */
3195        disable_irq(priv->irq0);
3196        bcmgenet_isr0(priv->irq0, priv);
3197        enable_irq(priv->irq0);
3198
3199        /* And the interrupt handler for RX/TX priority queues */
3200        disable_irq(priv->irq1);
3201        bcmgenet_isr1(priv->irq1, priv);
3202        enable_irq(priv->irq1);
3203}
3204#endif
3205
3206static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
3207{
3208        u32 reg;
3209
3210        reg = bcmgenet_rbuf_ctrl_get(priv);
3211        reg |= BIT(1);
3212        bcmgenet_rbuf_ctrl_set(priv, reg);
3213        udelay(10);
3214
3215        reg &= ~BIT(1);
3216        bcmgenet_rbuf_ctrl_set(priv, reg);
3217        udelay(10);
3218}
3219
3220static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
3221                                 unsigned char *addr)
3222{
3223        bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
3224        bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3225}
3226
3227static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
3228                                 unsigned char *addr)
3229{
3230        u32 addr_tmp;
3231
3232        addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
3233        put_unaligned_be32(addr_tmp, &addr[0]);
3234        addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
3235        put_unaligned_be16(addr_tmp, &addr[4]);
3236}
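
    /* For example, with the (hypothetical) address 00:10:18:aa:bb:cc the
     * write path above stores UMAC_MAC0 = 0x001018aa and
     * UMAC_MAC1 = 0x0000bbcc, and bcmgenet_get_hw_addr() performs the
     * inverse big-endian unpacking.
     */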
3237
3238/* Returns a reusable dma control register value */
3239static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
3240{
3241        unsigned int i;
3242        u32 reg;
3243        u32 dma_ctrl;
3244
3245        /* disable DMA */
3246        dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3247        for (i = 0; i < priv->hw_params->tx_queues; i++)
3248                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3249        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3250        reg &= ~dma_ctrl;
3251        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3252
3253        dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3254        for (i = 0; i < priv->hw_params->rx_queues; i++)
3255                dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3256        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3257        reg &= ~dma_ctrl;
3258        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3259
3260        bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
3261        udelay(10);
3262        bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3263
3264        return dma_ctrl;
3265}
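
    /* Note on the returned value: with rx_queues = 0, as in every hw_params
     * entry below, the value handed back for bcmgenet_enable_dma() is
     * DMA_EN | BIT(DESC_INDEX + DMA_RING_BUF_EN_SHIFT); the priority-queue
     * ring-buffer enable bits cleared here are restored separately by
     * bcmgenet_init_rx_queues() and bcmgenet_init_tx_queues().
     */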
3266
3267static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
3268{
3269        u32 reg;
3270
3271        reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3272        reg |= dma_ctrl;
3273        bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3274
3275        reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3276        reg |= dma_ctrl;
3277        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3278}
3279
3280static void bcmgenet_netif_start(struct net_device *dev)
3281{
3282        struct bcmgenet_priv *priv = netdev_priv(dev);
3283
3284        /* Start the network engine */
3285        bcmgenet_set_rx_mode(dev);
3286        bcmgenet_enable_rx_napi(priv);
3287
3288        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3289
3290        bcmgenet_enable_tx_napi(priv);
3291
3292        /* Monitor link interrupts now */
3293        bcmgenet_link_intr_enable(priv);
3294
3295        phy_start(dev->phydev);
3296}
3297
3298static int bcmgenet_open(struct net_device *dev)
3299{
3300        struct bcmgenet_priv *priv = netdev_priv(dev);
3301        unsigned long dma_ctrl;
3302        int ret;
3303
3304        netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3305
3306        /* Turn on the clock */
3307        clk_prepare_enable(priv->clk);
3308
3309        /* If this is an internal GPHY, power it back on now, before UniMAC is
3310         * brought out of reset as absolutely no UniMAC activity is allowed
3311         */
3312        if (priv->internal_phy)
3313                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3314
3315        /* take MAC out of reset */
3316        bcmgenet_umac_reset(priv);
3317
3318        init_umac(priv);
3319
3320        /* Apply features again in case we changed them while interface was
3321         * down
3322         */
3323        bcmgenet_set_features(dev, dev->features);
3324
3325        bcmgenet_set_hw_addr(priv, dev->dev_addr);
3326
3327        /* Disable RX/TX DMA and flush TX queues */
3328        dma_ctrl = bcmgenet_dma_disable(priv);
3329
3330        /* Reinitialize TDMA, RDMA and SW housekeeping */
3331        ret = bcmgenet_init_dma(priv);
3332        if (ret) {
3333                netdev_err(dev, "failed to initialize DMA\n");
3334                goto err_clk_disable;
3335        }
3336
3337        /* Always enable ring 16 - descriptor ring */
3338        bcmgenet_enable_dma(priv, dma_ctrl);
3339
3340        /* HFB init */
3341        bcmgenet_hfb_init(priv);
3342
3343        ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
3344                          dev->name, priv);
3345        if (ret < 0) {
3346                netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
3347                goto err_fini_dma;
3348        }
3349
3350        ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
3351                          dev->name, priv);
3352        if (ret < 0) {
3353                netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
3354                goto err_irq0;
3355        }
3356
3357        ret = bcmgenet_mii_probe(dev);
3358        if (ret) {
3359                netdev_err(dev, "failed to connect to PHY\n");
3360                goto err_irq1;
3361        }
3362
3363        bcmgenet_netif_start(dev);
3364
3365        netif_tx_start_all_queues(dev);
3366
3367        return 0;
3368
3369err_irq1:
3370        free_irq(priv->irq1, priv);
3371err_irq0:
3372        free_irq(priv->irq0, priv);
3373err_fini_dma:
3374        bcmgenet_dma_teardown(priv);
3375        bcmgenet_fini_dma(priv);
3376err_clk_disable:
3377        if (priv->internal_phy)
3378                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3379        clk_disable_unprepare(priv->clk);
3380        return ret;
3381}
3382
3383static void bcmgenet_netif_stop(struct net_device *dev)
3384{
3385        struct bcmgenet_priv *priv = netdev_priv(dev);
3386
3387        bcmgenet_disable_tx_napi(priv);
3388        netif_tx_disable(dev);
3389
3390        /* Disable MAC receive */
3391        umac_enable_set(priv, CMD_RX_EN, false);
3392
3393        bcmgenet_dma_teardown(priv);
3394
3395        /* Disable MAC transmit; TX DMA must be disabled before this step */
3396        umac_enable_set(priv, CMD_TX_EN, false);
3397
3398        phy_stop(dev->phydev);
3399        bcmgenet_disable_rx_napi(priv);
3400        bcmgenet_intr_disable(priv);
3401
3402        /* Wait for pending work items to complete. Since interrupts are
3403         * disabled no new work will be scheduled.
3404         */
3405        cancel_work_sync(&priv->bcmgenet_irq_work);
3406
3407        priv->old_link = -1;
3408        priv->old_speed = -1;
3409        priv->old_duplex = -1;
3410        priv->old_pause = -1;
3411
3412        /* tx reclaim */
3413        bcmgenet_tx_reclaim_all(dev);
3414        bcmgenet_fini_dma(priv);
3415}
3416
3417static int bcmgenet_close(struct net_device *dev)
3418{
3419        struct bcmgenet_priv *priv = netdev_priv(dev);
3420        int ret = 0;
3421
3422        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
3423
3424        bcmgenet_netif_stop(dev);
3425
3426        /* Really kill the PHY state machine and disconnect from it */
3427        phy_disconnect(dev->phydev);
3428
3429        free_irq(priv->irq0, priv);
3430        free_irq(priv->irq1, priv);
3431
3432        if (priv->internal_phy)
3433                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3434
3435        clk_disable_unprepare(priv->clk);
3436
3437        return ret;
3438}
3439
3440static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3441{
3442        struct bcmgenet_priv *priv = ring->priv;
3443        u32 p_index, c_index, intsts, intmsk;
3444        struct netdev_queue *txq;
3445        unsigned int free_bds;
3446        bool txq_stopped;
3447
3448        if (!netif_msg_tx_err(priv))
3449                return;
3450
3451        txq = netdev_get_tx_queue(priv->dev, ring->queue);
3452
3453        spin_lock(&ring->lock);
3454        if (ring->index == DESC_INDEX) {
3455                intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3456                intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
3457        } else {
3458                intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3459                intmsk = 1 << ring->index;
3460        }
3461        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3462        p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3463        txq_stopped = netif_tx_queue_stopped(txq);
3464        free_bds = ring->free_bds;
3465        spin_unlock(&ring->lock);
3466
3467        netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
3468                  "TX queue status: %s, interrupts: %s\n"
3469                  "(sw)free_bds: %d (sw)size: %d\n"
3470                  "(sw)p_index: %d (hw)p_index: %d\n"
3471                  "(sw)c_index: %d (hw)c_index: %d\n"
3472                  "(sw)clean_p: %d (sw)write_p: %d\n"
3473                  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3474                  ring->index, ring->queue,
3475                  txq_stopped ? "stopped" : "active",
3476                  intsts & intmsk ? "enabled" : "disabled",
3477                  free_bds, ring->size,
3478                  ring->prod_index, p_index & DMA_P_INDEX_MASK,
3479                  ring->c_index, c_index & DMA_C_INDEX_MASK,
3480                  ring->clean_ptr, ring->write_ptr,
3481                  ring->cb_ptr, ring->end_ptr);
3482}
3483
3484static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3485{
3486        struct bcmgenet_priv *priv = netdev_priv(dev);
3487        u32 int0_enable = 0;
3488        u32 int1_enable = 0;
3489        unsigned int q;
3490
3491        netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3492
3493        for (q = 0; q < priv->hw_params->tx_queues; q++)
3494                bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3495        bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3496
3497        bcmgenet_tx_reclaim_all(dev);
3498
3499        for (q = 0; q < priv->hw_params->tx_queues; q++)
3500                int1_enable |= (1 << q);
3501
3502        int0_enable = UMAC_IRQ_TXDMA_DONE;
3503
3504        /* Re-enable TX interrupts if disabled */
3505        bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3506        bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3507
3508        netif_trans_update(dev);
3509
3510        dev->stats.tx_errors++;
3511
3512        netif_tx_wake_all_queues(dev);
3513}
3514
3515#define MAX_MDF_FILTER  17
3516
3517static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3518                                         unsigned char *addr,
3519                                         int *i)
3520{
3521        bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3522                             UMAC_MDF_ADDR + (*i * 4));
3523        bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3524                             addr[4] << 8 | addr[5],
3525                             UMAC_MDF_ADDR + ((*i + 1) * 4));
3526        *i += 2;
3527}
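
    /* For example, a (hypothetical) address a1:b2:c3:d4:e5:f6 is written as
     * the two consecutive MDF words 0x0000a1b2 and 0xc3d4e5f6, i.e. the
     * 48-bit address is split 16/32 across UMAC_MDF_ADDR slots and *i
     * advances by two register words per filter entry.
     */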
3528
3529static void bcmgenet_set_rx_mode(struct net_device *dev)
3530{
3531        struct bcmgenet_priv *priv = netdev_priv(dev);
3532        struct netdev_hw_addr *ha;
3533        int i, nfilter;
3534        u32 reg;
3535
3536        netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3537
3538        /* Number of filters needed */
3539        nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3540
3541        /*
3542         * Turn on promiscuous mode for three scenarios:
3543         * 1. IFF_PROMISC flag is set
3544         * 2. IFF_ALLMULTI flag is set
3545         * 3. The number of filters needed exceeds the number of filters
3546         *    supported by the hardware.
3547         */
3548        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3549        if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3550            (nfilter > MAX_MDF_FILTER)) {
3551                reg |= CMD_PROMISC;
3552                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3553                bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3554                return;
3555        } else {
3556                reg &= ~CMD_PROMISC;
3557                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3558        }
3559
3560        /* update MDF filter */
3561        i = 0;
3562        /* Broadcast */
3563        bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3564        /* my own address */
3565        bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3566
3567        /* Unicast */
3568        netdev_for_each_uc_addr(ha, dev)
3569                bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3570
3571        /* Multicast */
3572        netdev_for_each_mc_addr(ha, dev)
3573                bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3574
3575        /* Enable filters */
3576        reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3577        bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3578}
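
    /* Worked example for the enable mask: with one unicast and one multicast
     * address installed, nfilter = 4 (broadcast + own address + 1 UC + 1 MC)
     * and the value written to UMAC_MDF_CTRL is
     * GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - 4) = GENMASK(16, 13),
     * enabling exactly the four filter slots programmed above out of the 17
     * available.
     */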
3579
3580/* Set the hardware MAC address. */
3581static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3582{
3583        struct sockaddr *addr = p;
3584
3585        /* Setting the MAC address at the hardware level is not possible
3586         * without disabling the UniMAC RX/TX enable bits.
3587         */
3588        if (netif_running(dev))
3589                return -EBUSY;
3590
3591        ether_addr_copy(dev->dev_addr, addr->sa_data);
3592
3593        return 0;
3594}
3595
3596static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3597{
3598        struct bcmgenet_priv *priv = netdev_priv(dev);
3599        unsigned long tx_bytes = 0, tx_packets = 0;
3600        unsigned long rx_bytes = 0, rx_packets = 0;
3601        unsigned long rx_errors = 0, rx_dropped = 0;
3602        struct bcmgenet_tx_ring *tx_ring;
3603        struct bcmgenet_rx_ring *rx_ring;
3604        unsigned int q;
3605
3606        for (q = 0; q < priv->hw_params->tx_queues; q++) {
3607                tx_ring = &priv->tx_rings[q];
3608                tx_bytes += tx_ring->bytes;
3609                tx_packets += tx_ring->packets;
3610        }
3611        tx_ring = &priv->tx_rings[DESC_INDEX];
3612        tx_bytes += tx_ring->bytes;
3613        tx_packets += tx_ring->packets;
3614
3615        for (q = 0; q < priv->hw_params->rx_queues; q++) {
3616                rx_ring = &priv->rx_rings[q];
3617
3618                rx_bytes += rx_ring->bytes;
3619                rx_packets += rx_ring->packets;
3620                rx_errors += rx_ring->errors;
3621                rx_dropped += rx_ring->dropped;
3622        }
3623        rx_ring = &priv->rx_rings[DESC_INDEX];
3624        rx_bytes += rx_ring->bytes;
3625        rx_packets += rx_ring->packets;
3626        rx_errors += rx_ring->errors;
3627        rx_dropped += rx_ring->dropped;
3628
3629        dev->stats.tx_bytes = tx_bytes;
3630        dev->stats.tx_packets = tx_packets;
3631        dev->stats.rx_bytes = rx_bytes;
3632        dev->stats.rx_packets = rx_packets;
3633        dev->stats.rx_errors = rx_errors;
3634        dev->stats.rx_missed_errors = rx_errors;
3635        dev->stats.rx_dropped = rx_dropped;
3636        return &dev->stats;
3637}
3638
3639static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
3640{
3641        struct bcmgenet_priv *priv = netdev_priv(dev);
3642
3643        if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
3644            priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
3645                return -EOPNOTSUPP;
3646
3647        if (new_carrier)
3648                netif_carrier_on(dev);
3649        else
3650                netif_carrier_off(dev);
3651
3652        return 0;
3653}
3654
3655static const struct net_device_ops bcmgenet_netdev_ops = {
3656        .ndo_open               = bcmgenet_open,
3657        .ndo_stop               = bcmgenet_close,
3658        .ndo_start_xmit         = bcmgenet_xmit,
3659        .ndo_tx_timeout         = bcmgenet_timeout,
3660        .ndo_set_rx_mode        = bcmgenet_set_rx_mode,
3661        .ndo_set_mac_address    = bcmgenet_set_mac_addr,
3662        .ndo_do_ioctl           = phy_do_ioctl_running,
3663        .ndo_set_features       = bcmgenet_set_features,
3664#ifdef CONFIG_NET_POLL_CONTROLLER
3665        .ndo_poll_controller    = bcmgenet_poll_controller,
3666#endif
3667        .ndo_get_stats          = bcmgenet_get_stats,
3668        .ndo_change_carrier     = bcmgenet_change_carrier,
3669};
3670
3671/* Array of GENET hardware parameters/characteristics */
3672static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3673        [GENET_V1] = {
3674                .tx_queues = 0,
3675                .tx_bds_per_q = 0,
3676                .rx_queues = 0,
3677                .rx_bds_per_q = 0,
3678                .bp_in_en_shift = 16,
3679                .bp_in_mask = 0xffff,
3680                .hfb_filter_cnt = 16,
3681                .qtag_mask = 0x1F,
3682                .hfb_offset = 0x1000,
3683                .rdma_offset = 0x2000,
3684                .tdma_offset = 0x3000,
3685                .words_per_bd = 2,
3686        },
3687        [GENET_V2] = {
3688                .tx_queues = 4,
3689                .tx_bds_per_q = 32,
3690                .rx_queues = 0,
3691                .rx_bds_per_q = 0,
3692                .bp_in_en_shift = 16,
3693                .bp_in_mask = 0xffff,
3694                .hfb_filter_cnt = 16,
3695                .qtag_mask = 0x1F,
3696                .tbuf_offset = 0x0600,
3697                .hfb_offset = 0x1000,
3698                .hfb_reg_offset = 0x2000,
3699                .rdma_offset = 0x3000,
3700                .tdma_offset = 0x4000,
3701                .words_per_bd = 2,
3702                .flags = GENET_HAS_EXT,
3703        },
3704        [GENET_V3] = {
3705                .tx_queues = 4,
3706                .tx_bds_per_q = 32,
3707                .rx_queues = 0,
3708                .rx_bds_per_q = 0,
3709                .bp_in_en_shift = 17,
3710                .bp_in_mask = 0x1ffff,
3711                .hfb_filter_cnt = 48,
3712                .hfb_filter_size = 128,
3713                .qtag_mask = 0x3F,
3714                .tbuf_offset = 0x0600,
3715                .hfb_offset = 0x8000,
3716                .hfb_reg_offset = 0xfc00,
3717                .rdma_offset = 0x10000,
3718                .tdma_offset = 0x11000,
3719                .words_per_bd = 2,
3720                .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3721                         GENET_HAS_MOCA_LINK_DET,
3722        },
3723        [GENET_V4] = {
3724                .tx_queues = 4,
3725                .tx_bds_per_q = 32,
3726                .rx_queues = 0,
3727                .rx_bds_per_q = 0,
3728                .bp_in_en_shift = 17,
3729                .bp_in_mask = 0x1ffff,
3730                .hfb_filter_cnt = 48,
3731                .hfb_filter_size = 128,
3732                .qtag_mask = 0x3F,
3733                .tbuf_offset = 0x0600,
3734                .hfb_offset = 0x8000,
3735                .hfb_reg_offset = 0xfc00,
3736                .rdma_offset = 0x2000,
3737                .tdma_offset = 0x4000,
3738                .words_per_bd = 3,
3739                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3740                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3741        },
3742        [GENET_V5] = {
3743                .tx_queues = 4,
3744                .tx_bds_per_q = 32,
3745                .rx_queues = 0,
3746                .rx_bds_per_q = 0,
3747                .bp_in_en_shift = 17,
3748                .bp_in_mask = 0x1ffff,
3749                .hfb_filter_cnt = 48,
3750                .hfb_filter_size = 128,
3751                .qtag_mask = 0x3F,
3752                .tbuf_offset = 0x0600,
3753                .hfb_offset = 0x8000,
3754                .hfb_reg_offset = 0xfc00,
3755                .rdma_offset = 0x2000,
3756                .tdma_offset = 0x4000,
3757                .words_per_bd = 3,
3758                .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3759                         GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3760        },
3761};
3762
3763/* Infer hardware parameters from the detected GENET version */
3764static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3765{
3766        struct bcmgenet_hw_params *params;
3767        u32 reg;
3768        u8 major;
3769        u16 gphy_rev;
3770
3771        if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
3772                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3773                genet_dma_ring_regs = genet_dma_ring_regs_v4;
3774        } else if (GENET_IS_V3(priv)) {
3775                bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3776                genet_dma_ring_regs = genet_dma_ring_regs_v123;
3777        } else if (GENET_IS_V2(priv)) {
3778                bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3779                genet_dma_ring_regs = genet_dma_ring_regs_v123;
3780        } else if (GENET_IS_V1(priv)) {
3781                bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3782                genet_dma_ring_regs = genet_dma_ring_regs_v123;
3783        }
3784
3785        /* enum genet_version starts at 1 */
3786        priv->hw_params = &bcmgenet_hw_params[priv->version];
3787        params = priv->hw_params;
3788
3789        /* Read GENET HW version */
3790        reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3791        major = (reg >> 24 & 0x0f);
3792        if (major == 6)
3793                major = 5;
3794        else if (major == 5)
3795                major = 4;
3796        else if (major == 0)
3797                major = 1;
3798        if (major != priv->version) {
3799                dev_err(&priv->pdev->dev,
3800                        "GENET version mismatch, got: %d, configured for: %d\n",
3801                        major, priv->version);
3802        }
3803
3804        /* Print the GENET core version */
3805        dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3806                 major, (reg >> 16) & 0x0f, reg & 0xffff);
3807
3808        /* Store the integrated PHY revision for the MDIO probing function
3809         * to pass this information to the PHY driver. The PHY driver expects
3810         * to find the PHY major revision in bits 15:8 while the GENET register
3811         * stores that information in bits 7:0, account for that.
3812         *
3813         * On newer chips, starting with PHY revision G0, a new scheme is
3814         * deployed similar to the Starfighter 2 switch with GPHY major
3815         * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3816         * is reserved as well as special value 0x01ff, we have a small
3817         * heuristic to check for the new GPHY revision and re-arrange things
3818         * so the GPHY driver is happy.
3819         */
3820        gphy_rev = reg & 0xffff;
3821
3822        if (GENET_IS_V5(priv)) {
3823                /* The EPHY revision should come from the MDIO registers of
3824                 * the PHY not from GENET.
3825                 */
3826                if (gphy_rev != 0) {
3827                        pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3828                                gphy_rev);
3829                }
3830        /* This value is reserved and requires special treatment */
3831        } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3832                pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3833                return;
3834        /* This is the good old scheme, just GPHY major, no minor nor patch */
3835        } else if ((gphy_rev & 0xf0) != 0) {
3836                priv->gphy_rev = gphy_rev << 8;
3837        /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3838        } else if ((gphy_rev & 0xff00) != 0) {
3839                priv->gphy_rev = gphy_rev;
3840        }
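
            /* Two hypothetical values illustrating the heuristic above on a
             * non-V5 GENET: gphy_rev = 0x0062 matches the old scheme
             * (non-zero bits 7:4) and is stored shifted up as
             * priv->gphy_rev = 0x6200, while gphy_rev = 0x1101 already has
             * its major revision in bits 15:8 and is stored unchanged.
             */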
3841
3842#ifdef CONFIG_PHYS_ADDR_T_64BIT
3843        if (!(params->flags & GENET_HAS_40BITS))
3844                pr_warn("GENET does not support 40-bit PA\n");
3845#endif
3846
3847        pr_debug("Configuration for version: %d\n"
3848                "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3849                "BP << en: %2d, BP msk: 0x%05x\n"
3850                "HFB count: %2d, QTAQ msk: 0x%05x\n"
3851                "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3852                "RDMA: 0x%05x, TDMA: 0x%05x\n"
3853                "Words/BD: %d\n",
3854                priv->version,
3855                params->tx_queues, params->tx_bds_per_q,
3856                params->rx_queues, params->rx_bds_per_q,
3857                params->bp_in_en_shift, params->bp_in_mask,
3858                params->hfb_filter_cnt, params->qtag_mask,
3859                params->tbuf_offset, params->hfb_offset,
3860                params->hfb_reg_offset,
3861                params->rdma_offset, params->tdma_offset,
3862                params