linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX        256
#define STMMAC_TX_XSK_AVAIL             16
#define STMMAC_RX_FILL_BATCH            16

#define STMMAC_XDP_PASS         0
#define STMMAC_XDP_CONSUMED     BIT(0)
#define STMMAC_XDP_TX           BIT(1)
#define STMMAC_XDP_REDIRECT     BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver uses ring mode to manage tx and rx descriptors,
 * but allows the user to force use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

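/**
 * stmmac_bus_clks_config - configure the required bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: enables or disables the stmmac and pclk bus clocks, plus any
 * platform specific clocks, unwinding the already enabled ones on failure.
 */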
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
        int ret = 0;

        if (enabled) {
                ret = clk_prepare_enable(priv->plat->stmmac_clk);
                if (ret)
                        return ret;
                ret = clk_prepare_enable(priv->plat->pclk);
                if (ret) {
                        clk_disable_unprepare(priv->plat->stmmac_clk);
                        return ret;
                }
                if (priv->plat->clks_config) {
                        ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
                        if (ret) {
                                clk_disable_unprepare(priv->plat->stmmac_clk);
                                clk_disable_unprepare(priv->plat->pclk);
                                return ret;
                        }
                }
        } else {
                clk_disable_unprepare(priv->plat->stmmac_clk);
                clk_disable_unprepare(priv->plat->pclk);
                if (priv->plat->clks_config)
                        priv->plat->clks_config(priv->plat->bsp_priv, enabled);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

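/**
 * __stmmac_disable_all_queues - Disable NAPI for all queues
 * @priv: driver private structure
 * Description: queues running in AF_XDP zero-copy mode use the combined
 * rxtx NAPI instance; all others use the separate RX and TX instances.
 */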
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                if (stmmac_xdp_is_enabled(priv) &&
                    test_bit(queue, priv->af_xdp_zc_qps)) {
                        napi_disable(&ch->rxtx_napi);
                        continue;
                }

                if (queue < rx_queues_cnt)
                        napi_disable(&ch->rx_napi);
                if (queue < tx_queues_cnt)
                        napi_disable(&ch->tx_napi);
        }
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        struct stmmac_rx_queue *rx_q;
        u32 queue;

        /* synchronize_rcu() needed for pending XDP buffers to drain */
        for (queue = 0; queue < rx_queues_cnt; queue++) {
                rx_q = &priv->rx_queue[queue];
                if (rx_q->xsk_pool) {
                        synchronize_rcu();
                        break;
                }
        }

        __stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                if (stmmac_xdp_is_enabled(priv) &&
                    test_bit(queue, priv->af_xdp_zc_qps)) {
                        napi_enable(&ch->rxtx_napi);
                        continue;
                }

                if (queue < rx_queues_cnt)
                        napi_enable(&ch->rx_napi);
                if (queue < tx_queues_cnt)
                        napi_enable(&ch->tx_napi);
        }
}

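/* Schedule the service task unless the interface is going down or the task
 * has already been scheduled.
 */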
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
        if (!test_bit(STMMAC_DOWN, &priv->state) &&
            !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
                queue_work(priv->wq, &priv->service_task);
}

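/* Take the carrier down and let the service task reset the device. */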
static void stmmac_global_err(struct stmmac_priv *priv)
{
        netif_carrier_off(priv->dev);
        set_bit(STMMAC_RESET_REQUESTED, &priv->state);
        stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Vice versa, the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* The platform provided default clk_csr is assumed valid
         * for all other cases except for the below mentioned ones.
         * For values higher than the IEEE 802.3 specified frequency
         * we cannot estimate the proper divider, as the frequency of
         * clk_csr_i is not known. So we do not change the default
         * divider.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }

        if (priv->plat->has_sun8i) {
                if (clk_rate > 160000000)
                        priv->clk_csr = 0x03;
                else if (clk_rate > 80000000)
                        priv->clk_csr = 0x02;
                else if (clk_rate > 40000000)
                        priv->clk_csr = 0x01;
                else
                        priv->clk_csr = 0;
        }

        if (priv->plat->has_xgmac) {
                if (clk_rate > 400000000)
                        priv->clk_csr = 0x5;
                else if (clk_rate > 350000000)
                        priv->clk_csr = 0x4;
                else if (clk_rate > 300000000)
                        priv->clk_csr = 0x3;
                else if (clk_rate > 250000000)
                        priv->clk_csr = 0x2;
                else if (clk_rate > 150000000)
                        priv->clk_csr = 0x1;
                else
                        priv->clk_csr = 0x0;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

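/**
 * stmmac_tx_avail - Get TX queue available descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: one slot is kept unused so that a full ring (cur_tx wrapping
 * onto dirty_tx) can be told apart from an empty one.
 */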
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;

        if (tx_q->dirty_tx > tx_q->cur_tx)
                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
                avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

        return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;

        if (rx_q->dirty_rx <= rx_q->cur_rx)
                dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
                dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

        return dirty;
}

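/* Select between the HW LPI entry timer and the SW EEE timer: when the HW
 * timer is enabled, the SW timer flag is cleared and tx_lpi_timer is
 * programmed into the MAC; otherwise a zero timer value disables it.
 */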
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
        int tx_lpi_timer;

        /* Clear/set the SW EEE timer flag based on LPI ET enablement */
        priv->eee_sw_timer_en = en ? 0 : 1;
        tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
        stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if so,
 * enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* check if all TX queues have the work finished */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                if (tx_q->dirty_tx != tx_q->cur_tx)
                        return; /* still unfinished work */
        }

        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
                stmmac_set_eee_mode(priv, priv->hw,
                                priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits LPI mode when the LPI
 * state is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        if (!priv->eee_sw_timer_en) {
                stmmac_lpi_entry_timer_config(priv, 0);
                return;
        }

        stmmac_reset_eee_mode(priv, priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
        struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        int eee_tw_timer = priv->eee_tw_timer;

        /* Using PCS we cannot deal with the phy registers at this stage
         * so we do not support extra features like EEE.
         */
        if (priv->hw->pcs == STMMAC_PCS_TBI ||
            priv->hw->pcs == STMMAC_PCS_RTBI)
                return false;

        /* Check if MAC core supports the EEE feature. */
        if (!priv->dma_cap.eee)
                return false;

        mutex_lock(&priv->lock);

        /* Check if it needs to be deactivated */
        if (!priv->eee_active) {
                if (priv->eee_enabled) {
                        netdev_dbg(priv->dev, "disable EEE\n");
                        stmmac_lpi_entry_timer_config(priv, 0);
                        del_timer_sync(&priv->eee_ctrl_timer);
                        stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
                }
                mutex_unlock(&priv->lock);
                return false;
        }

        if (priv->eee_active && !priv->eee_enabled) {
                timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
                stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
                                     eee_tw_timer);
        }

        if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
                del_timer_sync(&priv->eee_ctrl_timer);
                priv->tx_path_in_lpi_mode = false;
                stmmac_lpi_entry_timer_config(priv, 1);
        } else {
                stmmac_lpi_entry_timer_config(priv, 0);
                mod_timer(&priv->eee_ctrl_timer,
                          STMMAC_LPI_T(priv->tx_lpi_timer));
        }

        mutex_unlock(&priv->lock);
        netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to the
 * stack, after performing some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        bool found = false;
        s64 adjust = 0;
        u64 ns = 0;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (stmmac_get_tx_timestamp_status(priv, p)) {
                stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
                found = true;
        } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
                found = true;
        }

        if (found) {
                /* Correct the clk domain crossing(CDC) error */
                if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
                        adjust += -(2 * (NSEC_PER_SEC /
                                         priv->plat->clk_ptp_rate));
                        ns += adjust;
                }

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        struct dma_desc *desc = p;
        u64 adjust = 0;
        u64 ns = 0;

        if (!priv->hwts_rx_en)
                return;
        /* For GMAC4, the valid timestamp is from CTX next desc. */
        if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
                desc = np;

        /* Check if timestamp is available */
        if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
                stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

                /* Correct the clk domain crossing(CDC) error */
                if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
                        adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
                        ns -= adjust;
                }

                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 sec_inc = 0;
        u32 value = 0;
        bool xmac;

        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* 'xmac' hardware can support Sync, Pdelay_Req and
                         * Pdelay_resp by setting bit14 and bits17/16 to 01.
                         * This leaves Delay_Req timestamps out.
                         * Enable all events *and* general purpose message
                         * timestamping.
                         */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.AS1 any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
                        if (priv->synopsys_id != DWMAC_CORE_5_10)
                                ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.AS1, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.AS1, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_NTP_ALL:
                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                stmmac_config_sub_second_increment(priv,
                                priv->ptpaddr, priv->plat->clk_ptp_rate,
                                xmac, &sec_inc);
                temp = div_u64(1000000000ULL, sec_inc);

                /* Store sub second increment and flags for later use */
                priv->sub_second_inc = sec_inc;
                priv->systime_flags = value;

                /* calculate the default addend:
                 * formula is:
                 * addend = (2^32)/freq_div_ratio;
                 * where, freq_div_ratio = 1e9ns/sec_inc
                 */
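                /* Illustrative numbers only: with sec_inc = 20ns and a
                 * 62.5MHz clk_ptp_rate, freq_div_ratio = 1e9 / 20 = 5e7 and
                 * addend = (5e7 * 2^32) / 62.5e6 = 0.8 * 2^32 = 0xCCCCCCCC.
                 */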
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                stmmac_init_systime(priv, priv->ptpaddr,
                                (u32)now.tv_sec, now.tv_nsec);
        }

        memcpy(&priv->tstamp_config, &config, sizeof(config));

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config *config = &priv->tstamp_config;

        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        return copy_to_user(ifr->ifr_data, config,
                            sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
        if (xmac && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

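/* Disable the PTP reference clock and unregister the PTP clock driver. */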
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
                        priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
                            unsigned long *supported,
                            struct phylink_link_state *state)
{
        struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
        int tx_cnt = priv->plat->tx_queues_to_use;
        int max_speed = priv->plat->max_speed;

        phylink_set(mac_supported, 10baseT_Half);
        phylink_set(mac_supported, 10baseT_Full);
        phylink_set(mac_supported, 100baseT_Half);
        phylink_set(mac_supported, 100baseT_Full);
        phylink_set(mac_supported, 1000baseT_Half);
        phylink_set(mac_supported, 1000baseT_Full);
        phylink_set(mac_supported, 1000baseKX_Full);

        phylink_set(mac_supported, Autoneg);
        phylink_set(mac_supported, Pause);
        phylink_set(mac_supported, Asym_Pause);
        phylink_set_port_modes(mac_supported);

        /* Cut down 1G if asked to */
        if ((max_speed > 0) && (max_speed < 1000)) {
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseX_Full);
        } else if (priv->plat->has_gmac4) {
                if (!max_speed || max_speed >= 2500) {
                        phylink_set(mac_supported, 2500baseT_Full);
                        phylink_set(mac_supported, 2500baseX_Full);
                }
        } else if (priv->plat->has_xgmac) {
                if (!max_speed || (max_speed >= 2500)) {
                        phylink_set(mac_supported, 2500baseT_Full);
                        phylink_set(mac_supported, 2500baseX_Full);
                }
                if (!max_speed || (max_speed >= 5000)) {
                        phylink_set(mac_supported, 5000baseT_Full);
                }
                if (!max_speed || (max_speed >= 10000)) {
                        phylink_set(mac_supported, 10000baseSR_Full);
                        phylink_set(mac_supported, 10000baseLR_Full);
                        phylink_set(mac_supported, 10000baseER_Full);
                        phylink_set(mac_supported, 10000baseLRM_Full);
                        phylink_set(mac_supported, 10000baseT_Full);
                        phylink_set(mac_supported, 10000baseKX4_Full);
                        phylink_set(mac_supported, 10000baseKR_Full);
                }
                if (!max_speed || (max_speed >= 25000)) {
                        phylink_set(mac_supported, 25000baseCR_Full);
                        phylink_set(mac_supported, 25000baseKR_Full);
                        phylink_set(mac_supported, 25000baseSR_Full);
                }
                if (!max_speed || (max_speed >= 40000)) {
                        phylink_set(mac_supported, 40000baseKR4_Full);
                        phylink_set(mac_supported, 40000baseCR4_Full);
                        phylink_set(mac_supported, 40000baseSR4_Full);
                        phylink_set(mac_supported, 40000baseLR4_Full);
                }
                if (!max_speed || (max_speed >= 50000)) {
                        phylink_set(mac_supported, 50000baseCR2_Full);
                        phylink_set(mac_supported, 50000baseKR2_Full);
                        phylink_set(mac_supported, 50000baseSR2_Full);
                        phylink_set(mac_supported, 50000baseKR_Full);
                        phylink_set(mac_supported, 50000baseSR_Full);
                        phylink_set(mac_supported, 50000baseCR_Full);
                        phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
                        phylink_set(mac_supported, 50000baseDR_Full);
                }
                if (!max_speed || (max_speed >= 100000)) {
                        phylink_set(mac_supported, 100000baseKR4_Full);
                        phylink_set(mac_supported, 100000baseSR4_Full);
                        phylink_set(mac_supported, 100000baseCR4_Full);
                        phylink_set(mac_supported, 100000baseLR4_ER4_Full);
                        phylink_set(mac_supported, 100000baseKR2_Full);
                        phylink_set(mac_supported, 100000baseSR2_Full);
                        phylink_set(mac_supported, 100000baseCR2_Full);
                        phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
                        phylink_set(mac_supported, 100000baseDR2_Full);
                }
        }

        /* Half-Duplex can only work with a single TX queue */
        if (tx_cnt > 1) {
                phylink_set(mask, 10baseT_Half);
                phylink_set(mask, 100baseT_Half);
                phylink_set(mask, 1000baseT_Half);
        }

        linkmode_and(supported, supported, mac_supported);
        linkmode_andnot(supported, supported, mask);

        linkmode_and(state->advertising, state->advertising, mac_supported);
        linkmode_andnot(state->advertising, state->advertising, mask);

        /* If PCS is supported, check which modes it supports. */
        if (priv->hw->xpcs)
                xpcs_validate(priv->hw->xpcs, supported, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
                              const struct phylink_link_state *state)
{
        /* Nothing to do, xpcs_config() handles everything */
}

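/* On link up, when the FPE handshake is enabled, send a verify mPacket to
 * (re)start verification; on link down, reset the local and link partner
 * FPE states.
 */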
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
        struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
        enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
        enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
        bool *hs_enable = &fpe_cfg->hs_enable;

        if (is_up && *hs_enable) {
                stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
        } else {
                *lo_state = FPE_STATE_OFF;
                *lp_state = FPE_STATE_OFF;
        }
}

static void stmmac_mac_link_down(struct phylink_config *config,
                                 unsigned int mode, phy_interface_t interface)
{
        struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

        stmmac_mac_set(priv, priv->ioaddr, false);
        priv->eee_active = false;
        priv->tx_lpi_enabled = false;
        stmmac_eee_init(priv);
        stmmac_set_eee_pls(priv, priv->hw, false);

        if (priv->dma_cap.fpesel)
                stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
                               struct phy_device *phy,
                               unsigned int mode, phy_interface_t interface,
                               int speed, int duplex,
                               bool tx_pause, bool rx_pause)
{
        struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
        u32 ctrl;

        ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
        ctrl &= ~priv->hw->link.speed_mask;

        if (interface == PHY_INTERFACE_MODE_USXGMII) {
                switch (speed) {
                case SPEED_10000:
                        ctrl |= priv->hw->link.xgmii.speed10000;
                        break;
                case SPEED_5000:
                        ctrl |= priv->hw->link.xgmii.speed5000;
                        break;
                case SPEED_2500:
                        ctrl |= priv->hw->link.xgmii.speed2500;
                        break;
                default:
                        return;
                }
        } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
                switch (speed) {
                case SPEED_100000:
                        ctrl |= priv->hw->link.xlgmii.speed100000;
                        break;
                case SPEED_50000:
                        ctrl |= priv->hw->link.xlgmii.speed50000;
                        break;
                case SPEED_40000:
                        ctrl |= priv->hw->link.xlgmii.speed40000;
                        break;
                case SPEED_25000:
                        ctrl |= priv->hw->link.xlgmii.speed25000;
                        break;
                case SPEED_10000:
                        ctrl |= priv->hw->link.xgmii.speed10000;
                        break;
                case SPEED_2500:
                        ctrl |= priv->hw->link.speed2500;
                        break;
                case SPEED_1000:
                        ctrl |= priv->hw->link.speed1000;
                        break;
                default:
                        return;
                }
        } else {
                switch (speed) {
                case SPEED_2500:
                        ctrl |= priv->hw->link.speed2500;
                        break;
                case SPEED_1000:
                        ctrl |= priv->hw->link.speed1000;
                        break;
                case SPEED_100:
                        ctrl |= priv->hw->link.speed100;
                        break;
                case SPEED_10:
                        ctrl |= priv->hw->link.speed10;
                        break;
                default:
                        return;
                }
        }

        priv->speed = speed;

        if (priv->plat->fix_mac_speed)
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

        if (!duplex)
                ctrl &= ~priv->hw->link.duplex;
        else
                ctrl |= priv->hw->link.duplex;

        /* Flow Control operation */
        if (tx_pause && rx_pause)
                stmmac_mac_flow_ctrl(priv, duplex);

        writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

        stmmac_mac_set(priv, priv->ioaddr, true);
        if (phy && priv->dma_cap.eee) {
                priv->eee_active = phy_init_eee(phy, 1) >= 0;
                priv->eee_enabled = stmmac_eee_init(priv);
                priv->tx_lpi_enabled = priv->eee_enabled;
                stmmac_set_eee_pls(priv, priv->hw, true);
        }

        if (priv->dma_cap.fpesel)
                stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
        .validate = stmmac_validate,
        .mac_config = stmmac_mac_config,
        .mac_link_down = stmmac_mac_link_down,
        .mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS, the
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the MAC driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct device_node *node;
        int ret;

        node = priv->plat->phylink_node;

        if (node)
                ret = phylink_of_phy_connect(priv->phylink, node, 0);

        /* Some DT bindings do not set-up the PHY handle. Let's try to
         * manually parse it
         */
        if (!node || ret) {
                int addr = priv->plat->phy_addr;
                struct phy_device *phydev;

                phydev = mdiobus_get_phy(priv->mii, addr);
                if (!phydev) {
                        netdev_err(priv->dev, "no phy at addr %d\n", addr);
                        return -ENODEV;
                }

                ret = phylink_connect_phy(priv->phylink, phydev);
        }

        if (!priv->plat->pmt) {
                struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

                phylink_ethtool_get_wol(priv->phylink, &wol);
                device_set_wakeup_capable(priv->device, !!wol.supported);
        }

        return ret;
}

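/**
 * stmmac_phy_setup - set up the phylink instance
 * @priv: driver private structure
 * Description: creates the phylink instance from the firmware node (or the
 * device fwnode as a fallback) and, if an XPCS is present, attaches it.
 */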
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
        struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
        struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
        int mode = priv->plat->phy_interface;
        struct phylink *phylink;

        priv->phylink_config.dev = &priv->dev->dev;
        priv->phylink_config.type = PHYLINK_NETDEV;
        priv->phylink_config.pcs_poll = true;
        if (priv->plat->mdio_bus_data)
                priv->phylink_config.ovr_an_inband =
                        mdio_bus_data->xpcs_an_inband;

        if (!fwnode)
                fwnode = dev_fwnode(priv->device);

        phylink = phylink_create(&priv->phylink_config, fwnode,
                                 mode, &stmmac_phylink_mac_ops);
        if (IS_ERR(phylink))
                return PTR_ERR(phylink);

        if (priv->hw->xpcs)
                phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

        priv->phylink = phylink;
        return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        unsigned int desc_size;
        void *head_rx;
        u32 queue;

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

                if (priv->extend_desc) {
                        head_rx = (void *)rx_q->dma_erx;
                        desc_size = sizeof(struct dma_extended_desc);
                } else {
                        head_rx = (void *)rx_q->dma_rx;
                        desc_size = sizeof(struct dma_desc);
                }

                /* Display RX ring */
                stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
                                    rx_q->dma_rx_phy, desc_size);
        }
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        unsigned int desc_size;
        void *head_tx;
        u32 queue;

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                pr_info("\tTX Queue %u rings\n", queue);

                if (priv->extend_desc) {
                        head_tx = (void *)tx_q->dma_etx;
                        desc_size = sizeof(struct dma_extended_desc);
                } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
                        head_tx = (void *)tx_q->dma_entx;
                        desc_size = sizeof(struct dma_edesc);
                } else {
                        head_tx = (void *)tx_q->dma_tx;
                        desc_size = sizeof(struct dma_desc);
                }

                stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
                                    tx_q->dma_tx_phy, desc_size);
        }
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        /* Display RX ring */
        stmmac_display_rx_rings(priv);

        /* Display TX ring */
        stmmac_display_tx_rings(priv);
}

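/**
 * stmmac_set_bfsize - choose the DMA buffer size for a given MTU
 * @mtu: the MTU of the interface
 * @bufsize: current buffer size
 * Description: steps the buffer size up through 2/4/8/16KiB as the MTU
 * grows, falling back to DEFAULT_BUFSIZE for small MTUs.
 */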
static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_8KiB)
                ret = BUF_SIZE_16KiB;
        else if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors, whether
 * basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
        for (i = 0; i < priv->dma_rx_size; i++)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
                                        (i == priv->dma_rx_size - 1),
                                        priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
                                        (i == priv->dma_rx_size - 1),
                                        priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors, whether
 * basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
        for (i = 0; i < priv->dma_tx_size; i++) {
                int last = (i == (priv->dma_tx_size - 1));
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &tx_q->dma_etx[i].basic;
                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
                        p = &tx_q->dma_entx[i].basic;
                else
                        p = &tx_q->dma_tx[i];

                stmmac_init_tx_desc(priv, p, priv->mode, last);
        }
}

1386/**
1387 * stmmac_clear_descriptors - clear descriptors
1388 * @priv: driver private structure
1389 * Description: this function is called to clear the TX and RX descriptors
1390 * whether basic or extended descriptors are in use.
1391 */
1392static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1393{
1394        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1395        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1396        u32 queue;
1397
1398        /* Clear the RX descriptors */
1399        for (queue = 0; queue < rx_queue_cnt; queue++)
1400                stmmac_clear_rx_descriptors(priv, queue);
1401
1402        /* Clear the TX descriptors */
1403        for (queue = 0; queue < tx_queue_cnt; queue++)
1404                stmmac_clear_tx_descriptors(priv, queue);
1405}
1406
1407/**
1408 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1409 * @priv: driver private structure
1410 * @p: descriptor pointer
1411 * @i: descriptor index
1412 * @flags: gfp flag
1413 * @queue: RX queue index
1414 * Description: this function is called to allocate a receive buffer, perform
1415 * the DMA mapping and init the descriptor.
1416 */
1417static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1418                                  int i, gfp_t flags, u32 queue)
1419{
1420        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1421        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1422
1423        if (!buf->page) {
1424                buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1425                if (!buf->page)
1426                        return -ENOMEM;
1427                buf->page_offset = stmmac_rx_offset(priv);
1428        }
1429
1430        if (priv->sph && !buf->sec_page) {
1431                buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1432                if (!buf->sec_page)
1433                        return -ENOMEM;
1434
1435                buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1436                stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1437        } else {
1438                buf->sec_page = NULL;
1439                stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1440        }
1441
1442        buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1443
1444        stmmac_set_desc_addr(priv, p, buf->addr);
1445        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1446                stmmac_init_desc3(priv, p);
1447
1448        return 0;
1449}
1450
1451/**
1452 * stmmac_free_rx_buffer - free RX dma buffers
1453 * @priv: private structure
1454 * @queue: RX queue index
1455 * @i: buffer index.
1456 */
1457static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1458{
1459        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1460        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1461
1462        if (buf->page)
1463                page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1464        buf->page = NULL;
1465
1466        if (buf->sec_page)
1467                page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1468        buf->sec_page = NULL;
1469}
1470
1471/**
1472 * stmmac_free_tx_buffer - free TX dma buffers
1473 * @priv: private structure
1474 * @queue: TX queue index.
1475 * @i: buffer index.
1476 */
1477static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1478{
1479        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1480
1481        if (tx_q->tx_skbuff_dma[i].buf &&
1482            tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1483                if (tx_q->tx_skbuff_dma[i].map_as_page)
1484                        dma_unmap_page(priv->device,
1485                                       tx_q->tx_skbuff_dma[i].buf,
1486                                       tx_q->tx_skbuff_dma[i].len,
1487                                       DMA_TO_DEVICE);
1488                else
1489                        dma_unmap_single(priv->device,
1490                                         tx_q->tx_skbuff_dma[i].buf,
1491                                         tx_q->tx_skbuff_dma[i].len,
1492                                         DMA_TO_DEVICE);
1493        }
1494
1495        if (tx_q->xdpf[i] &&
1496            (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1497             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1498                xdp_return_frame(tx_q->xdpf[i]);
1499                tx_q->xdpf[i] = NULL;
1500        }
1501
1502        if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1503                tx_q->xsk_frames_done++;
1504
1505        if (tx_q->tx_skbuff[i] &&
1506            tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1507                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1508                tx_q->tx_skbuff[i] = NULL;
1509        }
1510
1511        tx_q->tx_skbuff_dma[i].buf = 0;
1512        tx_q->tx_skbuff_dma[i].map_as_page = false;
1513}
1514
1515/**
1516 * dma_free_rx_skbufs - free RX dma buffers
1517 * @priv: private structure
1518 * @queue: RX queue index
1519 */
1520static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1521{
1522        int i;
1523
1524        for (i = 0; i < priv->dma_rx_size; i++)
1525                stmmac_free_rx_buffer(priv, queue, i);
1526}
1527
1528static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1529                                   gfp_t flags)
1530{
1531        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1532        int i;
1533
1534        for (i = 0; i < priv->dma_rx_size; i++) {
1535                struct dma_desc *p;
1536                int ret;
1537
1538                if (priv->extend_desc)
1539                        p = &((rx_q->dma_erx + i)->basic);
1540                else
1541                        p = rx_q->dma_rx + i;
1542
1543                ret = stmmac_init_rx_buffers(priv, p, i, flags,
1544                                             queue);
1545                if (ret)
1546                        return ret;
1547
1548                rx_q->buf_alloc_num++;
1549        }
1550
1551        return 0;
1552}
1553
1554/**
1555 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1556 * @priv: private structure
1557 * @queue: RX queue index
1558 */
1559static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1560{
1561        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1562        int i;
1563
1564        for (i = 0; i < priv->dma_rx_size; i++) {
1565                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1566
1567                if (!buf->xdp)
1568                        continue;
1569
1570                xsk_buff_free(buf->xdp);
1571                buf->xdp = NULL;
1572        }
1573}
1574
1575static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1576{
1577        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1578        int i;
1579
1580        for (i = 0; i < priv->dma_rx_size; i++) {
1581                struct stmmac_rx_buffer *buf;
1582                dma_addr_t dma_addr;
1583                struct dma_desc *p;
1584
1585                if (priv->extend_desc)
1586                        p = (struct dma_desc *)(rx_q->dma_erx + i);
1587                else
1588                        p = rx_q->dma_rx + i;
1589
1590                buf = &rx_q->buf_pool[i];
1591
1592                buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1593                if (!buf->xdp)
1594                        return -ENOMEM;
1595
1596                dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1597                stmmac_set_desc_addr(priv, p, dma_addr);
1598                rx_q->buf_alloc_num++;
1599        }
1600
1601        return 0;
1602}
1603
1604static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1605{
1606        if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1607                return NULL;
1608
1609        return xsk_get_pool_from_qid(priv->dev, queue);
1610}
1611
1612/**
1613 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1614 * @priv: driver private structure
1615 * @queue: RX queue index
1616 * @flags: gfp flag.
1617 * Description: this function initializes the DMA RX descriptors
1618 * and allocates the socket buffers. It supports the chained and ring
1619 * modes.
1620 */
1621static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1622{
1623        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1624        int ret;
1625
1626        netif_dbg(priv, probe, priv->dev,
1627                  "(%s) dma_rx_phy=0x%08x\n", __func__,
1628                  (u32)rx_q->dma_rx_phy);
1629
1630        stmmac_clear_rx_descriptors(priv, queue);
1631
1632        xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1633
1634        rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1635
1636        if (rx_q->xsk_pool) {
1637                WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1638                                                   MEM_TYPE_XSK_BUFF_POOL,
1639                                                   NULL));
1640                netdev_info(priv->dev,
1641                            "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1642                            rx_q->queue_index);
1643                xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1644        } else {
1645                WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1646                                                   MEM_TYPE_PAGE_POOL,
1647                                                   rx_q->page_pool));
1648                netdev_info(priv->dev,
1649                            "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1650                            rx_q->queue_index);
1651        }
1652
1653        if (rx_q->xsk_pool) {
1654                /* RX XDP ZC buffer pool may not be populated, e.g.
1655                 * xdpsock TX-only.
1656                 */
1657                stmmac_alloc_rx_buffers_zc(priv, queue);
1658        } else {
1659                ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1660                if (ret < 0)
1661                        return -ENOMEM;
1662        }
1663
1664        rx_q->cur_rx = 0;
1665        rx_q->dirty_rx = 0;
1666
1667        /* Setup the chained descriptor addresses */
1668        if (priv->mode == STMMAC_CHAIN_MODE) {
1669                if (priv->extend_desc)
1670                        stmmac_mode_init(priv, rx_q->dma_erx,
1671                                         rx_q->dma_rx_phy,
1672                                         priv->dma_rx_size, 1);
1673                else
1674                        stmmac_mode_init(priv, rx_q->dma_rx,
1675                                         rx_q->dma_rx_phy,
1676                                         priv->dma_rx_size, 0);
1677        }
1678
1679        return 0;
1680}
1681
1682static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1683{
1684        struct stmmac_priv *priv = netdev_priv(dev);
1685        u32 rx_count = priv->plat->rx_queues_to_use;
1686        u32 queue;
1687        int ret;
1688
1689        /* RX INITIALIZATION */
1690        netif_dbg(priv, probe, priv->dev,
1691                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1692
1693        for (queue = 0; queue < rx_count; queue++) {
1694                ret = __init_dma_rx_desc_rings(priv, queue, flags);
1695                if (ret)
1696                        goto err_init_rx_buffers;
1697        }
1698
1699        return 0;
1700
1701 err_init_rx_buffers:
1702        do {
1703                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1704
1705                if (rx_q->xsk_pool)
1706                        dma_free_rx_xskbufs(priv, queue);
1707                else
1708                        dma_free_rx_skbufs(priv, queue);
1709
1710                rx_q->buf_alloc_num = 0;
1711                rx_q->xsk_pool = NULL;
1712
1713                /* queue is unsigned, so a "queue >= 0" test would always
1714                 * be true; the post-decrement below frees the failing
1715                 * queue first and then walks back down to queue 0.
1716                 */
1717        } while (queue-- > 0);
1718
1719        return ret;
1720}
1721
1722/**
1723 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1724 * @priv: driver private structure
1725 * @queue : TX queue index
1726 * Description: this function initializes the DMA TX descriptors and
1727 * resets the per-entry bookkeeping state. It supports the chained and
1728 * ring modes.
1729 */
1730static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1731{
1732        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1733        int i;
1734
1735        netif_dbg(priv, probe, priv->dev,
1736                  "(%s) dma_tx_phy=0x%08x\n", __func__,
1737                  (u32)tx_q->dma_tx_phy);
1738
1739        /* Setup the chained descriptor addresses */
1740        if (priv->mode == STMMAC_CHAIN_MODE) {
1741                if (priv->extend_desc)
1742                        stmmac_mode_init(priv, tx_q->dma_etx,
1743                                         tx_q->dma_tx_phy,
1744                                         priv->dma_tx_size, 1);
1745                else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1746                        stmmac_mode_init(priv, tx_q->dma_tx,
1747                                         tx_q->dma_tx_phy,
1748                                         priv->dma_tx_size, 0);
1749        }
1750
1751        tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1752
1753        for (i = 0; i < priv->dma_tx_size; i++) {
1754                struct dma_desc *p;
1755
1756                if (priv->extend_desc)
1757                        p = &((tx_q->dma_etx + i)->basic);
1758                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1759                        p = &((tx_q->dma_entx + i)->basic);
1760                else
1761                        p = tx_q->dma_tx + i;
1762
1763                stmmac_clear_desc(priv, p);
1764
1765                tx_q->tx_skbuff_dma[i].buf = 0;
1766                tx_q->tx_skbuff_dma[i].map_as_page = false;
1767                tx_q->tx_skbuff_dma[i].len = 0;
1768                tx_q->tx_skbuff_dma[i].last_segment = false;
1769                tx_q->tx_skbuff[i] = NULL;
1770        }
1771
1772        tx_q->dirty_tx = 0;
1773        tx_q->cur_tx = 0;
1774        tx_q->mss = 0;
1775
1776        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1777
1778        return 0;
1779}
1780
1781static int init_dma_tx_desc_rings(struct net_device *dev)
1782{
1783        struct stmmac_priv *priv = netdev_priv(dev);
1784        u32 tx_queue_cnt;
1785        u32 queue;
1786
1787        tx_queue_cnt = priv->plat->tx_queues_to_use;
1788
1789        for (queue = 0; queue < tx_queue_cnt; queue++)
1790                __init_dma_tx_desc_rings(priv, queue);
1791
1792        return 0;
1793}
1794
1795/**
1796 * init_dma_desc_rings - init the RX/TX descriptor rings
1797 * @dev: net device structure
1798 * @flags: gfp flag.
1799 * Description: this function initializes the DMA RX/TX descriptors
1800 * and allocates the socket buffers. It supports the chained and ring
1801 * modes.
1802 */
1803static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1804{
1805        struct stmmac_priv *priv = netdev_priv(dev);
1806        int ret;
1807
1808        ret = init_dma_rx_desc_rings(dev, flags);
1809        if (ret)
1810                return ret;
1811
1812        ret = init_dma_tx_desc_rings(dev);
1813
1814        stmmac_clear_descriptors(priv);
1815
1816        if (netif_msg_hw(priv))
1817                stmmac_display_rings(priv);
1818
1819        return ret;
1820}
1821
1822/**
1823 * dma_free_tx_skbufs - free TX dma buffers
1824 * @priv: private structure
1825 * @queue: TX queue index
1826 */
1827static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1828{
1829        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1830        int i;
1831
1832        tx_q->xsk_frames_done = 0;
1833
1834        for (i = 0; i < priv->dma_tx_size; i++)
1835                stmmac_free_tx_buffer(priv, queue, i);
1836
1837        if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1838                xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1839                tx_q->xsk_frames_done = 0;
1840                tx_q->xsk_pool = NULL;
1841        }
1842}
1843
1844/**
1845 * stmmac_free_tx_skbufs - free TX skb buffers
1846 * @priv: private structure
1847 */
1848static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1849{
1850        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1851        u32 queue;
1852
1853        for (queue = 0; queue < tx_queue_cnt; queue++)
1854                dma_free_tx_skbufs(priv, queue);
1855}
1856
1857/**
1858 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1859 * @priv: private structure
1860 * @queue: RX queue index
1861 */
1862static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1863{
1864        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1865
1866        /* Release the DMA RX socket buffers */
1867        if (rx_q->xsk_pool)
1868                dma_free_rx_xskbufs(priv, queue);
1869        else
1870                dma_free_rx_skbufs(priv, queue);
1871
1872        rx_q->buf_alloc_num = 0;
1873        rx_q->xsk_pool = NULL;
1874
1875        /* Free DMA regions of consistent memory previously allocated */
1876        if (!priv->extend_desc)
1877                dma_free_coherent(priv->device, priv->dma_rx_size *
1878                                  sizeof(struct dma_desc),
1879                                  rx_q->dma_rx, rx_q->dma_rx_phy);
1880        else
1881                dma_free_coherent(priv->device, priv->dma_rx_size *
1882                                  sizeof(struct dma_extended_desc),
1883                                  rx_q->dma_erx, rx_q->dma_rx_phy);
1884
1885        if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1886                xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1887
1888        kfree(rx_q->buf_pool);
1889        if (rx_q->page_pool)
1890                page_pool_destroy(rx_q->page_pool);
1891}
1892
1893static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1894{
1895        u32 rx_count = priv->plat->rx_queues_to_use;
1896        u32 queue;
1897
1898        /* Free RX queue resources */
1899        for (queue = 0; queue < rx_count; queue++)
1900                __free_dma_rx_desc_resources(priv, queue);
1901}
1902
1903/**
1904 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1905 * @priv: private structure
1906 * @queue: TX queue index
1907 */
1908static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1909{
1910        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1911        size_t size;
1912        void *addr;
1913
1914        /* Release the DMA TX socket buffers */
1915        dma_free_tx_skbufs(priv, queue);
1916
1917        if (priv->extend_desc) {
1918                size = sizeof(struct dma_extended_desc);
1919                addr = tx_q->dma_etx;
1920        } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1921                size = sizeof(struct dma_edesc);
1922                addr = tx_q->dma_entx;
1923        } else {
1924                size = sizeof(struct dma_desc);
1925                addr = tx_q->dma_tx;
1926        }
1927
1928        size *= priv->dma_tx_size;
1929
1930        dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1931
1932        kfree(tx_q->tx_skbuff_dma);
1933        kfree(tx_q->tx_skbuff);
1934}
1935
1936static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1937{
1938        u32 tx_count = priv->plat->tx_queues_to_use;
1939        u32 queue;
1940
1941        /* Free TX queue resources */
1942        for (queue = 0; queue < tx_count; queue++)
1943                __free_dma_tx_desc_resources(priv, queue);
1944}
1945
1946/**
1947 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1948 * @priv: private structure
1949 * @queue: RX queue index
1950 * Description: according to which descriptor type is in use (extended or
1951 * basic), this function allocates the RX resources for the given queue:
1952 * the page_pool, the buf_pool bookkeeping array and the descriptor ring
1953 * in coherent memory; it also registers the XDP RX queue info.
1954 */
1955static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1956{
1957        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1958        struct stmmac_channel *ch = &priv->channel[queue];
1959        bool xdp_prog = stmmac_xdp_is_enabled(priv);
1960        struct page_pool_params pp_params = { 0 };
1961        unsigned int num_pages;
1962        unsigned int napi_id;
1963        int ret;
1964
1965        rx_q->queue_index = queue;
1966        rx_q->priv_data = priv;
1967
1968        pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1969        pp_params.pool_size = priv->dma_rx_size;
1970        num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1971        pp_params.order = ilog2(num_pages);
1972        pp_params.nid = dev_to_node(priv->device);
1973        pp_params.dev = priv->device;
1974        pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1975        pp_params.offset = stmmac_rx_offset(priv);
1976        pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
1977
1978        rx_q->page_pool = page_pool_create(&pp_params);
1979        if (IS_ERR(rx_q->page_pool)) {
1980                ret = PTR_ERR(rx_q->page_pool);
1981                rx_q->page_pool = NULL;
1982                return ret;
1983        }
1984
1985        rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1986                                 sizeof(*rx_q->buf_pool),
1987                                 GFP_KERNEL);
1988        if (!rx_q->buf_pool)
1989                return -ENOMEM;
1990
1991        if (priv->extend_desc) {
1992                rx_q->dma_erx = dma_alloc_coherent(priv->device,
1993                                                   priv->dma_rx_size *
1994                                                   sizeof(struct dma_extended_desc),
1995                                                   &rx_q->dma_rx_phy,
1996                                                   GFP_KERNEL);
1997                if (!rx_q->dma_erx)
1998                        return -ENOMEM;
1999
2000        } else {
2001                rx_q->dma_rx = dma_alloc_coherent(priv->device,
2002                                                  priv->dma_rx_size *
2003                                                  sizeof(struct dma_desc),
2004                                                  &rx_q->dma_rx_phy,
2005                                                  GFP_KERNEL);
2006                if (!rx_q->dma_rx)
2007                        return -ENOMEM;
2008        }
2009
2010        if (stmmac_xdp_is_enabled(priv) &&
2011            test_bit(queue, priv->af_xdp_zc_qps))
2012                napi_id = ch->rxtx_napi.napi_id;
2013        else
2014                napi_id = ch->rx_napi.napi_id;
2015
2016        ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2017                               rx_q->queue_index,
2018                               napi_id);
2019        if (ret) {
2020                netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2021                return -EINVAL;
2022        }
2023
2024        return 0;
2025}
2026
2027static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2028{
2029        u32 rx_count = priv->plat->rx_queues_to_use;
2030        u32 queue;
2031        int ret;
2032
2033        /* RX queues buffers and DMA */
2034        for (queue = 0; queue < rx_count; queue++) {
2035                ret = __alloc_dma_rx_desc_resources(priv, queue);
2036                if (ret)
2037                        goto err_dma;
2038        }
2039
2040        return 0;
2041
2042err_dma:
2043        free_dma_rx_desc_resources(priv);
2044
2045        return ret;
2046}
2047
2048/**
2049 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2050 * @priv: private structure
2051 * @queue: TX queue index
2052 * Description: according to which descriptor type is in use (extended or
2053 * basic), this function allocates the TX resources for the given queue:
2054 * the descriptor ring in coherent memory plus the tx_skbuff and
2055 * tx_skbuff_dma bookkeeping arrays.
2056 */
2057static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2058{
2059        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2060        size_t size;
2061        void *addr;
2062
2063        tx_q->queue_index = queue;
2064        tx_q->priv_data = priv;
2065
2066        tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2067                                      sizeof(*tx_q->tx_skbuff_dma),
2068                                      GFP_KERNEL);
2069        if (!tx_q->tx_skbuff_dma)
2070                return -ENOMEM;
2071
2072        tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2073                                  sizeof(struct sk_buff *),
2074                                  GFP_KERNEL);
2075        if (!tx_q->tx_skbuff)
2076                return -ENOMEM;
2077
2078        if (priv->extend_desc)
2079                size = sizeof(struct dma_extended_desc);
2080        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2081                size = sizeof(struct dma_edesc);
2082        else
2083                size = sizeof(struct dma_desc);
2084
2085        size *= priv->dma_tx_size;
2086
2087        addr = dma_alloc_coherent(priv->device, size,
2088                                  &tx_q->dma_tx_phy, GFP_KERNEL);
2089        if (!addr)
2090                return -ENOMEM;
2091
2092        if (priv->extend_desc)
2093                tx_q->dma_etx = addr;
2094        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2095                tx_q->dma_entx = addr;
2096        else
2097                tx_q->dma_tx = addr;
2098
2099        return 0;
2100}
2101
2102static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2103{
2104        u32 tx_count = priv->plat->tx_queues_to_use;
2105        u32 queue;
2106        int ret;
2107
2108        /* TX queues buffers and DMA */
2109        for (queue = 0; queue < tx_count; queue++) {
2110                ret = __alloc_dma_tx_desc_resources(priv, queue);
2111                if (ret)
2112                        goto err_dma;
2113        }
2114
2115        return 0;
2116
2117err_dma:
2118        free_dma_tx_desc_resources(priv);
2119        return ret;
2120}
2121
2122/**
2123 * alloc_dma_desc_resources - alloc TX/RX resources.
2124 * @priv: private structure
2125 * Description: according to which descriptor type is in use (extended or
2126 * basic), this function allocates the resources for the TX and RX paths.
2127 * On the reception side, for example, it pre-allocates the RX buffers in
2128 * order to allow the zero-copy mechanism.
2129 */
2130static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2131{
2132        /* RX Allocation */
2133        int ret = alloc_dma_rx_desc_resources(priv);
2134
2135        if (ret)
2136                return ret;
2137
2138        ret = alloc_dma_tx_desc_resources(priv);
2139
2140        return ret;
2141}
2142
2143/**
2144 * free_dma_desc_resources - free dma desc resources
2145 * @priv: private structure
2146 */
2147static void free_dma_desc_resources(struct stmmac_priv *priv)
2148{
2149        /* Release the DMA TX socket buffers */
2150        free_dma_tx_desc_resources(priv);
2151
2152        /* Release the DMA RX socket buffers later
2153         * to ensure all pending XDP_TX buffers are returned.
2154         */
2155        free_dma_rx_desc_resources(priv);
2156}
2157
2158/**
2159 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2160 *  @priv: driver private structure
2161 *  Description: It is used for enabling the rx queues in the MAC
2162 */
2163static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2164{
2165        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2166        int queue;
2167        u8 mode;
2168
2169        for (queue = 0; queue < rx_queues_count; queue++) {
2170                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2171                stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2172        }
2173}
2174
2175/**
2176 * stmmac_start_rx_dma - start RX DMA channel
2177 * @priv: driver private structure
2178 * @chan: RX channel index
2179 * Description:
2180 * This starts an RX DMA channel
2181 */
2182static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2183{
2184        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2185        stmmac_start_rx(priv, priv->ioaddr, chan);
2186}
2187
2188/**
2189 * stmmac_start_tx_dma - start TX DMA channel
2190 * @priv: driver private structure
2191 * @chan: TX channel index
2192 * Description:
2193 * This starts a TX DMA channel
2194 */
2195static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2196{
2197        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2198        stmmac_start_tx(priv, priv->ioaddr, chan);
2199}
2200
2201/**
2202 * stmmac_stop_rx_dma - stop RX DMA channel
2203 * @priv: driver private structure
2204 * @chan: RX channel index
2205 * Description:
2206 * This stops an RX DMA channel
2207 */
2208static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2209{
2210        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2211        stmmac_stop_rx(priv, priv->ioaddr, chan);
2212}
2213
2214/**
2215 * stmmac_stop_tx_dma - stop TX DMA channel
2216 * @priv: driver private structure
2217 * @chan: TX channel index
2218 * Description:
2219 * This stops a TX DMA channel
2220 */
2221static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2222{
2223        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2224        stmmac_stop_tx(priv, priv->ioaddr, chan);
2225}
2226
2227/**
2228 * stmmac_start_all_dma - start all RX and TX DMA channels
2229 * @priv: driver private structure
2230 * Description:
2231 * This starts all the RX and TX DMA channels
2232 */
2233static void stmmac_start_all_dma(struct stmmac_priv *priv)
2234{
2235        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2236        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2237        u32 chan = 0;
2238
2239        for (chan = 0; chan < rx_channels_count; chan++)
2240                stmmac_start_rx_dma(priv, chan);
2241
2242        for (chan = 0; chan < tx_channels_count; chan++)
2243                stmmac_start_tx_dma(priv, chan);
2244}
2245
2246/**
2247 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2248 * @priv: driver private structure
2249 * Description:
2250 * This stops the RX and TX DMA channels
2251 */
2252static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2253{
2254        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2255        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2256        u32 chan = 0;
2257
2258        for (chan = 0; chan < rx_channels_count; chan++)
2259                stmmac_stop_rx_dma(priv, chan);
2260
2261        for (chan = 0; chan < tx_channels_count; chan++)
2262                stmmac_stop_tx_dma(priv, chan);
2263}
2264
2265/**
2266 *  stmmac_dma_operation_mode - HW DMA operation mode
2267 *  @priv: driver private structure
2268 *  Description: it is used for configuring the DMA operation mode register in
2269 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2270 */
2271static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2272{
2273        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2274        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2275        int rxfifosz = priv->plat->rx_fifo_size;
2276        int txfifosz = priv->plat->tx_fifo_size;
2277        u32 txmode = 0;
2278        u32 rxmode = 0;
2279        u32 chan = 0;
2280        u8 qmode = 0;
2281
2282        if (rxfifosz == 0)
2283                rxfifosz = priv->dma_cap.rx_fifo_size;
2284        if (txfifosz == 0)
2285                txfifosz = priv->dma_cap.tx_fifo_size;
2286
2287        /* Adjust for real per queue fifo size */
2288        rxfifosz /= rx_channels_count;
2289        txfifosz /= tx_channels_count;
2290
2291        if (priv->plat->force_thresh_dma_mode) {
2292                txmode = tc;
2293                rxmode = tc;
2294        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2295                /*
2296                 * On GMAC, Store-and-Forward (SF) mode can be enabled
2297                 * to perform the TX COE in HW. This requires that:
2298                 * 1) TX COE is actually supported, and
2299                 * 2) the Jumbo frame support is not bugged in a way
2300                 *    that forbids inserting the csum in the TDES.
2301                 */
2302                txmode = SF_DMA_MODE;
2303                rxmode = SF_DMA_MODE;
2304                priv->xstats.threshold = SF_DMA_MODE;
2305        } else {
2306                txmode = tc;
2307                rxmode = SF_DMA_MODE;
2308        }
2309
2310        /* configure all channels */
2311        for (chan = 0; chan < rx_channels_count; chan++) {
2312                struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2313                u32 buf_size;
2314
2315                qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2316
2317                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2318                                rxfifosz, qmode);
2319
2320                if (rx_q->xsk_pool) {
2321                        buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2322                        stmmac_set_dma_bfsize(priv, priv->ioaddr,
2323                                              buf_size,
2324                                              chan);
2325                } else {
2326                        stmmac_set_dma_bfsize(priv, priv->ioaddr,
2327                                              priv->dma_buf_sz,
2328                                              chan);
2329                }
2330        }
2331
2332        for (chan = 0; chan < tx_channels_count; chan++) {
2333                qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2334
2335                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2336                                txfifosz, qmode);
2337        }
2338}
2339
2340static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2341{
2342        struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2343        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2344        struct xsk_buff_pool *pool = tx_q->xsk_pool;
2345        unsigned int entry = tx_q->cur_tx;
2346        struct dma_desc *tx_desc = NULL;
2347        struct xdp_desc xdp_desc;
2348        bool work_done = true;
2349
2350        /* Avoids TX time-out as we are sharing with slow path */
2351        nq->trans_start = jiffies;
2352
2353        budget = min(budget, stmmac_tx_avail(priv, queue));
2354
2355        while (budget-- > 0) {
2356                dma_addr_t dma_addr;
2357                bool set_ic;
2358
2359                /* We share the ring with the slow path, so stop XSK TX desc
2360                 * submission when available ring space drops below the threshold.
2361                 */
2362                if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2363                    !netif_carrier_ok(priv->dev)) {
2364                        work_done = false;
2365                        break;
2366                }
2367
2368                if (!xsk_tx_peek_desc(pool, &xdp_desc))
2369                        break;
2370
2371                if (likely(priv->extend_desc))
2372                        tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2373                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2374                        tx_desc = &tx_q->dma_entx[entry].basic;
2375                else
2376                        tx_desc = tx_q->dma_tx + entry;
2377
2378                dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2379                xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2380
2381                tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2382
2383                /* To return XDP buffer to XSK pool, we simply call
2384                 * xsk_tx_completed(), so we don't need to fill up
2385                 * 'buf' and 'xdpf'.
2386                 */
2387                tx_q->tx_skbuff_dma[entry].buf = 0;
2388                tx_q->xdpf[entry] = NULL;
2389
2390                tx_q->tx_skbuff_dma[entry].map_as_page = false;
2391                tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2392                tx_q->tx_skbuff_dma[entry].last_segment = true;
2393                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2394
2395                stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2396
2397                tx_q->tx_count_frames++;
2398
2399                if (!priv->tx_coal_frames[queue])
2400                        set_ic = false;
2401                else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2402                        set_ic = true;
2403                else
2404                        set_ic = false;
2405
2406                if (set_ic) {
2407                        tx_q->tx_count_frames = 0;
2408                        stmmac_set_tx_ic(priv, tx_desc);
2409                        priv->xstats.tx_set_ic_bit++;
2410                }
2411
2412                stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2413                                       true, priv->mode, true, true,
2414                                       xdp_desc.len);
2415
2416                stmmac_enable_dma_transmission(priv, priv->ioaddr);
2417
2418                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2419                entry = tx_q->cur_tx;
2420        }
2421
2422        if (tx_desc) {
2423                stmmac_flush_tx_descriptors(priv, queue);
2424                xsk_tx_release(pool);
2425        }
2426
2427        /* Return true only if both of these conditions are met:
2428         *  a) TX budget is still available, and
2429         *  b) work_done = true when the XSK TX desc peek is empty (no more
2430         *     pending XSK TX frames for transmission)
2431         */
2432        return !!budget && work_done;
2433}
2434
2435/**
2436 * stmmac_tx_clean - to manage the transmission completion
2437 * @priv: driver private structure
2438 * @budget: napi budget limiting this function's packet handling
2439 * @queue: TX queue index
2440 * Description: it reclaims the transmit resources after transmission completes.
2441 */
2442static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2443{
2444        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2445        unsigned int bytes_compl = 0, pkts_compl = 0;
2446        unsigned int entry, xmits = 0, count = 0;
2447
2448        __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2449
2450        priv->xstats.tx_clean++;
2451
2452        tx_q->xsk_frames_done = 0;
2453
2454        entry = tx_q->dirty_tx;
2455
2456        /* Try to clean all completed TX frames in one shot */
2457        while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2458                struct xdp_frame *xdpf;
2459                struct sk_buff *skb;
2460                struct dma_desc *p;
2461                int status;
2462
2463                if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2464                    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2465                        xdpf = tx_q->xdpf[entry];
2466                        skb = NULL;
2467                } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2468                        xdpf = NULL;
2469                        skb = tx_q->tx_skbuff[entry];
2470                } else {
2471                        xdpf = NULL;
2472                        skb = NULL;
2473                }
2474
2475                if (priv->extend_desc)
2476                        p = (struct dma_desc *)(tx_q->dma_etx + entry);
2477                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2478                        p = &tx_q->dma_entx[entry].basic;
2479                else
2480                        p = tx_q->dma_tx + entry;
2481
2482                status = stmmac_tx_status(priv, &priv->dev->stats,
2483                                &priv->xstats, p, priv->ioaddr);
2484                /* Check if the descriptor is owned by the DMA */
2485                if (unlikely(status & tx_dma_own))
2486                        break;
2487
2488                count++;
2489
2490                /* Make sure descriptor fields are read after reading
2491                 * the own bit.
2492                 */
2493                dma_rmb();
2494
2495                /* Just consider the last segment and ...*/
2496                if (likely(!(status & tx_not_ls))) {
2497                        /* ... verify the status error condition */
2498                        if (unlikely(status & tx_err)) {
2499                                priv->dev->stats.tx_errors++;
2500                        } else {
2501                                priv->dev->stats.tx_packets++;
2502                                priv->xstats.tx_pkt_n++;
2503                        }
2504                        if (skb)
2505                                stmmac_get_tx_hwtstamp(priv, p, skb);
2506                }
2507
2508                if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2509                           tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2510                        if (tx_q->tx_skbuff_dma[entry].map_as_page)
2511                                dma_unmap_page(priv->device,
2512                                               tx_q->tx_skbuff_dma[entry].buf,
2513                                               tx_q->tx_skbuff_dma[entry].len,
2514                                               DMA_TO_DEVICE);
2515                        else
2516                                dma_unmap_single(priv->device,
2517                                                 tx_q->tx_skbuff_dma[entry].buf,
2518                                                 tx_q->tx_skbuff_dma[entry].len,
2519                                                 DMA_TO_DEVICE);
2520                        tx_q->tx_skbuff_dma[entry].buf = 0;
2521                        tx_q->tx_skbuff_dma[entry].len = 0;
2522                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
2523                }
2524
2525                stmmac_clean_desc3(priv, tx_q, p);
2526
2527                tx_q->tx_skbuff_dma[entry].last_segment = false;
2528                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2529
2530                if (xdpf &&
2531                    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2532                        xdp_return_frame_rx_napi(xdpf);
2533                        tx_q->xdpf[entry] = NULL;
2534                }
2535
2536                if (xdpf &&
2537                    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2538                        xdp_return_frame(xdpf);
2539                        tx_q->xdpf[entry] = NULL;
2540                }
2541
2542                if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2543                        tx_q->xsk_frames_done++;
2544
2545                if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2546                        if (likely(skb)) {
2547                                pkts_compl++;
2548                                bytes_compl += skb->len;
2549                                dev_consume_skb_any(skb);
2550                                tx_q->tx_skbuff[entry] = NULL;
2551                        }
2552                }
2553
2554                stmmac_release_tx_desc(priv, p, priv->mode);
2555
2556                entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2557        }
2558        tx_q->dirty_tx = entry;
2559
2560        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2561                                  pkts_compl, bytes_compl);
2562
2563        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2564                                                                queue))) &&
2565            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2566
2567                netif_dbg(priv, tx_done, priv->dev,
2568                          "%s: restart transmit\n", __func__);
2569                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2570        }
2571
2572        if (tx_q->xsk_pool) {
2573                bool work_done;
2574
2575                if (tx_q->xsk_frames_done)
2576                        xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2577
2578                if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2579                        xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2580
2581                /* For XSK TX, we try to send as many as possible.
2582                 * If XSK work done (XSK TX desc empty and budget still
2583                 * available), return "budget - 1" to reenable TX IRQ.
2584                 * Else, return "budget" to make NAPI continue polling.
2585                 */
2586                work_done = stmmac_xdp_xmit_zc(priv, queue,
2587                                               STMMAC_XSK_TX_BUDGET_MAX);
2588                if (work_done)
2589                        xmits = budget - 1;
2590                else
2591                        xmits = budget;
2592        }
2593
2594        if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2595            priv->eee_sw_timer_en) {
2596                stmmac_enable_eee_mode(priv);
2597                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2598        }
2599
2600        /* We still have pending packets; re-arm the timer for another pass */
2601        if (tx_q->dirty_tx != tx_q->cur_tx)
2602                hrtimer_start(&tx_q->txtimer,
2603                              STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2604                              HRTIMER_MODE_REL);
2605
2606        __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2607
2608        /* Combine decisions from TX clean and XSK TX */
2609        return max(count, xmits);
2610}
2611
2612/**
2613 * stmmac_tx_err - to manage the tx error
2614 * @priv: driver private structure
2615 * @chan: channel index
2616 * Description: it cleans the descriptors and restarts the transmission
2617 * in case of transmission errors.
2618 */
2619static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2620{
2621        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2622
2623        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2624
2625        stmmac_stop_tx_dma(priv, chan);
2626        dma_free_tx_skbufs(priv, chan);
2627        stmmac_clear_tx_descriptors(priv, chan);
2628        tx_q->dirty_tx = 0;
2629        tx_q->cur_tx = 0;
2630        tx_q->mss = 0;
2631        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2632        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2633                            tx_q->dma_tx_phy, chan);
2634        stmmac_start_tx_dma(priv, chan);
2635
2636        priv->dev->stats.tx_errors++;
2637        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2638}
2639
2640/**
2641 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2642 *  @priv: driver private structure
2643 *  @txmode: TX operating mode
2644 *  @rxmode: RX operating mode
2645 *  @chan: channel index
2646 *  Description: it is used for configuring the DMA operation mode at
2647 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2648 *  mode.
2649 */
2650static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2651                                          u32 rxmode, u32 chan)
2652{
2653        u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2654        u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2655        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2656        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2657        int rxfifosz = priv->plat->rx_fifo_size;
2658        int txfifosz = priv->plat->tx_fifo_size;
2659
2660        if (rxfifosz == 0)
2661                rxfifosz = priv->dma_cap.rx_fifo_size;
2662        if (txfifosz == 0)
2663                txfifosz = priv->dma_cap.tx_fifo_size;
2664
2665        /* Adjust for real per queue fifo size */
2666        rxfifosz /= rx_channels_count;
2667        txfifosz /= tx_channels_count;
2668
2669        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2670        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2671}
2672
2673static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2674{
2675        int ret;
2676
2677        ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2678                        priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2679        if (ret && (ret != -EINVAL)) {
2680                stmmac_global_err(priv);
2681                return true;
2682        }
2683
2684        return false;
2685}
2686
2687static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2688{
2689        int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2690                                                 &priv->xstats, chan, dir);
2691        struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2692        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2693        struct stmmac_channel *ch = &priv->channel[chan];
2694        struct napi_struct *rx_napi;
2695        struct napi_struct *tx_napi;
2696        unsigned long flags;
2697
2698        rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2699        tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2700
2701        if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2702                if (napi_schedule_prep(rx_napi)) {
2703                        spin_lock_irqsave(&ch->lock, flags);
2704                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2705                        spin_unlock_irqrestore(&ch->lock, flags);
2706                        __napi_schedule(rx_napi);
2707                }
2708        }
2709
2710        if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2711                if (napi_schedule_prep(tx_napi)) {
2712                        spin_lock_irqsave(&ch->lock, flags);
2713                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2714                        spin_unlock_irqrestore(&ch->lock, flags);
2715                        __napi_schedule(tx_napi);
2716                }
2717        }
2718
2719        return status;
2720}
2721
2722/**
2723 * stmmac_dma_interrupt - DMA ISR
2724 * @priv: driver private structure
2725 * Description: this is the DMA ISR. It is called by the main ISR.
2726 * It calls the dwmac dma routine and schedules the poll method when
2727 * there is work to be done.
2728 */
2729static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2730{
2731        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2732        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2733        u32 channels_to_check = tx_channel_count > rx_channel_count ?
2734                                tx_channel_count : rx_channel_count;
2735        u32 chan;
2736        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2737
2738        /* Make sure we never check beyond our status buffer. */
2739        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2740                channels_to_check = ARRAY_SIZE(status);
2741
2742        for (chan = 0; chan < channels_to_check; chan++)
2743                status[chan] = stmmac_napi_check(priv, chan,
2744                                                 DMA_DIR_RXTX);
2745
2746        for (chan = 0; chan < tx_channel_count; chan++) {
2747                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2748                        /* Try to bump up the dma threshold on this failure */
2749                        if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2750                            (tc <= 256)) {
2751                                tc += 64;
2752                                if (priv->plat->force_thresh_dma_mode)
2753                                        stmmac_set_dma_operation_mode(priv,
2754                                                                      tc,
2755                                                                      tc,
2756                                                                      chan);
2757                                else
2758                                        stmmac_set_dma_operation_mode(priv,
2759                                                                    tc,
2760                                                                    SF_DMA_MODE,
2761                                                                    chan);
2762                                priv->xstats.threshold = tc;
2763                        }
2764                } else if (unlikely(status[chan] == tx_hard_error)) {
2765                        stmmac_tx_err(priv, chan);
2766                }
2767        }
2768}
2769
2770/**
2771 * stmmac_mmc_setup: setup the MAC Management Counters (MMC)
2772 * @priv: driver private structure
2773 * Description: this masks the MMC irq, since the counters are managed in SW.
2774 */
2775static void stmmac_mmc_setup(struct stmmac_priv *priv)
2776{
2777        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2778                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2779
2780        stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2781
2782        if (priv->dma_cap.rmon) {
2783                stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2784                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2785        } else {
2786                netdev_info(priv->dev, "No MAC Management Counters available\n");
        }
2787}
2788
2789/**
2790 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2791 * @priv: driver private structure
2792 * Description:
2793 *  newer GMAC chip generations have a register that indicates the
2794 *  presence of the optional features/functions.
2795 *  This can also be used to override the values passed through the
2796 *  platform, which are necessary for old MAC10/100 and GMAC chips.
2797 */
2798static int stmmac_get_hw_features(struct stmmac_priv *priv)
2799{
2800        return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2801}
2802
2803/**
2804 * stmmac_check_ether_addr - check if the MAC addr is valid
2805 * @priv: driver private structure
2806 * Description:
2807 * it verifies that the MAC address is valid; if it is not, it
2808 * generates a random MAC address.
2809 */
2810static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2811{
2812        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2813                stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2814                if (!is_valid_ether_addr(priv->dev->dev_addr))
2815                        eth_hw_addr_random(priv->dev);
2816                dev_info(priv->device, "device MAC address %pM\n",
2817                         priv->dev->dev_addr);
2818        }
2819}
2820
2821/**
2822 * stmmac_init_dma_engine - DMA init.
2823 * @priv: driver private structure
2824 * Description:
2825 * It initializes the DMA by invoking the specific MAC/GMAC callback.
2826 * Some DMA parameters can be passed from the platform;
2827 * if they are not passed, a default is kept for the MAC or GMAC.
2828 */
2829static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2830{
2831        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2832        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2833        u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2834        struct stmmac_rx_queue *rx_q;
2835        struct stmmac_tx_queue *tx_q;
2836        u32 chan = 0;
2837        int atds = 0;
2838        int ret = 0;
2839
2840        if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2841                dev_err(priv->device, "Invalid DMA configuration\n");
2842                return -EINVAL;
2843        }
2844
2845        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2846                atds = 1;
2847
2848        ret = stmmac_reset(priv, priv->ioaddr);
2849        if (ret) {
2850                dev_err(priv->device, "Failed to reset the dma\n");
2851                return ret;
2852        }
2853
2854        /* DMA Configuration */
2855        stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2856
2857        if (priv->plat->axi)
2858                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2859
2860        /* DMA CSR Channel configuration */
2861        for (chan = 0; chan < dma_csr_ch; chan++)
2862                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2863
2864        /* DMA RX Channel Configuration */
2865        for (chan = 0; chan < rx_channels_count; chan++) {
2866                rx_q = &priv->rx_queue[chan];
2867
2868                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2869                                    rx_q->dma_rx_phy, chan);
2870
2871                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2872                                     (rx_q->buf_alloc_num *
2873                                      sizeof(struct dma_desc));
2874                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2875                                       rx_q->rx_tail_addr, chan);
2876        }
2877
2878        /* DMA TX Channel Configuration */
2879        for (chan = 0; chan < tx_channels_count; chan++) {
2880                tx_q = &priv->tx_queue[chan];
2881
2882                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2883                                    tx_q->dma_tx_phy, chan);
2884
2885                tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2886                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2887                                       tx_q->tx_tail_addr, chan);
2888        }
2889
2890        return ret;
2891}
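
/* The tail pointers above are plain physical-address arithmetic over the
 * descriptor rings. A self-contained sketch with made-up values (the base
 * address and fill level are hypothetical; 16 bytes assumes the four-word
 * struct dma_desc layout):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_rx_phy = 0x80000000ULL;	/* hypothetical ring base */
	unsigned int buf_alloc_num = 512;	/* hypothetical filled entries */
	size_t desc_size = 16;			/* sizeof(struct dma_desc) */

	/* Same formula as stmmac_init_dma_engine():
	 * rx_tail_addr = dma_rx_phy + buf_alloc_num * sizeof(struct dma_desc)
	 */
	uint64_t rx_tail = dma_rx_phy + (uint64_t)buf_alloc_num * desc_size;

	printf("rx tail = 0x%llx\n", (unsigned long long)rx_tail); /* 0x80002000 */
	return 0;
}
#endif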
2892
2893static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2894{
2895        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2896
2897        hrtimer_start(&tx_q->txtimer,
2898                      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2899                      HRTIMER_MODE_REL);
2900}
2901
2902/**
2903 * stmmac_tx_timer - mitigation sw timer for tx.
2904 * @t: pointer to the hrtimer embedded in the TX queue
2905 * Description:
2906 * This is the timer handler that schedules the NAPI context running stmmac_tx_clean.
2907 */
2908static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2909{
2910        struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2911        struct stmmac_priv *priv = tx_q->priv_data;
2912        struct stmmac_channel *ch;
2913        struct napi_struct *napi;
2914
2915        ch = &priv->channel[tx_q->queue_index];
2916        napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2917
2918        if (likely(napi_schedule_prep(napi))) {
2919                unsigned long flags;
2920
2921                spin_lock_irqsave(&ch->lock, flags);
2922                stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2923                spin_unlock_irqrestore(&ch->lock, flags);
2924                __napi_schedule(napi);
2925        }
2926
2927        return HRTIMER_NORESTART;
2928}
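
/* The napi_schedule_prep()/__napi_schedule() split above is the standard
 * idiom for doing work between claiming the NAPI instance and queuing it;
 * here the claimed window is used to mask the channel's DMA interrupt
 * under ch->lock. The bare shape of the idiom (stmmac specifics stripped,
 * helper name hypothetical):
 */
#if 0
static void example_kick_napi(struct napi_struct *napi, spinlock_t *lock)
{
	if (napi_schedule_prep(napi)) {	/* atomically claim NAPI_STATE_SCHED */
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		/* mask the interrupt source here so it cannot refire
		 * until the poll routine re-enables it
		 */
		spin_unlock_irqrestore(lock, flags);
		__napi_schedule(napi);	/* queue the poll */
	}
}
#endif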
2929
2930/**
2931 * stmmac_init_coalesce - init mitigation options.
2932 * @priv: driver private structure
2933 * Description:
2934 * This inits the coalesce parameters: i.e. timer rate,
2935 * timer handler and default threshold used for enabling the
2936 * interrupt on completion bit.
2937 */
2938static void stmmac_init_coalesce(struct stmmac_priv *priv)
2939{
2940        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2941        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2942        u32 chan;
2943
2944        for (chan = 0; chan < tx_channel_count; chan++) {
2945                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2946
2947                priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2948                priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2949
2950                hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2951                tx_q->txtimer.function = stmmac_tx_timer;
2952        }
2953
2954        for (chan = 0; chan < rx_channel_count; chan++)
2955                priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2956}
2957
2958static void stmmac_set_rings_length(struct stmmac_priv *priv)
2959{
2960        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2961        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2962        u32 chan;
2963
2964        /* set TX ring length */
2965        for (chan = 0; chan < tx_channels_count; chan++)
2966                stmmac_set_tx_ring_len(priv, priv->ioaddr,
2967                                       (priv->dma_tx_size - 1), chan);
2968
2969        /* set RX ring length */
2970        for (chan = 0; chan < rx_channels_count; chan++)
2971                stmmac_set_rx_ring_len(priv, priv->ioaddr,
2972                                       (priv->dma_rx_size - 1), chan);
2973}
2974
2975/**
2976 *  stmmac_set_tx_queue_weight - Set TX queue weight
2977 *  @priv: driver private structure
2978 *  Description: It is used for setting the TX queue weights
2979 */
2980static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2981{
2982        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2983        u32 weight;
2984        u32 queue;
2985
2986        for (queue = 0; queue < tx_queues_count; queue++) {
2987                weight = priv->plat->tx_queues_cfg[queue].weight;
2988                stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2989        }
2990}
2991
2992/**
2993 *  stmmac_configure_cbs - Configure CBS in TX queue
2994 *  @priv: driver private structure
2995 *  Description: It is used for configuring CBS in AVB TX queues
2996 */
2997static void stmmac_configure_cbs(struct stmmac_priv *priv)
2998{
2999        u32 tx_queues_count = priv->plat->tx_queues_to_use;
3000        u32 mode_to_use;
3001        u32 queue;
3002
3003        /* queue 0 is reserved for legacy traffic */
3004        for (queue = 1; queue < tx_queues_count; queue++) {
3005                mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3006                if (mode_to_use == MTL_QUEUE_DCB)
3007                        continue;
3008
3009                stmmac_config_cbs(priv, priv->hw,
3010                                priv->plat->tx_queues_cfg[queue].send_slope,
3011                                priv->plat->tx_queues_cfg[queue].idle_slope,
3012                                priv->plat->tx_queues_cfg[queue].high_credit,
3013                                priv->plat->tx_queues_cfg[queue].low_credit,
3014                                queue);
3015        }
3016}
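
/* The send/idle slope and credit values above are taken verbatim from
 * platform data. For background, in plain 802.1Qav terms (this is the
 * standard's bookkeeping, not the hardware register encoding, and the
 * numbers are hypothetical) the slopes relate to the reserved bandwidth
 * as follows:
 */
#if 0
#include <stdio.h>

int main(void)
{
	long port_rate = 1000000;	/* port speed, kbit/s (1 Gb/s) */
	long idle_slope = 200000;	/* bandwidth reserved for the class, kbit/s */
	long send_slope = idle_slope - port_rate; /* credit drain while transmitting */

	printf("idle_slope=%ld kbit/s, send_slope=%ld kbit/s\n",
	       idle_slope, send_slope);	/* 200000, -800000 */
	return 0;
}
#endif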
3017
3018/**
3019 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3020 *  @priv: driver private structure
3021 *  Description: It is used for mapping RX queues to RX dma channels
3022 */
3023static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3024{
3025        u32 rx_queues_count = priv->plat->rx_queues_to_use;
3026        u32 queue;
3027        u32 chan;
3028
3029        for (queue = 0; queue < rx_queues_count; queue++) {
3030                chan = priv->plat->rx_queues_cfg[queue].chan;
3031                stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3032        }
3033}
3034
3035/**
3036 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3037 *  @priv: driver private structure
3038 *  Description: It is used for configuring the RX Queue Priority
3039 */
3040static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3041{
3042        u32 rx_queues_count = priv->plat->rx_queues_to_use;
3043        u32 queue;
3044        u32 prio;
3045
3046        for (queue = 0; queue < rx_queues_count; queue++) {
3047                if (!priv->plat->rx_queues_cfg[queue].use_prio)
3048                        continue;
3049
3050                prio = priv->plat->rx_queues_cfg[queue].prio;
3051                stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3052        }
3053}
3054
3055/**
3056 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3057 *  @priv: driver private structure
3058 *  Description: It is used for configuring the TX Queue Priority
3059 */
3060static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3061{
3062        u32 tx_queues_count = priv->plat->tx_queues_to_use;
3063        u32 queue;
3064        u32 prio;
3065
3066        for (queue = 0; queue < tx_queues_count; queue++) {
3067                if (!priv->plat->tx_queues_cfg[queue].use_prio)
3068                        continue;
3069
3070                prio = priv->plat->tx_queues_cfg[queue].prio;
3071                stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3072        }
3073}
3074
3075/**
3076 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3077 *  @priv: driver private structure
3078 *  Description: It is used for configuring the RX queue routing
3079 */
3080static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3081{
3082        u32 rx_queues_count = priv->plat->rx_queues_to_use;
3083        u32 queue;
3084        u8 packet;
3085
3086        for (queue = 0; queue < rx_queues_count; queue++) {
3087                /* no specific packet type routing specified for the queue */
3088                if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3089                        continue;
3090
3091                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3092                stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3093        }
3094}
3095
3096static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3097{
3098        if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3099                priv->rss.enable = false;
3100                return;
3101        }
3102
3103        if (priv->dev->features & NETIF_F_RXHASH)
3104                priv->rss.enable = true;
3105        else
3106                priv->rss.enable = false;
3107
3108        stmmac_rss_configure(priv, priv->hw, &priv->rss,
3109                             priv->plat->rx_queues_to_use);
3110}
3111
3112/**
3113 *  stmmac_mtl_configuration - Configure MTL
3114 *  @priv: driver private structure
3115 *  Description: It is used for configuring MTL
3116 */
3117static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3118{
3119        u32 rx_queues_count = priv->plat->rx_queues_to_use;
3120        u32 tx_queues_count = priv->plat->tx_queues_to_use;
3121
3122        if (tx_queues_count > 1)
3123                stmmac_set_tx_queue_weight(priv);
3124
3125        /* Configure MTL RX algorithms */
3126        if (rx_queues_count > 1)
3127                stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3128                                priv->plat->rx_sched_algorithm);
3129
3130        /* Configure MTL TX algorithms */
3131        if (tx_queues_count > 1)
3132                stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3133                                priv->plat->tx_sched_algorithm);
3134
3135        /* Configure CBS in AVB TX queues */
3136        if (tx_queues_count > 1)
3137                stmmac_configure_cbs(priv);
3138
3139        /* Map RX MTL to DMA channels */
3140        stmmac_rx_queue_dma_chan_map(priv);
3141
3142        /* Enable MAC RX Queues */
3143        stmmac_mac_enable_rx_queues(priv);
3144
3145        /* Set RX priorities */
3146        if (rx_queues_count > 1)
3147                stmmac_mac_config_rx_queues_prio(priv);
3148
3149        /* Set TX priorities */
3150        if (tx_queues_count > 1)
3151                stmmac_mac_config_tx_queues_prio(priv);
3152
3153        /* Set RX routing */
3154        if (rx_queues_count > 1)
3155                stmmac_mac_config_rx_queues_routing(priv);
3156
3157        /* Receive Side Scaling */
3158        if (rx_queues_count > 1)
3159                stmmac_mac_config_rss(priv);
3160}
3161
3162static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3163{
3164        if (priv->dma_cap.asp) {
3165                netdev_info(priv->dev, "Enabling Safety Features\n");
3166                stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3167                                          priv->plat->safety_feat_cfg);
3168        } else {
3169                netdev_info(priv->dev, "No Safety Features support found\n");
3170        }
3171}
3172
3173static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3174{
3175        char *name;
3176
3177        clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3178        clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3179
3180        name = priv->wq_name;
3181        sprintf(name, "%s-fpe", priv->dev->name);
3182
3183        priv->fpe_wq = create_singlethread_workqueue(name);
3184        if (!priv->fpe_wq) {
3185                netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3186
3187                return -ENOMEM;
3188        }
3189        netdev_info(priv->dev, "FPE workqueue start");
3190
3191        return 0;
3192}
3193
3194/**
3195 * stmmac_hw_setup - setup mac in a usable state.
3196 *  @dev : pointer to the device structure.
3197 *  @init_ptp: initialize PTP if set
3198 *  Description:
3199 *  this is the main function to set up the HW in a usable state: the
3200 *  DMA engine is reset, the core registers are configured (e.g. AXI,
3201 *  checksum features, timers), and the DMA is ready to start receiving
3202 *  and transmitting.
3203 *  Return value:
3204 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3205 *  file on failure.
3206 */
3207static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3208{
3209        struct stmmac_priv *priv = netdev_priv(dev);
3210        u32 rx_cnt = priv->plat->rx_queues_to_use;
3211        u32 tx_cnt = priv->plat->tx_queues_to_use;
3212        bool sph_en;
3213        u32 chan;
3214        int ret;
3215
3216        /* DMA initialization and SW reset */
3217        ret = stmmac_init_dma_engine(priv);
3218        if (ret < 0) {
3219                netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3220                           __func__);
3221                return ret;
3222        }
3223
3224        /* Copy the MAC addr into the HW  */
3225        stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3226
3227        /* PS and related bits will be programmed according to the speed */
3228        if (priv->hw->pcs) {
3229                int speed = priv->plat->mac_port_sel_speed;
3230
3231                if ((speed == SPEED_10) || (speed == SPEED_100) ||
3232                    (speed == SPEED_1000)) {
3233                        priv->hw->ps = speed;
3234                } else {
3235                        dev_warn(priv->device, "invalid port speed\n");
3236                        priv->hw->ps = 0;
3237                }
3238        }
3239
3240        /* Initialize the MAC Core */
3241        stmmac_core_init(priv, priv->hw, dev);
3242
3243        /* Initialize MTL*/
3244        stmmac_mtl_configuration(priv);
3245
3246        /* Initialize Safety Features */
3247        stmmac_safety_feat_configuration(priv);
3248
3249        ret = stmmac_rx_ipc(priv, priv->hw);
3250        if (!ret) {
3251                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3252                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3253                priv->hw->rx_csum = 0;
3254        }
3255
3256        /* Enable the MAC Rx/Tx */
3257        stmmac_mac_set(priv, priv->ioaddr, true);
3258
3259        /* Set the HW DMA mode and the COE */
3260        stmmac_dma_operation_mode(priv);
3261
3262        stmmac_mmc_setup(priv);
3263
3264        if (init_ptp) {
3265                ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3266                if (ret < 0)
3267                        netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
3268
3269                ret = stmmac_init_ptp(priv);
3270                if (ret == -EOPNOTSUPP)
3271                        netdev_warn(priv->dev, "PTP not supported by HW\n");
3272                else if (ret)
3273                        netdev_warn(priv->dev, "PTP init failed\n");
3274        }
3275
3276        priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3277
3278        /* Convert the timer from msec to usec */
3279        if (!priv->tx_lpi_timer)
3280                priv->tx_lpi_timer = eee_timer * 1000;
3281
3282        if (priv->use_riwt) {
3283                u32 queue;
3284
3285                for (queue = 0; queue < rx_cnt; queue++) {
3286                        if (!priv->rx_riwt[queue])
3287                                priv->rx_riwt[queue] = DEF_DMA_RIWT;
3288
3289                        stmmac_rx_watchdog(priv, priv->ioaddr,
3290                                           priv->rx_riwt[queue], queue);
3291                }
3292        }
3293
3294        if (priv->hw->pcs)
3295                stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3296
3297        /* set TX and RX rings length */
3298        stmmac_set_rings_length(priv);
3299
3300        /* Enable TSO */
3301        if (priv->tso) {
3302                for (chan = 0; chan < tx_cnt; chan++) {
3303                        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3304
3305                        /* TSO and TBS cannot co-exist */
3306                        if (tx_q->tbs & STMMAC_TBS_AVAIL)
3307                                continue;
3308
3309                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3310                }
3311        }
3312
3313        /* Enable Split Header */
3314        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3315        for (chan = 0; chan < rx_cnt; chan++)
3316                stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3317
3318
3319        /* VLAN Tag Insertion */
3320        if (priv->dma_cap.vlins)
3321                stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3322
3323        /* TBS */
3324        for (chan = 0; chan < tx_cnt; chan++) {
3325                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3326                int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3327
3328                stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3329        }
3330
3331        /* Configure real RX and TX queues */
3332        netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3333        netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3334
3335        /* Start the ball rolling... */
3336        stmmac_start_all_dma(priv);
3337
3338        if (priv->dma_cap.fpesel) {
3339                stmmac_fpe_start_wq(priv);
3340
3341                if (priv->plat->fpe_cfg->enable)
3342                        stmmac_fpe_handshake(priv, true);
3343        }
3344
3345        return 0;
3346}
3347
3348static void stmmac_hw_teardown(struct net_device *dev)
3349{
3350        struct stmmac_priv *priv = netdev_priv(dev);
3351
3352        clk_disable_unprepare(priv->plat->clk_ptp_ref);
3353}
3354
3355static void stmmac_free_irq(struct net_device *dev,
3356                            enum request_irq_err irq_err, int irq_idx)
3357{
3358        struct stmmac_priv *priv = netdev_priv(dev);
3359        int j;
3360
3361        switch (irq_err) {
3362        case REQ_IRQ_ERR_ALL:
3363                irq_idx = priv->plat->tx_queues_to_use;
3364                fallthrough;
3365        case REQ_IRQ_ERR_TX:
3366                for (j = irq_idx - 1; j >= 0; j--) {
3367                        if (priv->tx_irq[j] > 0) {
3368                                irq_set_affinity_hint(priv->tx_irq[j], NULL);
3369                                free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3370                        }
3371                }
3372                irq_idx = priv->plat->rx_queues_to_use;
3373                fallthrough;
3374        case REQ_IRQ_ERR_RX:
3375                for (j = irq_idx - 1; j >= 0; j--) {
3376                        if (priv->rx_irq[j] > 0) {
3377                                irq_set_affinity_hint(priv->rx_irq[j], NULL);
3378                                free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3379                        }
3380                }
3381
3382                if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3383                        free_irq(priv->sfty_ue_irq, dev);
3384                fallthrough;
3385        case REQ_IRQ_ERR_SFTY_UE:
3386                if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3387                        free_irq(priv->sfty_ce_irq, dev);
3388                fallthrough;
3389        case REQ_IRQ_ERR_SFTY_CE:
3390                if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3391                        free_irq(priv->lpi_irq, dev);
3392                fallthrough;
3393        case REQ_IRQ_ERR_LPI:
3394                if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3395                        free_irq(priv->wol_irq, dev);
3396                fallthrough;
3397        case REQ_IRQ_ERR_WOL:
3398                free_irq(dev->irq, dev);
3399                fallthrough;
3400        case REQ_IRQ_ERR_MAC:
3401        case REQ_IRQ_ERR_NO:
3402                /* If the MAC IRQ request failed, there are no more IRQs to free */
3403                break;
3404        }
3405}
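
/* The switch above unwinds in the reverse of the request order used by
 * stmmac_request_irq_multi_msi(): each case label names the request that
 * failed and falls through to release everything obtained before it. The
 * idiom reduced to two hypothetical resources A and B (all helpers are
 * placeholders):
 */
#if 0
enum example_err { EX_ERR_ALL, EX_ERR_B, EX_ERR_A };

static void release_a(void);
static void release_b(void);

static void example_unwind(enum example_err err)
{
	switch (err) {
	case EX_ERR_ALL:	/* full teardown: release B, then A */
		release_b();
		fallthrough;
	case EX_ERR_B:		/* acquiring B failed: only A is held */
		release_a();
		fallthrough;
	case EX_ERR_A:		/* acquiring A failed: nothing held */
		break;
	}
}
#endif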
3406
3407static int stmmac_request_irq_multi_msi(struct net_device *dev)
3408{
3409        struct stmmac_priv *priv = netdev_priv(dev);
3410        enum request_irq_err irq_err;
3411        cpumask_t cpu_mask;
3412        int irq_idx = 0;
3413        char *int_name;
3414        int ret;
3415        int i;
3416
3417        /* For common interrupt */
3418        int_name = priv->int_name_mac;
3419        sprintf(int_name, "%s:%s", dev->name, "mac");
3420        ret = request_irq(dev->irq, stmmac_mac_interrupt,
3421                          0, int_name, dev);
3422        if (unlikely(ret < 0)) {
3423                netdev_err(priv->dev,
3424                           "%s: alloc mac MSI %d (error: %d)\n",
3425                           __func__, dev->irq, ret);
3426                irq_err = REQ_IRQ_ERR_MAC;
3427                goto irq_error;
3428        }
3429
3430        /* Request the Wake IRQ in case another line
3431         * is used for WoL
3432         */
3433        if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3434                int_name = priv->int_name_wol;
3435                sprintf(int_name, "%s:%s", dev->name, "wol");
3436                ret = request_irq(priv->wol_irq,
3437                                  stmmac_mac_interrupt,
3438                                  0, int_name, dev);
3439                if (unlikely(ret < 0)) {
3440                        netdev_err(priv->dev,
3441                                   "%s: alloc wol MSI %d (error: %d)\n",
3442                                   __func__, priv->wol_irq, ret);
3443                        irq_err = REQ_IRQ_ERR_WOL;
3444                        goto irq_error;
3445                }
3446        }
3447
3448        /* Request the LPI IRQ in case another line
3449         * is used for LPI
3450         */
3451        if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3452                int_name = priv->int_name_lpi;
3453                sprintf(int_name, "%s:%s", dev->name, "lpi");
3454                ret = request_irq(priv->lpi_irq,
3455                                  stmmac_mac_interrupt,
3456                                  0, int_name, dev);
3457                if (unlikely(ret < 0)) {
3458                        netdev_err(priv->dev,
3459                                   "%s: alloc lpi MSI %d (error: %d)\n",
3460                                   __func__, priv->lpi_irq, ret);
3461                        irq_err = REQ_IRQ_ERR_LPI;
3462                        goto irq_error;
3463                }
3464        }
3465
3466        /* Request the Safety Feature Correctable Error line in
3467         * case another line is used
3468         */
3469        if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3470                int_name = priv->int_name_sfty_ce;
3471                sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3472                ret = request_irq(priv->sfty_ce_irq,
3473                                  stmmac_safety_interrupt,
3474                                  0, int_name, dev);
3475                if (unlikely(ret < 0)) {
3476                        netdev_err(priv->dev,
3477                                   "%s: alloc sfty ce MSI %d (error: %d)\n",
3478                                   __func__, priv->sfty_ce_irq, ret);
3479                        irq_err = REQ_IRQ_ERR_SFTY_CE;
3480                        goto irq_error;
3481                }
3482        }
3483
3484        /* Request the Safety Feature Uncorrectable Error line in
3485         * case another line is used
3486         */
3487        if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3488                int_name = priv->int_name_sfty_ue;
3489                sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3490                ret = request_irq(priv->sfty_ue_irq,
3491                                  stmmac_safety_interrupt,
3492                                  0, int_name, dev);
3493                if (unlikely(ret < 0)) {
3494                        netdev_err(priv->dev,
3495                                   "%s: alloc sfty ue MSI %d (error: %d)\n",
3496                                   __func__, priv->sfty_ue_irq, ret);
3497                        irq_err = REQ_IRQ_ERR_SFTY_UE;
3498                        goto irq_error;
3499                }
3500        }
3501
3502        /* Request Rx MSI irq */
3503        for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3504                if (priv->rx_irq[i] == 0)
3505                        continue;
3506
3507                int_name = priv->int_name_rx_irq[i];
3508                sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3509                ret = request_irq(priv->rx_irq[i],
3510                                  stmmac_msi_intr_rx,
3511                                  0, int_name, &priv->rx_queue[i]);
3512                if (unlikely(ret < 0)) {
3513                        netdev_err(priv->dev,
3514                                   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3515                                   __func__, i, priv->rx_irq[i], ret);
3516                        irq_err = REQ_IRQ_ERR_RX;
3517                        irq_idx = i;
3518                        goto irq_error;
3519                }
3520                cpumask_clear(&cpu_mask);
3521                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3522                irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3523        }
3524
3525        /* Request Tx MSI irq */
3526        for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3527                if (priv->tx_irq[i] == 0)
3528                        continue;
3529
3530                int_name = priv->int_name_tx_irq[i];
3531                sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3532                ret = request_irq(priv->tx_irq[i],
3533                                  stmmac_msi_intr_tx,
3534                                  0, int_name, &priv->tx_queue[i]);
3535                if (unlikely(ret < 0)) {
3536                        netdev_err(priv->dev,
3537                                   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3538                                   __func__, i, priv->tx_irq[i], ret);
3539                        irq_err = REQ_IRQ_ERR_TX;
3540                        irq_idx = i;
3541                        goto irq_error;
3542                }
3543                cpumask_clear(&cpu_mask);
3544                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3545                irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3546        }
3547
3548        return 0;
3549
3550irq_error:
3551        stmmac_free_irq(dev, irq_err, irq_idx);
3552        return ret;
3553}
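
/* The affinity hints above round-robin each queue vector over the online
 * CPUs via i % num_online_cpus(). A sketch of the resulting spread for a
 * hypothetical 8-queue, 4-CPU system:
 */
#if 0
#include <stdio.h>

int main(void)
{
	int ncpus = 4;	/* stand-in for num_online_cpus() */
	int i;

	for (i = 0; i < 8; i++)
		printf("queue %d -> cpu %d\n", i, i % ncpus);
	/* queues 0..7 land on CPUs 0 1 2 3 0 1 2 3 */
	return 0;
}
#endif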
3554
3555static int stmmac_request_irq_single(struct net_device *dev)
3556{
3557        struct stmmac_priv *priv = netdev_priv(dev);
3558        enum request_irq_err irq_err;
3559        int ret;
3560
3561        ret = request_irq(dev->irq, stmmac_interrupt,
3562                          IRQF_SHARED, dev->name, dev);
3563        if (unlikely(ret < 0)) {
3564                netdev_err(priv->dev,
3565                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3566                           __func__, dev->irq, ret);
3567                irq_err = REQ_IRQ_ERR_MAC;
3568                goto irq_error;
3569        }
3570
3571        /* Request the Wake IRQ in case another line
3572         * is used for WoL
3573         */
3574        if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3575                ret = request_irq(priv->wol_irq, stmmac_interrupt,
3576                                  IRQF_SHARED, dev->name, dev);
3577                if (unlikely(ret < 0)) {
3578                        netdev_err(priv->dev,
3579                                   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3580                                   __func__, priv->wol_irq, ret);
3581                        irq_err = REQ_IRQ_ERR_WOL;
3582                        goto irq_error;
3583                }
3584        }
3585
3586        /* Request the LPI IRQ in case another line is used for LPI */
3587        if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3588                ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3589                                  IRQF_SHARED, dev->name, dev);
3590                if (unlikely(ret < 0)) {
3591                        netdev_err(priv->dev,
3592                                   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3593                                   __func__, priv->lpi_irq, ret);
3594                        irq_err = REQ_IRQ_ERR_LPI;
3595                        goto irq_error;
3596                }
3597        }
3598
3599        return 0;
3600
3601irq_error:
3602        stmmac_free_irq(dev, irq_err, 0);
3603        return ret;
3604}
3605
3606static int stmmac_request_irq(struct net_device *dev)
3607{
3608        struct stmmac_priv *priv = netdev_priv(dev);
3609        int ret;
3610
3611        /* Request the IRQ lines */
3612        if (priv->plat->multi_msi_en)
3613                ret = stmmac_request_irq_multi_msi(dev);
3614        else
3615                ret = stmmac_request_irq_single(dev);
3616
3617        return ret;
3618}
3619
3620/**
3621 *  stmmac_open - open entry point of the driver
3622 *  @dev : pointer to the device structure.
3623 *  Description:
3624 *  This function is the open entry point of the driver.
3625 *  Return value:
3626 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3627 *  file on failure.
3628 */
3629int stmmac_open(struct net_device *dev)
3630{
3631        struct stmmac_priv *priv = netdev_priv(dev);
3632        int mode = priv->plat->phy_interface;
3633        int bfsize = 0;
3634        u32 chan;
3635        int ret;
3636
3637        ret = pm_runtime_get_sync(priv->device);
3638        if (ret < 0) {
3639                pm_runtime_put_noidle(priv->device);
3640                return ret;
3641        }
3642
3643        if (priv->hw->pcs != STMMAC_PCS_TBI &&
3644            priv->hw->pcs != STMMAC_PCS_RTBI &&
3645            (!priv->hw->xpcs ||
3646             xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3647                ret = stmmac_init_phy(dev);
3648                if (ret) {
3649                        netdev_err(priv->dev,
3650                                   "%s: Cannot attach to PHY (error: %d)\n",
3651                                   __func__, ret);
3652                        goto init_phy_error;
3653                }
3654        }
3655
3656        /* Extra statistics */
3657        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3658        priv->xstats.threshold = tc;
3659
3660        bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3661        if (bfsize < 0)
3662                bfsize = 0;
3663
3664        if (bfsize < BUF_SIZE_16KiB)
3665                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3666
3667        priv->dma_buf_sz = bfsize;
3668        buf_sz = bfsize;
3669
3670        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3671
3672        if (!priv->dma_tx_size)
3673                priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3674        if (!priv->dma_rx_size)
3675                priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3676
3677        /* Earlier check for TBS */
3678        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3679                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3680                int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3681
3682                /* Setup per-TXQ tbs flag before TX descriptor alloc */
3683                tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3684        }
3685
3686        ret = alloc_dma_desc_resources(priv);
3687        if (ret < 0) {
3688                netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3689                           __func__);
3690                goto dma_desc_error;
3691        }
3692
3693        ret = init_dma_desc_rings(dev, GFP_KERNEL);
3694        if (ret < 0) {
3695                netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3696                           __func__);
3697                goto init_error;
3698        }
3699
3700        ret = stmmac_hw_setup(dev, true);
3701        if (ret < 0) {
3702                netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3703                goto init_error;
3704        }
3705
3706        stmmac_init_coalesce(priv);
3707
3708        phylink_start(priv->phylink);
3709        /* We may have called phylink_speed_down before */
3710        phylink_speed_up(priv->phylink);
3711
3712        ret = stmmac_request_irq(dev);
3713        if (ret)
3714                goto irq_error;
3715
3716        stmmac_enable_all_queues(priv);
3717        netif_tx_start_all_queues(priv->dev);
3718
3719        return 0;
3720
3721irq_error:
3722        phylink_stop(priv->phylink);
3723
3724        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3725                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3726
3727        stmmac_hw_teardown(dev);
3728init_error:
3729        free_dma_desc_resources(priv);
3730dma_desc_error:
3731        phylink_disconnect_phy(priv->phylink);
3732init_phy_error:
3733        pm_runtime_put(priv->device);
3734        return ret;
3735}
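
/* stmmac_open() above follows the usual kernel goto ladder: each failure
 * jumps to the label that undoes exactly what succeeded before the failing
 * step, with the labels in reverse acquisition order. The skeleton reduced
 * to two steps (all helper names are placeholders):
 */
#if 0
static int step_a(void);
static int step_b(void);
static void undo_step_a(void);

static int example_open(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto undo_a;	/* only step_a() needs undoing */

	return 0;

undo_a:
	undo_step_a();
	return ret;
}
#endif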
3736
3737static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3738{
3739        set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3740
3741        if (priv->fpe_wq)
3742                destroy_workqueue(priv->fpe_wq);
3743
3744        netdev_info(priv->dev, "FPE workqueue stop");
3745}
3746
3747/**
3748 *  stmmac_release - close entry point of the driver
3749 *  @dev : device pointer.
3750 *  Description:
3751 *  This is the stop entry point of the driver.
3752 */
3753int stmmac_release(struct net_device *dev)
3754{
3755        struct stmmac_priv *priv = netdev_priv(dev);
3756        u32 chan;
3757
3758        if (device_may_wakeup(priv->device))
3759                phylink_speed_down(priv->phylink, false);
3760        /* Stop and disconnect the PHY */
3761        phylink_stop(priv->phylink);
3762        phylink_disconnect_phy(priv->phylink);
3763
3764        stmmac_disable_all_queues(priv);
3765
3766        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3767                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3768
3769        /* Free the IRQ lines */
3770        stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3771
3772        if (priv->eee_enabled) {
3773                priv->tx_path_in_lpi_mode = false;
3774                del_timer_sync(&priv->eee_ctrl_timer);
3775        }
3776
3777        /* Stop TX/RX DMA and clear the descriptors */
3778        stmmac_stop_all_dma(priv);
3779
3780        /* Release and free the Rx/Tx resources */
3781        free_dma_desc_resources(priv);
3782
3783        /* Disable the MAC Rx/Tx */
3784        stmmac_mac_set(priv, priv->ioaddr, false);
3785
3786        netif_carrier_off(dev);
3787
3788        stmmac_release_ptp(priv);
3789
3790        pm_runtime_put(priv->device);
3791
3792        if (priv->dma_cap.fpesel)
3793                stmmac_fpe_stop_wq(priv);
3794
3795        return 0;
3796}
3797
3798static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3799                               struct stmmac_tx_queue *tx_q)
3800{
3801        u16 tag = 0x0, inner_tag = 0x0;
3802        u32 inner_type = 0x0;
3803        struct dma_desc *p;
3804
3805        if (!priv->dma_cap.vlins)
3806                return false;
3807        if (!skb_vlan_tag_present(skb))
3808                return false;
3809        if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3810                inner_tag = skb_vlan_tag_get(skb);
3811                inner_type = STMMAC_VLAN_INSERT;
3812        }
3813
3814        tag = skb_vlan_tag_get(skb);
3815
3816        if (tx_q->tbs & STMMAC_TBS_AVAIL)
3817                p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3818        else
3819                p = &tx_q->dma_tx[tx_q->cur_tx];
3820
3821        if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3822                return false;
3823
3824        stmmac_set_tx_owner(priv, p);
3825        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3826        return true;
3827}
3828
3829/**
3830 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
3831 *  @priv: driver private structure
3832 *  @des: buffer start address
3833 *  @total_len: total length to fill in descriptors
3834 *  @last_segment: condition for the last descriptor
3835 *  @queue: TX queue index
3836 *  Description:
3837 *  This function fills descriptors and requests new descriptors according
3838 *  to the buffer length to fill
3839 */
3840static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3841                                 int total_len, bool last_segment, u32 queue)
3842{
3843        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3844        struct dma_desc *desc;
3845        u32 buff_size;
3846        int tmp_len;
3847
3848        tmp_len = total_len;
3849
3850        while (tmp_len > 0) {
3851                dma_addr_t curr_addr;
3852
3853                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3854                                                priv->dma_tx_size);
3855                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3856
3857                if (tx_q->tbs & STMMAC_TBS_AVAIL)
3858                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3859                else
3860                        desc = &tx_q->dma_tx[tx_q->cur_tx];
3861
3862                curr_addr = des + (total_len - tmp_len);
3863                if (priv->dma_cap.addr64 <= 32)
3864                        desc->des0 = cpu_to_le32(curr_addr);
3865                else
3866                        stmmac_set_desc_addr(priv, desc, curr_addr);
3867
3868                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3869                            TSO_MAX_BUFF_SIZE : tmp_len;
3870
3871                stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3872                                0, 1,
3873                                (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3874                                0, 0);
3875
3876                tmp_len -= TSO_MAX_BUFF_SIZE;
3877        }
3878}
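
/* A userspace simulation of the chunking loop above: the payload is cut
 * into TSO_MAX_BUFF_SIZE (16K - 1) pieces, one descriptor per piece, with
 * the remainder in the last descriptor (the payload length is made up).
 */
#if 0
#include <stdio.h>

#define TSO_MAX_BUFF_SIZE	(16384 - 1)

int main(void)
{
	int total_len = 40000;	/* hypothetical TSO payload */
	int tmp_len = total_len;

	while (tmp_len > 0) {
		int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
				TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc: offset %d, len %d\n",
		       total_len - tmp_len, buff_size);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
	/* three descriptors: 16383 + 16383 + 7234 = 40000 */
	return 0;
}
#endif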
3879
3880static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3881{
3882        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3883        int desc_size;
3884
3885        if (likely(priv->extend_desc))
3886                desc_size = sizeof(struct dma_extended_desc);
3887        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3888                desc_size = sizeof(struct dma_edesc);
3889        else
3890                desc_size = sizeof(struct dma_desc);
3891
3892        /* The own bit must be the latest setting done when preparing the
3893         * descriptor; a barrier is then needed to make sure that
3894         * all is coherent before granting control to the DMA engine.
3895         */
3896        wmb();
3897
3898        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3899        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3900}
3901
3902/**
3903 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3904 *  @skb : the socket buffer
3905 *  @dev : device pointer
3906 *  Description: this is the transmit function that is called on TSO frames
3907 *  (support available on GMAC4 and newer chips).
3908 *  The diagram below shows the ring programming in the case of TSO frames:
3909 *
3910 *  First Descriptor
3911 *   --------
3912 *   | DES0 |---> buffer1 = L2/L3/L4 header
3913 *   | DES1 |---> TCP Payload (can continue on next descr...)
3914 *   | DES2 |---> buffer 1 and 2 len
3915 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3916 *   --------
3917 *      |
3918 *     ...
3919 *      |
3920 *   --------
3921 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3922 *   | DES1 | --|
3923 *   | DES2 | --> buffer 1 and 2 len
3924 *   | DES3 |
3925 *   --------
3926 *
3927 * mss is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when the MSS changes.
3928 */
3929static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3930{
3931        struct dma_desc *desc, *first, *mss_desc = NULL;
3932        struct stmmac_priv *priv = netdev_priv(dev);
3933        int nfrags = skb_shinfo(skb)->nr_frags;
3934        u32 queue = skb_get_queue_mapping(skb);
3935        unsigned int first_entry, tx_packets;
3936        int tmp_pay_len = 0, first_tx;
3937        struct stmmac_tx_queue *tx_q;
3938        bool has_vlan, set_ic;
3939        u8 proto_hdr_len, hdr;
3940        u32 pay_len, mss;
3941        dma_addr_t des;
3942        int i;
3943
3944        tx_q = &priv->tx_queue[queue];
3945        first_tx = tx_q->cur_tx;
3946
3947        /* Compute header lengths */
3948        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3949                proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3950                hdr = sizeof(struct udphdr);
3951        } else {
3952                proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3953                hdr = tcp_hdrlen(skb);
3954        }
3955
3956        /* Desc availability based on threshold should be safe enough */
3957        if (unlikely(stmmac_tx_avail(priv, queue) <
3958                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3959                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3960                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3961                                                                queue));
3962                        /* This is a hard error, log it. */
3963                        netdev_err(priv->dev,
3964                                   "%s: Tx Ring full when queue awake\n",
3965                                   __func__);
3966                }
3967                return NETDEV_TX_BUSY;
3968        }
3969
3970        pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3971
3972        mss = skb_shinfo(skb)->gso_size;
3973
3974        /* set new MSS value if needed */
3975        if (mss != tx_q->mss) {
3976                if (tx_q->tbs & STMMAC_TBS_AVAIL)
3977                        mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3978                else
3979                        mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3980
3981                stmmac_set_mss(priv, mss_desc, mss);
3982                tx_q->mss = mss;
3983                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3984                                                priv->dma_tx_size);
3985                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3986        }
3987
3988        if (netif_msg_tx_queued(priv)) {
3989                pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3990                        __func__, hdr, proto_hdr_len, pay_len, mss);
3991                pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3992                        skb->data_len);
3993        }
3994
3995        /* Check if VLAN can be inserted by HW */
3996        has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3997
3998        first_entry = tx_q->cur_tx;
3999        WARN_ON(tx_q->tx_skbuff[first_entry]);
4000
4001        if (tx_q->tbs & STMMAC_TBS_AVAIL)
4002                desc = &tx_q->dma_entx[first_entry].basic;
4003        else
4004                desc = &tx_q->dma_tx[first_entry];
4005        first = desc;
4006
4007        if (has_vlan)
4008                stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4009
4010        /* first descriptor: fill Headers on Buf1 */
4011        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4012                             DMA_TO_DEVICE);
4013        if (dma_mapping_error(priv->device, des))
4014                goto dma_map_err;
4015
4016        tx_q->tx_skbuff_dma[first_entry].buf = des;
4017        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4018        tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4019        tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4020
4021        if (priv->dma_cap.addr64 <= 32) {
4022                first->des0 = cpu_to_le32(des);
4023
4024                /* Fill start of payload in buff2 of first descriptor */
4025                if (pay_len)
4026                        first->des1 = cpu_to_le32(des + proto_hdr_len);
4027
4028                /* If needed take extra descriptors to fill the remaining payload */
4029                tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4030        } else {
4031                stmmac_set_desc_addr(priv, first, des);
4032                tmp_pay_len = pay_len;
4033                des += proto_hdr_len;
4034                pay_len = 0;
4035        }
4036
4037        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4038
4039        /* Prepare fragments */
4040        for (i = 0; i < nfrags; i++) {
4041                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4042
4043                des = skb_frag_dma_map(priv->device, frag, 0,
4044                                       skb_frag_size(frag),
4045                                       DMA_TO_DEVICE);
4046                if (dma_mapping_error(priv->device, des))
4047                        goto dma_map_err;
4048
4049                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4050                                     (i == nfrags - 1), queue);
4051
4052                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4053                tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4054                tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4055                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4056        }
4057
4058        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4059
4060        /* Only the last descriptor gets to point to the skb. */
4061        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4062        tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4063
4064        /* Manage tx mitigation */
4065        tx_packets = (tx_q->cur_tx + 1) - first_tx;
4066        tx_q->tx_count_frames += tx_packets;
4067
4068        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4069                set_ic = true;
4070        else if (!priv->tx_coal_frames[queue])
4071                set_ic = false;
4072        else if (tx_packets > priv->tx_coal_frames[queue])
4073                set_ic = true;
4074        else if ((tx_q->tx_count_frames %
4075                  priv->tx_coal_frames[queue]) < tx_packets)
4076                set_ic = true;
4077        else
4078                set_ic = false;
4079
4080        if (set_ic) {
4081                if (tx_q->tbs & STMMAC_TBS_AVAIL)
4082                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4083                else
4084                        desc = &tx_q->dma_tx[tx_q->cur_tx];
4085
4086                tx_q->tx_count_frames = 0;
4087                stmmac_set_tx_ic(priv, desc);
4088                priv->xstats.tx_set_ic_bit++;
4089        }
4090
4091        /* We've used all descriptors we need for this skb, however,
4092         * advance cur_tx so that it references a fresh descriptor.
4093         * ndo_start_xmit will fill this descriptor the next time it's
4094         * called and stmmac_tx_clean may clean up to this descriptor.
4095         */
4096        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4097
4098        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4099                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4100                          __func__);
4101                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4102        }
4103
4104        dev->stats.tx_bytes += skb->len;
4105        priv->xstats.tx_tso_frames++;
4106        priv->xstats.tx_tso_nfrags += nfrags;
4107
4108        if (priv->sarc_type)
4109                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4110
4111        skb_tx_timestamp(skb);
4112
4113        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4114                     priv->hwts_tx_en)) {
4115                /* declare that device is doing timestamping */
4116                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4117                stmmac_enable_tx_timestamp(priv, first);
4118        }
4119
4120        /* Complete the first descriptor before granting the DMA */
4121        stmmac_prepare_tso_tx_desc(priv, first, 1,
4122                        proto_hdr_len,
4123                        pay_len,
4124                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4125                        hdr / 4, (skb->len - proto_hdr_len));
4126
4127        /* If context desc is used to change MSS */
4128        if (mss_desc) {
4129                /* Make sure that first descriptor has been completely
4130                 * written, including its own bit. This is because the MSS
4131                 * context descriptor actually precedes the first descriptor,
4132                 * so we need to make sure that its own bit is the last thing written.
4133                 */
4134                dma_wmb();
4135                stmmac_set_tx_owner(priv, mss_desc);
4136        }
4137
4138        if (netif_msg_pktdata(priv)) {
4139                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4140                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4141                        tx_q->cur_tx, first, nfrags);
4142                pr_info(">>> frame to be transmitted: ");
4143                print_pkt(skb->data, skb_headlen(skb));
4144        }
4145
4146        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4147
4148        stmmac_flush_tx_descriptors(priv, queue);
4149        stmmac_tx_timer_arm(priv, queue);
4150
4151        return NETDEV_TX_OK;
4152
4153dma_map_err:
4154        dev_err(priv->device, "Tx dma map failed\n");
4155        dev_kfree_skb(skb);
4156        priv->dev->stats.tx_dropped++;
4157        return NETDEV_TX_OK;
4158}
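
/* The set_ic block above requests a TX-complete interrupt roughly once
 * every tx_coal_frames packets: the modulo test fires whenever the running
 * tx_count_frames counter has just crossed a multiple of the threshold.
 * A userspace rendering of that decision (timestamping branch omitted,
 * threshold value made up):
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int coal_frames = 4;	/* hypothetical tx_coal_frames */
	unsigned int count = 0;		/* tx_q->tx_count_frames */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		unsigned int tx_packets = 1;	/* one packet per xmit call */
		bool set_ic;

		count += tx_packets;
		if (!coal_frames)
			set_ic = false;
		else if (tx_packets > coal_frames)
			set_ic = true;
		else
			set_ic = (count % coal_frames) < tx_packets;

		if (set_ic)
			count = 0;	/* the driver resets the counter */
		printf("packet %u: set_ic=%d\n", i, set_ic);
	}
	/* the IC bit is set on every 4th packet */
	return 0;
}
#endif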
4159
4160/**
4161 *  stmmac_xmit - Tx entry point of the driver
4162 *  @skb : the socket buffer
4163 *  @dev : device pointer
4164 *  Description : this is the tx entry point of the driver.
4165 *  It programs the chain or the ring and supports oversized frames
4166 *  and SG feature.
4167 */
4168static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4169{
4170        unsigned int first_entry, tx_packets, enh_desc;
4171        struct stmmac_priv *priv = netdev_priv(dev);
4172        unsigned int nopaged_len = skb_headlen(skb);
4173        int i, csum_insertion = 0, is_jumbo = 0;
4174        u32 queue = skb_get_queue_mapping(skb);
4175        int nfrags = skb_shinfo(skb)->nr_frags;
4176        int gso = skb_shinfo(skb)->gso_type;
4177        struct dma_edesc *tbs_desc = NULL;
4178        struct dma_desc *desc, *first;
4179        struct stmmac_tx_queue *tx_q;
4180        bool has_vlan, set_ic;
4181        int entry, first_tx;
4182        dma_addr_t des;
4183
4184        tx_q = &priv->tx_queue[queue];
4185        first_tx = tx_q->cur_tx;
4186
4187        if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4188                stmmac_disable_eee_mode(priv);
4189
4190        /* Manage oversized TCP frames for GMAC4 device */
4191        if (skb_is_gso(skb) && priv->tso) {
4192                if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4193                        return stmmac_tso_xmit(skb, dev);
4194                if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4195                        return stmmac_tso_xmit(skb, dev);
4196        }
4197
4198        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4199                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4200                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4201                                                                queue));
4202                        /* This is a hard error, log it. */
4203                        netdev_err(priv->dev,
4204                                   "%s: Tx Ring full when queue awake\n",
4205                                   __func__);
4206                }
4207                return NETDEV_TX_BUSY;
4208        }
4209
4210        /* Check if VLAN can be inserted by HW */
4211        has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4212
4213        entry = tx_q->cur_tx;
4214        first_entry = entry;
4215        WARN_ON(tx_q->tx_skbuff[first_entry]);
4216
4217        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4218
4219        if (likely(priv->extend_desc))
4220                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4221        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4222                desc = &tx_q->dma_entx[entry].basic;
4223        else
4224                desc = tx_q->dma_tx + entry;
4225
4226        first = desc;
4227
4228        if (has_vlan)
4229                stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4230
4231        enh_desc = priv->plat->enh_desc;
4232        /* To program the descriptors according to the size of the frame */
4233        if (enh_desc)
4234                is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4235
4236        if (unlikely(is_jumbo)) {
4237                entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4238                if (unlikely(entry < 0) && (entry != -EINVAL))
4239                        goto dma_map_err;
4240        }
4241
4242        for (i = 0; i < nfrags; i++) {
4243                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4244                int len = skb_frag_size(frag);
4245                bool last_segment = (i == (nfrags - 1));
4246
4247                entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4248                WARN_ON(tx_q->tx_skbuff[entry]);
4249
4250                if (likely(priv->extend_desc))
4251                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4252                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4253                        desc = &tx_q->dma_entx[entry].basic;
4254                else
4255                        desc = tx_q->dma_tx + entry;
4256
4257                des = skb_frag_dma_map(priv->device, frag, 0, len,
4258                                       DMA_TO_DEVICE);
4259                if (dma_mapping_error(priv->device, des))
4260                        goto dma_map_err; /* should reuse desc w/o issues */
4261
4262                tx_q->tx_skbuff_dma[entry].buf = des;
4263
4264                stmmac_set_desc_addr(priv, desc, des);
4265
4266                tx_q->tx_skbuff_dma[entry].map_as_page = true;
4267                tx_q->tx_skbuff_dma[entry].len = len;
4268                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4269                tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4270
4271                /* Prepare the descriptor and set the own bit too */
4272                stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4273                                priv->mode, 1, last_segment, skb->len);
4274        }
4275
4276        /* Only the last descriptor gets to point to the skb. */
4277        tx_q->tx_skbuff[entry] = skb;
4278        tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4279
4280        /* According to the coalesce parameter the IC bit for the latest
4281         * segment is reset and the timer re-started to clean the tx status.
4282         * This approach takes care of the fragments: desc is the first
4283         * element in case of no SG.
4284         */
4285        tx_packets = (entry + 1) - first_tx;
4286        tx_q->tx_count_frames += tx_packets;
4287
4288        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4289                set_ic = true;
4290        else if (!priv->tx_coal_frames[queue])
4291                set_ic = false;
4292        else if (tx_packets > priv->tx_coal_frames[queue])
4293                set_ic = true;
4294        else if ((tx_q->tx_count_frames %
4295                  priv->tx_coal_frames[queue]) < tx_packets)
4296                set_ic = true;
4297        else
4298                set_ic = false;
4299
4300        if (set_ic) {
4301                if (likely(priv->extend_desc))
4302                        desc = &tx_q->dma_etx[entry].basic;
4303                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4304                        desc = &tx_q->dma_entx[entry].basic;
4305                else
4306                        desc = &tx_q->dma_tx[entry];
4307
4308                tx_q->tx_count_frames = 0;
4309                stmmac_set_tx_ic(priv, desc);
4310                priv->xstats.tx_set_ic_bit++;
4311        }
4312
4313        /* We've used all descriptors we need for this skb, however,
4314         * advance cur_tx so that it references a fresh descriptor.
4315         * ndo_start_xmit will fill this descriptor the next time it's
4316         * called and stmmac_tx_clean may clean up to this descriptor.
4317         */
4318        entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4319        tx_q->cur_tx = entry;
4320
4321        if (netif_msg_pktdata(priv)) {
4322                netdev_dbg(priv->dev,
4323                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4324                           __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4325                           entry, first, nfrags);
4326
4327                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4328                print_pkt(skb->data, skb->len);
4329        }
4330
4331        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4332                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4333                          __func__);
4334                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4335        }
4336
4337        dev->stats.tx_bytes += skb->len;
4338
4339        if (priv->sarc_type)
4340                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4341
4342        skb_tx_timestamp(skb);
4343
4344        /* Ready to fill the first descriptor and set the OWN bit w/o any
4345         * problems because all the descriptors are actually ready to be
4346         * passed to the DMA engine.
4347         */
4348        if (likely(!is_jumbo)) {
4349                bool last_segment = (nfrags == 0);
4350
4351                des = dma_map_single(priv->device, skb->data,
4352                                     nopaged_len, DMA_TO_DEVICE);
4353                if (dma_mapping_error(priv->device, des))
4354                        goto dma_map_err;
4355
4356                tx_q->tx_skbuff_dma[first_entry].buf = des;
4357                tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4358                tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4359
4360                stmmac_set_desc_addr(priv, first, des);
4361
4362                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4363                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4364
4365                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4366                             priv->hwts_tx_en)) {
4367                        /* declare that device is doing timestamping */
4368                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4369                        stmmac_enable_tx_timestamp(priv, first);
4370                }
4371
4372                /* Prepare the first descriptor setting the OWN bit too */
4373                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4374                                csum_insertion, priv->mode, 0, last_segment,
4375                                skb->len);
4376        }
4377
4378        if (tx_q->tbs & STMMAC_TBS_EN) {
4379                struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4380
4381                tbs_desc = &tx_q->dma_entx[first_entry];
4382                stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4383        }
4384
4385        stmmac_set_tx_owner(priv, first);
4386
4387        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4388
4389        stmmac_enable_dma_transmission(priv, priv->ioaddr);
4390
4391        stmmac_flush_tx_descriptors(priv, queue);
4392        stmmac_tx_timer_arm(priv, queue);
4393
4394        return NETDEV_TX_OK;
4395
4396dma_map_err:
4397        netdev_err(priv->dev, "Tx DMA map failed\n");
4398        dev_kfree_skb(skb);
4399        priv->dev->stats.tx_dropped++;
4400        return NETDEV_TX_OK;
4401}
4402
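    /**
     * stmmac_rx_vlan - strip the VLAN tag from a received frame
     * @dev: device pointer
     * @skb: socket buffer holding the received frame
     * Description: if the frame carries an 802.1Q/802.1AD tag and the
     * matching RX VLAN offload feature is enabled, pop the tag out of the
     * packet data and hand it to the stack via the VLAN hwaccel API.
     */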
4403static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4404{
4405        struct vlan_ethhdr *veth;
4406        __be16 vlan_proto;
4407        u16 vlanid;
4408
4409        veth = (struct vlan_ethhdr *)skb->data;
4410        vlan_proto = veth->h_vlan_proto;
4411
4412        if ((vlan_proto == htons(ETH_P_8021Q) &&
4413             dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4414            (vlan_proto == htons(ETH_P_8021AD) &&
4415             dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4416                /* pop the vlan tag */
4417                vlanid = ntohs(veth->h_vlan_TCI);
4418                memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4419                skb_pull(skb, VLAN_HLEN);
4420                __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4421        }
4422}
4423
4424/**
4425 * stmmac_rx_refill - refill the used preallocated RX buffers
4426 * @priv: driver private structure
4427 * @queue: RX queue index
4428 * Description: reallocate the RX buffers used by the reception process,
4429 * which is based on zero-copy.
4430 */
4431static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4432{
4433        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4434        int dirty = stmmac_rx_dirty(priv, queue);
4435        unsigned int entry = rx_q->dirty_rx;
4436
4437        while (dirty-- > 0) {
4438                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4439                struct dma_desc *p;
4440                bool use_rx_wd;
4441
4442                if (priv->extend_desc)
4443                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
4444                else
4445                        p = rx_q->dma_rx + entry;
4446
4447                if (!buf->page) {
4448                        buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4449                        if (!buf->page)
4450                                break;
4451                }
4452
4453                if (priv->sph && !buf->sec_page) {
4454                        buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4455                        if (!buf->sec_page)
4456                                break;
4457
4458                        buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4459                }
4460
4461                buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4462
4463                stmmac_set_desc_addr(priv, p, buf->addr);
4464                if (priv->sph)
4465                        stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4466                else
4467                        stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4468                stmmac_refill_desc3(priv, rx_q, p);
4469
4470                rx_q->rx_count_frames++;
4471                rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4472                if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4473                        rx_q->rx_count_frames = 0;
4474
4475                use_rx_wd = !priv->rx_coal_frames[queue];
4476                use_rx_wd |= rx_q->rx_count_frames > 0;
4477                if (!priv->use_riwt)
4478                        use_rx_wd = false;
4479
4480                dma_wmb();
4481                stmmac_set_rx_owner(priv, p, use_rx_wd);
4482
4483                entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4484        }
4485        rx_q->dirty_rx = entry;
4486        rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4487                            (rx_q->dirty_rx * sizeof(struct dma_desc));
4488        stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4489}
4490
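    /**
     * stmmac_rx_buf1_len - compute the length held by the primary RX buffer
     * @priv: driver private structure
     * @p: RX descriptor
     * @status: RX status as returned by stmmac_rx_status()
     * @len: frame length accumulated so far
     * Description: returns the split header length on the first descriptor
     * when SPH applies, the full buffer size on intermediate descriptors,
     * or the remaining payload (capped to the buffer size) on the last one.
     */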
4491static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4492                                       struct dma_desc *p,
4493                                       int status, unsigned int len)
4494{
4495        unsigned int plen = 0, hlen = 0;
4496        int coe = priv->hw->rx_csum;
4497
4498        /* Not first descriptor, buffer is always zero */
4499        if (priv->sph && len)
4500                return 0;
4501
4502        /* First descriptor, get split header length */
4503        stmmac_get_rx_header_len(priv, p, &hlen);
4504        if (priv->sph && hlen) {
4505                priv->xstats.rx_split_hdr_pkt_n++;
4506                return hlen;
4507        }
4508
4509        /* First descriptor, not last descriptor and not split header */
4510        if (status & rx_not_ls)
4511                return priv->dma_buf_sz;
4512
4513        plen = stmmac_get_rx_frame_len(priv, p, coe);
4514
4515        /* First descriptor and last descriptor and not split header */
4516        return min_t(unsigned int, priv->dma_buf_sz, plen);
4517}
4518
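    /**
     * stmmac_rx_buf2_len - compute the length held by the secondary RX buffer
     * @priv: driver private structure
     * @p: RX descriptor
     * @status: RX status as returned by stmmac_rx_status()
     * @len: frame length accumulated so far
     * Description: the secondary buffer is only used when Split Header (SPH)
     * is enabled; it carries a full buffer on intermediate descriptors and
     * the remainder of the payload on the last one.
     */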
4519static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4520                                       struct dma_desc *p,
4521                                       int status, unsigned int len)
4522{
4523        int coe = priv->hw->rx_csum;
4524        unsigned int plen = 0;
4525
4526        /* Not split header, buffer is not available */
4527        if (!priv->sph)
4528                return 0;
4529
4530        /* Not last descriptor */
4531        if (status & rx_not_ls)
4532                return priv->dma_buf_sz;
4533
4534        plen = stmmac_get_rx_frame_len(priv, p, coe);
4535
4536        /* Last descriptor */
4537        return plen - len;
4538}
4539
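    /**
     * stmmac_xdp_xmit_xdpf - queue one XDP frame on a TX ring
     * @priv: driver private structure
     * @queue: TX queue index
     * @xdpf: XDP frame to transmit
     * @dma_map: true for ndo_xdp_xmit frames that need a fresh DMA mapping,
     *           false for XDP_TX frames already backed by page_pool memory
     * Description: fills one TX descriptor, kicks the DMA and advances
     * cur_tx. Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED if
     * the ring is too full or the DMA mapping fails.
     */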
4540static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4541                                struct xdp_frame *xdpf, bool dma_map)
4542{
4543        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4544        unsigned int entry = tx_q->cur_tx;
4545        struct dma_desc *tx_desc;
4546        dma_addr_t dma_addr;
4547        bool set_ic;
4548
4549        if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4550                return STMMAC_XDP_CONSUMED;
4551
4552        if (likely(priv->extend_desc))
4553                tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4554        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4555                tx_desc = &tx_q->dma_entx[entry].basic;
4556        else
4557                tx_desc = tx_q->dma_tx + entry;
4558
4559        if (dma_map) {
4560                dma_addr = dma_map_single(priv->device, xdpf->data,
4561                                          xdpf->len, DMA_TO_DEVICE);
4562                if (dma_mapping_error(priv->device, dma_addr))
4563                        return STMMAC_XDP_CONSUMED;
4564
4565                tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4566        } else {
4567                struct page *page = virt_to_page(xdpf->data);
4568
4569                dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4570                           xdpf->headroom;
4571                dma_sync_single_for_device(priv->device, dma_addr,
4572                                           xdpf->len, DMA_BIDIRECTIONAL);
4573
4574                tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4575        }
4576
4577        tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4578        tx_q->tx_skbuff_dma[entry].map_as_page = false;
4579        tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4580        tx_q->tx_skbuff_dma[entry].last_segment = true;
4581        tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4582
4583        tx_q->xdpf[entry] = xdpf;
4584
4585        stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4586
4587        stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4588                               true, priv->mode, true, true,
4589                               xdpf->len);
4590
4591        tx_q->tx_count_frames++;
4592
4593        if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4594                set_ic = true;
4595        else
4596                set_ic = false;
4597
4598        if (set_ic) {
4599                tx_q->tx_count_frames = 0;
4600                stmmac_set_tx_ic(priv, tx_desc);
4601                priv->xstats.tx_set_ic_bit++;
4602        }
4603
4604        stmmac_enable_dma_transmission(priv, priv->ioaddr);
4605
4606        entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4607        tx_q->cur_tx = entry;
4608
4609        return STMMAC_XDP_TX;
4610}
4611
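    /* Map the current CPU to a valid TX queue index so that XDP TX traffic
     * is spread across the available queues.
     */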
4612static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4613                                   int cpu)
4614{
4615        int index = cpu;
4616
4617        if (unlikely(index < 0))
4618                index = 0;
4619
4620        while (index >= priv->plat->tx_queues_to_use)
4621                index -= priv->plat->tx_queues_to_use;
4622
4623        return index;
4624}
4625
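    /* Transmit an XDP_TX buffer on the queue selected for this CPU. The
     * netdev TX lock is taken because the ring is shared with the slow path.
     */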
4626static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4627                                struct xdp_buff *xdp)
4628{
4629        struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4630        int cpu = smp_processor_id();
4631        struct netdev_queue *nq;
4632        int queue;
4633        int res;
4634
4635        if (unlikely(!xdpf))
4636                return STMMAC_XDP_CONSUMED;
4637
4638        queue = stmmac_xdp_get_tx_queue(priv, cpu);
4639        nq = netdev_get_tx_queue(priv->dev, queue);
4640
4641        __netif_tx_lock(nq, cpu);
4642        /* Avoids TX time-out as we are sharing with slow path */
4643        nq->trans_start = jiffies;
4644
4645        res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4646        if (res == STMMAC_XDP_TX)
4647                stmmac_flush_tx_descriptors(priv, queue);
4648
4649        __netif_tx_unlock(nq);
4650
4651        return res;
4652}
4653
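    /* Run the XDP program on one buffer and translate its verdict into the
     * driver's STMMAC_XDP_* result codes.
     */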
4654static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4655                                 struct bpf_prog *prog,
4656                                 struct xdp_buff *xdp)
4657{
4658        u32 act;
4659        int res;
4660
4661        act = bpf_prog_run_xdp(prog, xdp);
4662        switch (act) {
4663        case XDP_PASS:
4664                res = STMMAC_XDP_PASS;
4665                break;
4666        case XDP_TX:
4667                res = stmmac_xdp_xmit_back(priv, xdp);
4668                break;
4669        case XDP_REDIRECT:
4670                if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4671                        res = STMMAC_XDP_CONSUMED;
4672                else
4673                        res = STMMAC_XDP_REDIRECT;
4674                break;
4675        default:
4676                bpf_warn_invalid_xdp_action(act);
4677                fallthrough;
4678        case XDP_ABORTED:
4679                trace_xdp_exception(priv->dev, prog, act);
4680                fallthrough;
4681        case XDP_DROP:
4682                res = STMMAC_XDP_CONSUMED;
4683                break;
4684        }
4685
4686        return res;
4687}
4688
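    /* Wrapper around __stmmac_xdp_run_prog() for the copy-based RX path:
     * the negated result code is returned to the caller as an ERR_PTR in
     * place of the skb pointer.
     */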
4689static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4690                                           struct xdp_buff *xdp)
4691{
4692        struct bpf_prog *prog;
4693        int res;
4694
4695        prog = READ_ONCE(priv->xdp_prog);
4696        if (!prog) {
4697                res = STMMAC_XDP_PASS;
4698                goto out;
4699        }
4700
4701        res = __stmmac_xdp_run_prog(priv, prog, xdp);
4702out:
4703        return ERR_PTR(-res);
4704}
4705
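    /* Complete the deferred XDP work once the RX loop is done: arm the TX
     * timer for XDP_TX frames and flush any pending redirects.
     */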
4706static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4707                                   int xdp_status)
4708{
4709        int cpu = smp_processor_id();
4710        int queue;
4711
4712        queue = stmmac_xdp_get_tx_queue(priv, cpu);
4713
4714        if (xdp_status & STMMAC_XDP_TX)
4715                stmmac_tx_timer_arm(priv, queue);
4716
4717        if (xdp_status & STMMAC_XDP_REDIRECT)
4718                xdp_do_flush();
4719}
4720
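    /* Build an skb from an XSK buffer by copying the payload (and metadata)
     * out of the UMEM, since zero-copy RX buffers cannot be handed to the
     * stack directly.
     */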
4721static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4722                                               struct xdp_buff *xdp)
4723{
4724        unsigned int metasize = xdp->data - xdp->data_meta;
4725        unsigned int datasize = xdp->data_end - xdp->data;
4726        struct sk_buff *skb;
4727
4728        skb = __napi_alloc_skb(&ch->rxtx_napi,
4729                               xdp->data_end - xdp->data_hard_start,
4730                               GFP_ATOMIC | __GFP_NOWARN);
4731        if (unlikely(!skb))
4732                return NULL;
4733
4734        skb_reserve(skb, xdp->data - xdp->data_hard_start);
4735        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4736        if (metasize)
4737                skb_metadata_set(skb, metasize);
4738
4739        return skb;
4740}
4741
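    /* Turn an XDP_PASS zero-copy buffer into an skb and push it up the
     * stack with timestamp, VLAN, checksum and hash information filled in.
     */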
4742static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4743                                   struct dma_desc *p, struct dma_desc *np,
4744                                   struct xdp_buff *xdp)
4745{
4746        struct stmmac_channel *ch = &priv->channel[queue];
4747        unsigned int len = xdp->data_end - xdp->data;
4748        enum pkt_hash_types hash_type;
4749        int coe = priv->hw->rx_csum;
4750        struct sk_buff *skb;
4751        u32 hash;
4752
4753        skb = stmmac_construct_skb_zc(ch, xdp);
4754        if (!skb) {
4755                priv->dev->stats.rx_dropped++;
4756                return;
4757        }
4758
4759        stmmac_get_rx_hwtstamp(priv, p, np, skb);
4760        stmmac_rx_vlan(priv->dev, skb);
4761        skb->protocol = eth_type_trans(skb, priv->dev);
4762
4763        if (unlikely(!coe))
4764                skb_checksum_none_assert(skb);
4765        else
4766                skb->ip_summed = CHECKSUM_UNNECESSARY;
4767
4768        if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4769                skb_set_hash(skb, hash, hash_type);
4770
4771        skb_record_rx_queue(skb, queue);
4772        napi_gro_receive(&ch->rxtx_napi, skb);
4773
4774        priv->dev->stats.rx_packets++;
4775        priv->dev->stats.rx_bytes += len;
4776}
4777
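    /**
     * stmmac_rx_refill_zc - refill RX descriptors with XSK pool buffers
     * @priv: driver private structure
     * @queue: RX queue index
     * @budget: maximum number of descriptors to refill
     * Description: zero-copy counterpart of stmmac_rx_refill(). Returns
     * false if the XSK pool runs out of buffers before the budget is spent.
     */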
4778static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4779{
4780        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4781        unsigned int entry = rx_q->dirty_rx;
4782        struct dma_desc *rx_desc = NULL;
4783        bool ret = true;
4784
4785        budget = min(budget, stmmac_rx_dirty(priv, queue));
4786
4787        while (budget-- > 0 && entry != rx_q->cur_rx) {
4788                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4789                dma_addr_t dma_addr;
4790                bool use_rx_wd;
4791
4792                if (!buf->xdp) {
4793                        buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4794                        if (!buf->xdp) {
4795                                ret = false;
4796                                break;
4797                        }
4798                }
4799
4800                if (priv->extend_desc)
4801                        rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4802                else
4803                        rx_desc = rx_q->dma_rx + entry;
4804
4805                dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4806                stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4807                stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4808                stmmac_refill_desc3(priv, rx_q, rx_desc);
4809
4810                rx_q->rx_count_frames++;
4811                rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4812                if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4813                        rx_q->rx_count_frames = 0;
4814
4815                use_rx_wd = !priv->rx_coal_frames[queue];
4816                use_rx_wd |= rx_q->rx_count_frames > 0;
4817                if (!priv->use_riwt)
4818                        use_rx_wd = false;
4819
4820                dma_wmb();
4821                stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4822
4823                entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4824        }
4825
4826        if (rx_desc) {
4827                rx_q->dirty_rx = entry;
4828                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4829                                     (rx_q->dirty_rx * sizeof(struct dma_desc));
4830                stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4831        }
4832
4833        return ret;
4834}
4835
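    /**
     * stmmac_rx_zc - receive loop for AF_XDP zero-copy queues
     * @priv: driver private structure
     * @limit: napi budget
     * @queue: RX queue index
     * Description: zero-copy counterpart of stmmac_rx(). Each received
     * frame maps 1:1 to an XSK buffer and is run through the XDP program
     * before being dispatched to the stack or recycled.
     */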
4836static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4837{
4838        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4839        unsigned int count = 0, error = 0, len = 0;
4840        int dirty = stmmac_rx_dirty(priv, queue);
4841        unsigned int next_entry = rx_q->cur_rx;
4842        unsigned int desc_size;
4843        struct bpf_prog *prog;
4844        bool failure = false;
4845        int xdp_status = 0;
4846        int status = 0;
4847
4848        if (netif_msg_rx_status(priv)) {
4849                void *rx_head;
4850
4851                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4852                if (priv->extend_desc) {
4853                        rx_head = (void *)rx_q->dma_erx;
4854                        desc_size = sizeof(struct dma_extended_desc);
4855                } else {
4856                        rx_head = (void *)rx_q->dma_rx;
4857                        desc_size = sizeof(struct dma_desc);
4858                }
4859
4860                stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4861                                    rx_q->dma_rx_phy, desc_size);
4862        }
4863        while (count < limit) {
4864                struct stmmac_rx_buffer *buf;
4865                unsigned int buf1_len = 0;
4866                struct dma_desc *np, *p;
4867                int entry;
4868                int res;
4869
4870                if (!count && rx_q->state_saved) {
4871                        error = rx_q->state.error;
4872                        len = rx_q->state.len;
4873                } else {
4874                        rx_q->state_saved = false;
4875                        error = 0;
4876                        len = 0;
4877                }
4878
4879                if (count >= limit)
4880                        break;
4881
4882read_again:
4883                buf1_len = 0;
4884                entry = next_entry;
4885                buf = &rx_q->buf_pool[entry];
4886
4887                if (dirty >= STMMAC_RX_FILL_BATCH) {
4888                        failure = failure ||
4889                                  !stmmac_rx_refill_zc(priv, queue, dirty);
4890                        dirty = 0;
4891                }
4892
4893                if (priv->extend_desc)
4894                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
4895                else
4896                        p = rx_q->dma_rx + entry;
4897
4898                /* read the status of the incoming frame */
4899                status = stmmac_rx_status(priv, &priv->dev->stats,
4900                                          &priv->xstats, p);
4901                /* check if still owned by the DMA, otherwise go ahead */
4902                if (unlikely(status & dma_own))
4903                        break;
4904
4905                /* Prefetch the next RX descriptor */
4906                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4907                                                priv->dma_rx_size);
4908                next_entry = rx_q->cur_rx;
4909
4910                if (priv->extend_desc)
4911                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4912                else
4913                        np = rx_q->dma_rx + next_entry;
4914
4915                prefetch(np);
4916
4917                /* Ensure a valid XSK buffer before proceeding */
4918                if (!buf->xdp)
4919                        break;
4920
4921                if (priv->extend_desc)
4922                        stmmac_rx_extended_status(priv, &priv->dev->stats,
4923                                                  &priv->xstats,
4924                                                  rx_q->dma_erx + entry);
4925                if (unlikely(status == discard_frame)) {
4926                        xsk_buff_free(buf->xdp);
4927                        buf->xdp = NULL;
4928                        dirty++;
4929                        error = 1;
4930                        if (!priv->hwts_rx_en)
4931                                priv->dev->stats.rx_errors++;
4932                }
4933
4934                if (unlikely(error && (status & rx_not_ls)))
4935                        goto read_again;
4936                if (unlikely(error)) {
4937                        count++;
4938                        continue;
4939                }
4940
4941                /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4942                if (likely(status & rx_not_ls)) {
4943                        xsk_buff_free(buf->xdp);
4944                        buf->xdp = NULL;
4945                        dirty++;
4946                        count++;
4947                        goto read_again;
4948                }
4949
4950                /* XDP ZC Frame only support primary buffers for now */
4951                buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4952                len += buf1_len;
4953
4954                /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4955                 * Type frames (LLC/LLC-SNAP)
4956                 *
4957                 * llc_snap is never checked in GMAC >= 4, so this ACS
4958                 * feature is always disabled and the FCS needs to be
4959                 * stripped manually.
4960                 */
4961                if (likely(!(status & rx_not_ls)) &&
4962                    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4963                     unlikely(status != llc_snap))) {
4964                        buf1_len -= ETH_FCS_LEN;
4965                        len -= ETH_FCS_LEN;
4966                }
4967
4968                /* RX buffer is good and fit into a XSK pool buffer */
4969                buf->xdp->data_end = buf->xdp->data + buf1_len;
4970                xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4971
4972                prog = READ_ONCE(priv->xdp_prog);
4973                res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4974
4975                switch (res) {
4976                case STMMAC_XDP_PASS:
4977                        stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4978                        xsk_buff_free(buf->xdp);
4979                        break;
4980                case STMMAC_XDP_CONSUMED:
4981                        xsk_buff_free(buf->xdp);
4982                        priv->dev->stats.rx_dropped++;
4983                        break;
4984                case STMMAC_XDP_TX:
4985                case STMMAC_XDP_REDIRECT:
4986                        xdp_status |= res;
4987                        break;
4988                }
4989
4990                buf->xdp = NULL;
4991                dirty++;
4992                count++;
4993        }
4994
4995        if (status & rx_not_ls) {
4996                rx_q->state_saved = true;
4997                rx_q->state.error = error;
4998                rx_q->state.len = len;
4999        }
5000
5001        stmmac_finalize_xdp_rx(priv, xdp_status);
5002
5003        if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5004                if (failure || stmmac_rx_dirty(priv, queue) > 0)
5005                        xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5006                else
5007                        xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5008
5009                return (int)count;
5010        }
5011
5012        return failure ? limit : (int)count;
5013}
5014
5015/**
5016 * stmmac_rx - manage the receive process
5017 * @priv: driver private structure
5018 * @limit: napi budget
5019 * @queue: RX queue index.
5020 * Description: this is the function called by the napi poll method.
5021 * It gets all the frames inside the ring.
5022 */
5023static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5024{
5025        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5026        struct stmmac_channel *ch = &priv->channel[queue];
5027        unsigned int count = 0, error = 0, len = 0;
5028        int status = 0, coe = priv->hw->rx_csum;
5029        unsigned int next_entry = rx_q->cur_rx;
5030        enum dma_data_direction dma_dir;
5031        unsigned int desc_size;
5032        struct sk_buff *skb = NULL;
5033        struct xdp_buff xdp;
5034        int xdp_status = 0;
5035        int buf_sz;
5036
5037        dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5038        buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5039
5040        if (netif_msg_rx_status(priv)) {
5041                void *rx_head;
5042
5043                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5044                if (priv->extend_desc) {
5045                        rx_head = (void *)rx_q->dma_erx;
5046                        desc_size = sizeof(struct dma_extended_desc);
5047                } else {
5048                        rx_head = (void *)rx_q->dma_rx;
5049                        desc_size = sizeof(struct dma_desc);
5050                }
5051
5052                stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5053                                    rx_q->dma_rx_phy, desc_size);
5054        }
5055        while (count < limit) {
5056                unsigned int buf1_len = 0, buf2_len = 0;
5057                enum pkt_hash_types hash_type;
5058                struct stmmac_rx_buffer *buf;
5059                struct dma_desc *np, *p;
5060                int entry;
5061                u32 hash;
5062
5063                if (!count && rx_q->state_saved) {
5064                        skb = rx_q->state.skb;
5065                        error = rx_q->state.error;
5066                        len = rx_q->state.len;
5067                } else {
5068                        rx_q->state_saved = false;
5069                        skb = NULL;
5070                        error = 0;
5071                        len = 0;
5072                }
5073
5074                if (count >= limit)
5075                        break;
5076
5077read_again:
5078                buf1_len = 0;
5079                buf2_len = 0;
5080                entry = next_entry;
5081                buf = &rx_q->buf_pool[entry];
5082
5083                if (priv->extend_desc)
5084                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
5085                else
5086                        p = rx_q->dma_rx + entry;
5087
5088                /* read the status of the incoming frame */
5089                status = stmmac_rx_status(priv, &priv->dev->stats,
5090                                &priv->xstats, p);
5091                /* check if still owned by the DMA, otherwise go ahead */
5092                if (unlikely(status & dma_own))
5093                        break;
5094
5095                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5096                                                priv->dma_rx_size);
5097                next_entry = rx_q->cur_rx;
5098
5099                if (priv->extend_desc)
5100                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5101                else
5102                        np = rx_q->dma_rx + next_entry;
5103
5104                prefetch(np);
5105
5106                if (priv->extend_desc)
5107                        stmmac_rx_extended_status(priv, &priv->dev->stats,
5108                                        &priv->xstats, rx_q->dma_erx + entry);
5109                if (unlikely(status == discard_frame)) {
5110                        page_pool_recycle_direct(rx_q->page_pool, buf->page);
5111                        buf->page = NULL;
5112                        error = 1;
5113                        if (!priv->hwts_rx_en)
5114                                priv->dev->stats.rx_errors++;
5115                }
5116
5117                if (unlikely(error && (status & rx_not_ls)))
5118                        goto read_again;
5119                if (unlikely(error)) {
5120                        dev_kfree_skb(skb);
5121                        skb = NULL;
5122                        count++;
5123                        continue;
5124                }
5125
5126                /* Buffer is good. Go on. */
5127
5128                prefetch(page_address(buf->page) + buf->page_offset);
5129                if (buf->sec_page)
5130                        prefetch(page_address(buf->sec_page));
5131
5132                buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5133                len += buf1_len;
5134                buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5135                len += buf2_len;
5136
5137                /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5138                 * Type frames (LLC/LLC-SNAP)
5139                 *
5140                 * llc_snap is never checked in GMAC >= 4, so this ACS
5141                 * feature is always disabled and the FCS needs to be
5142                 * stripped manually.
5143                 */
5144                if (likely(!(status & rx_not_ls)) &&
5145                    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5146                     unlikely(status != llc_snap))) {
5147                        if (buf2_len)
5148                                buf2_len -= ETH_FCS_LEN;
5149                        else
5150                                buf1_len -= ETH_FCS_LEN;
5151
5152                        len -= ETH_FCS_LEN;
5153                }
5154
5155                if (!skb) {
5156                        unsigned int pre_len, sync_len;
5157
5158                        dma_sync_single_for_cpu(priv->device, buf->addr,
5159                                                buf1_len, dma_dir);
5160
5161                        xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5162                        xdp_prepare_buff(&xdp, page_address(buf->page),
5163                                         buf->page_offset, buf1_len, false);
5164
5165                        pre_len = xdp.data_end - xdp.data_hard_start -
5166                                  buf->page_offset;
5167                        skb = stmmac_xdp_run_prog(priv, &xdp);
5168                        /* Due to xdp_adjust_tail: the DMA sync for_device
5169                         * must cover the max length the CPU touched
5170                         */
5171                        sync_len = xdp.data_end - xdp.data_hard_start -
5172                                   buf->page_offset;
5173                        sync_len = max(sync_len, pre_len);
5174
5175                        /* For verdicts other than XDP_PASS */
5176                        if (IS_ERR(skb)) {
5177                                unsigned int xdp_res = -PTR_ERR(skb);
5178
5179                                if (xdp_res & STMMAC_XDP_CONSUMED) {
5180                                        page_pool_put_page(rx_q->page_pool,
5181                                                           virt_to_head_page(xdp.data),
5182                                                           sync_len, true);
5183                                        buf->page = NULL;
5184                                        priv->dev->stats.rx_dropped++;
5185
5186                                        /* Clear skb, as it was set to the
5187                                         * XDP verdict, not a real buffer.
5188                                         */
5189                                        skb = NULL;
5190
5191                                        if (unlikely((status & rx_not_ls)))
5192                                                goto read_again;
5193
5194                                        count++;
5195                                        continue;
5196                                } else if (xdp_res & (STMMAC_XDP_TX |
5197                                                      STMMAC_XDP_REDIRECT)) {
5198                                        xdp_status |= xdp_res;
5199                                        buf->page = NULL;
5200                                        skb = NULL;
5201                                        count++;
5202                                        continue;
5203                                }
5204                        }
5205                }
5206
5207                if (!skb) {
5208                        /* XDP program may expand or reduce tail */
5209                        buf1_len = xdp.data_end - xdp.data;
5210
5211                        skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5212                        if (!skb) {
5213                                priv->dev->stats.rx_dropped++;
5214                                count++;
5215                                goto drain_data;
5216                        }
5217
5218                        /* XDP program may adjust header */
5219                        skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5220                        skb_put(skb, buf1_len);
5221
5222                        /* Data payload copied into SKB, page ready for recycle */
5223                        page_pool_recycle_direct(rx_q->page_pool, buf->page);
5224                        buf->page = NULL;
5225                } else if (buf1_len) {
5226                        dma_sync_single_for_cpu(priv->device, buf->addr,
5227                                                buf1_len, dma_dir);
5228                        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5229                                        buf->page, buf->page_offset, buf1_len,
5230                                        priv->dma_buf_sz);
5231
5232                        /* Data payload appended into SKB */
5233                        page_pool_release_page(rx_q->page_pool, buf->page);
5234                        buf->page = NULL;
5235                }
5236
5237                if (buf2_len) {
5238                        dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5239                                                buf2_len, dma_dir);
5240                        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5241                                        buf->sec_page, 0, buf2_len,
5242                                        priv->dma_buf_sz);
5243
5244                        /* Data payload appended into SKB */
5245                        page_pool_release_page(rx_q->page_pool, buf->sec_page);
5246                        buf->sec_page = NULL;
5247                }
5248
5249drain_data:
5250                if (likely(status & rx_not_ls))
5251                        goto read_again;
5252                if (!skb)
5253                        continue;
5254
5255                /* Got entire packet into SKB. Finish it. */
5256
5257                stmmac_get_rx_hwtstamp(priv, p, np, skb);
5258                stmmac_rx_vlan(priv->dev, skb);
5259                skb->protocol = eth_type_trans(skb, priv->dev);
5260
5261                if (unlikely(!coe))
5262                        skb_checksum_none_assert(skb);
5263                else
5264                        skb->ip_summed = CHECKSUM_UNNECESSARY;
5265
5266                if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5267                        skb_set_hash(skb, hash, hash_type);
5268
5269                skb_record_rx_queue(skb, queue);
5270                napi_gro_receive(&ch->rx_napi, skb);
5271                skb = NULL;
5272
5273                priv->dev->stats.rx_packets++;
5274                priv->dev->stats.rx_bytes += len;
5275                count++;
5276        }
5277
5278        if (status & rx_not_ls || skb) {
5279                rx_q->state_saved = true;
5280                rx_q->state.skb = skb;
5281                rx_q->state.error = error;
5282                rx_q->state.len = len;
5283        }
5284
5285        stmmac_finalize_xdp_rx(priv, xdp_status);
5286
5287        stmmac_rx_refill(priv, queue);
5288
5289        priv->xstats.rx_pkt_n += count;
5290
5291        return count;
5292}
5293
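    /* NAPI poll for an RX-only channel; when the budget is not exhausted,
     * re-enable the RX DMA interrupt of the channel. TX has its own poll
     * function below.
     */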
5294static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5295{
5296        struct stmmac_channel *ch =
5297                container_of(napi, struct stmmac_channel, rx_napi);
5298        struct stmmac_priv *priv = ch->priv_data;
5299        u32 chan = ch->index;
5300        int work_done;
5301
5302        priv->xstats.napi_poll++;
5303
5304        work_done = stmmac_rx(priv, budget, chan);
5305        if (work_done < budget && napi_complete_done(napi, work_done)) {
5306                unsigned long flags;
5307
5308                spin_lock_irqsave(&ch->lock, flags);
5309                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5310                spin_unlock_irqrestore(&ch->lock, flags);
5311        }
5312
5313        return work_done;
5314}
5315
5316static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5317{
5318        struct stmmac_channel *ch =
5319                container_of(napi, struct stmmac_channel, tx_napi);
5320        struct stmmac_priv *priv = ch->priv_data;
5321        u32 chan = ch->index;
5322        int work_done;
5323
5324        priv->xstats.napi_poll++;
5325
5326        work_done = stmmac_tx_clean(priv, budget, chan);
5327        work_done = min(work_done, budget);
5328
5329        if (work_done < budget && napi_complete_done(napi, work_done)) {
5330                unsigned long flags;
5331
5332                spin_lock_irqsave(&ch->lock, flags);
5333                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5334                spin_unlock_irqrestore(&ch->lock, flags);
5335        }
5336
5337        return work_done;
5338}
5339
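    /* NAPI poll for AF_XDP zero-copy channels: clean the TX ring and run
     * the zero-copy RX loop from the same context.
     */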
5340static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5341{
5342        struct stmmac_channel *ch =
5343                container_of(napi, struct stmmac_channel, rxtx_napi);
5344        struct stmmac_priv *priv = ch->priv_data;
5345        int rx_done, tx_done;
5346        u32 chan = ch->index;
5347
5348        priv->xstats.napi_poll++;
5349
5350        tx_done = stmmac_tx_clean(priv, budget, chan);
5351        tx_done = min(tx_done, budget);
5352
5353        rx_done = stmmac_rx_zc(priv, budget, chan);
5354
5355        /* If either TX or RX work is not complete, return budget
5356         * and keep polling
5357         */
5358        if (tx_done >= budget || rx_done >= budget)
5359                return budget;
5360
5361        /* all work done, exit the polling mode */
5362        if (napi_complete_done(napi, rx_done)) {
5363                unsigned long flags;
5364
5365                spin_lock_irqsave(&ch->lock, flags);
5366                /* Both RX and TX work are complete,
5367                 * so enable both RX & TX IRQs.
5368                 */
5369                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5370                spin_unlock_irqrestore(&ch->lock, flags);
5371        }
5372
5373        return min(rx_done, budget - 1);
5374}
5375
5376/**
5377 *  stmmac_tx_timeout
5378 *  @dev : Pointer to net device structure
5379 *  @txqueue: the index of the hanging transmit queue
5380 *  Description: this function is called when a packet transmission fails to
5381 *   complete within a reasonable time. The driver will mark the error in the
5382 *   netdev structure and arrange for the device to be reset to a sane state
5383 *   in order to transmit a new packet.
5384 */
5385static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5386{
5387        struct stmmac_priv *priv = netdev_priv(dev);
5388
5389        stmmac_global_err(priv);
5390}
5391
5392/**
5393 *  stmmac_set_rx_mode - entry point for multicast addressing
5394 *  @dev : pointer to the device structure
5395 *  Description:
5396 *  This function is a driver entry point which gets called by the kernel
5397 *  whenever multicast addresses must be enabled/disabled.
5398 *  Return value:
5399 *  void.
5400 */
5401static void stmmac_set_rx_mode(struct net_device *dev)
5402{
5403        struct stmmac_priv *priv = netdev_priv(dev);
5404
5405        stmmac_set_filter(priv, priv->hw, dev);
5406}
5407
5408/**
5409 *  stmmac_change_mtu - entry point to change MTU size for the device.
5410 *  @dev : device pointer.
5411 *  @new_mtu : the new MTU size for the device.
5412 *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5413 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5414 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5415 *  Return value:
5416 *  0 on success and an appropriate (-)ve integer as defined in errno.h
5417 *  file on failure.
5418 */
5419static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5420{
5421        struct stmmac_priv *priv = netdev_priv(dev);
5422        int txfifosz = priv->plat->tx_fifo_size;
5423        const int mtu = new_mtu;
5424
5425        if (txfifosz == 0)
5426                txfifosz = priv->dma_cap.tx_fifo_size;
5427
5428        txfifosz /= priv->plat->tx_queues_to_use;
5429
5430        if (netif_running(dev)) {
5431                netdev_err(priv->dev, "must be stopped to change its MTU\n");
5432                return -EBUSY;
5433        }
5434
5435        if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5436                netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5437                return -EINVAL;
5438        }
5439
5440        new_mtu = STMMAC_ALIGN(new_mtu);
5441
5442        /* If condition true, FIFO is too small or MTU too large */
5443        if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5444                return -EINVAL;
5445
5446        dev->mtu = mtu;
5447
5448        netdev_update_features(dev);
5449
5450        return 0;
5451}
5452
5453static netdev_features_t stmmac_fix_features(struct net_device *dev,
5454                                             netdev_features_t features)
5455{
5456        struct stmmac_priv *priv = netdev_priv(dev);
5457
5458        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5459                features &= ~NETIF_F_RXCSUM;
5460
5461        if (!priv->plat->tx_coe)
5462                features &= ~NETIF_F_CSUM_MASK;
5463
5464        /* Some GMAC devices have a bugged Jumbo frame support that
5465         * needs to have the Tx COE disabled for oversized frames
5466         * (due to limited buffer sizes). In this case we disable
5467         * the TX csum insertion in the TDES and not use SF.
5468         */
5469        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5470                features &= ~NETIF_F_CSUM_MASK;
5471
5472        /* Disable tso if asked by ethtool */
5473        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5474                if (features & NETIF_F_TSO)
5475                        priv->tso = true;
5476                else
5477                        priv->tso = false;
5478        }
5479
5480        return features;
5481}
5482
5483static int stmmac_set_features(struct net_device *netdev,
5484                               netdev_features_t features)
5485{
5486        struct stmmac_priv *priv = netdev_priv(netdev);
5487        bool sph_en;
5488        u32 chan;
5489
5490        /* Keep the COE Type if checksum offloading is supported */
5491        if (features & NETIF_F_RXCSUM)
5492                priv->hw->rx_csum = priv->plat->rx_coe;
5493        else
5494                priv->hw->rx_csum = 0;
5495        /* No check needed: rx_coe has already been set and will be
5496         * corrected if there is an issue.
5497         */
5498        stmmac_rx_ipc(priv, priv->hw);
5499
5500        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5501
5502        for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5503                stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5504
5505        return 0;
5506}
5507
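    /* Track the Frame Preemption (FPE) handshake state machine from the
     * received/transmitted mPacket events and schedule the FPE workqueue
     * when a state change requires it.
     */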
5508static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5509{
5510        struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5511        enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5512        enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5513        bool *hs_enable = &fpe_cfg->hs_enable;
5514
5515        if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5516                return;
5517
5518        /* If LP has sent verify mPacket, LP is FPE capable */
5519        if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5520                if (*lp_state < FPE_STATE_CAPABLE)
5521                        *lp_state = FPE_STATE_CAPABLE;
5522
5523                /* If the user has requested FPE enable, respond quickly */
5524                if (*hs_enable)
5525                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5526                                                MPACKET_RESPONSE);
5527        }
5528
5529        /* If Local has sent verify mPacket, Local is FPE capable */
5530        if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5531                if (*lo_state < FPE_STATE_CAPABLE)
5532                        *lo_state = FPE_STATE_CAPABLE;
5533        }
5534
5535        /* If LP has sent response mPacket, LP is entering FPE ON */
5536        if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5537                *lp_state = FPE_STATE_ENTERING_ON;
5538
5539        /* If Local has sent response mPacket, Local is entering FPE ON */
5540        if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5541                *lo_state = FPE_STATE_ENTERING_ON;
5542
5543        if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5544            !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5545            priv->fpe_wq) {
5546                queue_work(priv->fpe_wq, &priv->fpe_task);
5547        }
5548}
5549
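    /* Handle the interrupt sources shared by all interrupt modes: EST,
     * FPE, MAC/MTL events, PCS link changes and timestamping.
     */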
5550static void stmmac_common_interrupt(struct stmmac_priv *priv)
5551{
5552        u32 rx_cnt = priv->plat->rx_queues_to_use;
5553        u32 tx_cnt = priv->plat->tx_queues_to_use;
5554        u32 queues_count;
5555        u32 queue;
5556        bool xmac;
5557
5558        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5559        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5560
5561        if (priv->irq_wake)
5562                pm_wakeup_event(priv->device, 0);
5563
5564        if (priv->dma_cap.estsel)
5565                stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5566                                      &priv->xstats, tx_cnt);
5567
5568        if (priv->dma_cap.fpesel) {
5569                int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5570                                                   priv->dev);
5571
5572                stmmac_fpe_event_status(priv, status);
5573        }
5574
5575        /* To handle the GMAC's own interrupts */
5576        if ((priv->plat->has_gmac) || xmac) {
5577                int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5578
5579                if (unlikely(status)) {
5580                        /* For LPI we need to save the tx status */
5581                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5582                                priv->tx_path_in_lpi_mode = true;
5583                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5584                                priv->tx_path_in_lpi_mode = false;
5585                }
5586
5587                for (queue = 0; queue < queues_count; queue++) {
5588                        status = stmmac_host_mtl_irq_status(priv, priv->hw,
5589                                                            queue);
5590                }
5591
5592                /* PCS link status */
5593                if (priv->hw->pcs) {
5594                        if (priv->xstats.pcs_link)
5595                                netif_carrier_on(priv->dev);
5596                        else
5597                                netif_carrier_off(priv->dev);
5598                }
5599
5600                stmmac_timestamp_interrupt(priv, priv);
5601        }
5602}
5603
5604/**
5605 *  stmmac_interrupt - main ISR
5606 *  @irq: interrupt number.
5607 *  @dev_id: to pass the net device pointer.
5608 *  Description: this is the main driver interrupt service routine.
5609 *  It can call:
5610 *  o DMA service routine (to manage incoming frame reception and transmission
5611 *    status)
5612 *  o Core interrupts to manage: remote wake-up, management counter, LPI
5613 *    interrupts.
5614 */
5615static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5616{
5617        struct net_device *dev = (struct net_device *)dev_id;
5618        struct stmmac_priv *priv = netdev_priv(dev);
5619
5620        /* Check if adapter is up */
5621        if (test_bit(STMMAC_DOWN, &priv->state))
5622                return IRQ_HANDLED;
5623
5624        /* Check if a fatal error happened */
5625        if (stmmac_safety_feat_interrupt(priv))
5626                return IRQ_HANDLED;
5627
5628        /* To handle Common interrupts */
5629        stmmac_common_interrupt(priv);
5630
5631        /* To handle DMA interrupts */
5632        stmmac_dma_interrupt(priv);
5633
5634        return IRQ_HANDLED;
5635}
5636
5637static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5638{
5639        struct net_device *dev = (struct net_device *)dev_id;
5640        struct stmmac_priv *priv = netdev_priv(dev);
5641
5642        if (unlikely(!dev)) {
5643                netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5644                return IRQ_NONE;
5645        }
5646
5647        /* Check if adapter is up */
5648        if (test_bit(STMMAC_DOWN, &priv->state))
5649                return IRQ_HANDLED;
5650
5651        /* To handle Common interrupts */
5652        stmmac_common_interrupt(priv);
5653
5654        return IRQ_HANDLED;
5655}
5656
5657static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5658{
5659        struct net_device *dev = (struct net_device *)dev_id;
5660        struct stmmac_priv *priv = netdev_priv(dev);
5661
5662        if (unlikely(!dev)) {
5663                netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5664                return IRQ_NONE;
5665        }
5666
5667        /* Check if adapter is up */
5668        if (test_bit(STMMAC_DOWN, &priv->state))
5669                return IRQ_HANDLED;
5670
5671        /* Check if a fatal error happened */
5672        stmmac_safety_feat_interrupt(priv);
5673
5674        return IRQ_HANDLED;
5675}
5676
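    /* Per-queue TX MSI handler: schedule NAPI for the channel and, on DMA
     * errors, bump the threshold or trigger TX error recovery.
     */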
5677static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5678{
        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
        struct stmmac_priv *priv;
        int chan;
        int status;

        /* Validate the queue pointer before the first dereference */
        if (unlikely(!data)) {
                pr_err("%s: invalid queue pointer\n", __func__);
                return IRQ_NONE;
        }

        chan = tx_q->queue_index;
        priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5690
5691        /* Check if adapter is up */
5692        if (test_bit(STMMAC_DOWN, &priv->state))
5693                return IRQ_HANDLED;
5694
5695        status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5696
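        /* A TX threshold error triggers a bump of the DMA threshold: it
         * is raised in steps of 64 up to a ceiling of 256.  In pure
         * store-and-forward mode (SF_DMA_MODE) the threshold does not
         * apply, so no bump is attempted.
         */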
5697        if (unlikely(status & tx_hard_error_bump_tc)) {
5698                /* Try to bump up the dma threshold on this failure */
5699                if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5700                    tc <= 256) {
5701                        tc += 64;
5702                        if (priv->plat->force_thresh_dma_mode)
5703                                stmmac_set_dma_operation_mode(priv,
5704                                                              tc,
5705                                                              tc,
5706                                                              chan);
5707                        else
5708                                stmmac_set_dma_operation_mode(priv,
5709                                                              tc,
5710                                                              SF_DMA_MODE,
5711                                                              chan);
5712                        priv->xstats.threshold = tc;
5713                }
5714        } else if (unlikely(status == tx_hard_error)) {
5715                stmmac_tx_err(priv, chan);
5716        }
5717
5718        return IRQ_HANDLED;
5719}
5720
5721static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5722{
        struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
        struct stmmac_priv *priv;
        int chan;

        /* Validate the queue pointer before the first dereference */
        if (unlikely(!data)) {
                pr_err("%s: invalid queue pointer\n", __func__);
                return IRQ_NONE;
        }

        chan = rx_q->queue_index;
        priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5733
5734        /* Check if adapter is up */
5735        if (test_bit(STMMAC_DOWN, &priv->state))
5736                return IRQ_HANDLED;
5737
5738        stmmac_napi_check(priv, chan, DMA_DIR_RX);
5739
5740        return IRQ_HANDLED;
5741}
5742
5743#ifdef CONFIG_NET_POLL_CONTROLLER
5744/* Polling receive - used by NETCONSOLE and other diagnostic tools
5745 * to allow network I/O with interrupts disabled.
5746 */
5747static void stmmac_poll_controller(struct net_device *dev)
5748{
5749        struct stmmac_priv *priv = netdev_priv(dev);
5750        int i;
5751
5752        /* If adapter is down, do nothing */
5753        if (test_bit(STMMAC_DOWN, &priv->state))
5754                return;
5755
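        /* With per-channel MSI vectors each queue handler is invoked
         * directly; otherwise the single shared interrupt line is replayed.
         */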
5756        if (priv->plat->multi_msi_en) {
5757                for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5758                        stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5759
5760                for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5761                        stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5762        } else {
5763                disable_irq(dev->irq);
5764                stmmac_interrupt(dev->irq, dev);
5765                enable_irq(dev->irq);
5766        }
5767}
5768#endif
5769
5770/**
5771 *  stmmac_ioctl - Entry point for the Ioctl
5772 *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure that can contain a pointer to
5774 *  a proprietary structure used to pass information to the driver.
5775 *  @cmd: IOCTL command
5776 *  Description:
5777 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5778 */
5779static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5780{
        struct stmmac_priv *priv = netdev_priv(dev);
5782        int ret = -EOPNOTSUPP;
5783
5784        if (!netif_running(dev))
5785                return -EINVAL;
5786
5787        switch (cmd) {
5788        case SIOCGMIIPHY:
5789        case SIOCGMIIREG:
5790        case SIOCSMIIREG:
5791                ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5792                break;
5793        case SIOCSHWTSTAMP:
5794                ret = stmmac_hwtstamp_set(dev, rq);
5795                break;
5796        case SIOCGHWTSTAMP:
5797                ret = stmmac_hwtstamp_get(dev, rq);
5798                break;
5799        default:
5800                break;
5801        }
5802
5803        return ret;
5804}
5805
5806static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5807                                    void *cb_priv)
5808{
5809        struct stmmac_priv *priv = cb_priv;
5810        int ret = -EOPNOTSUPP;
5811
5812        if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5813                return ret;
5814
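        /* Quiesce all NAPI instances while the hardware classifier is
         * reprogrammed, then re-enable them below.
         */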
5815        __stmmac_disable_all_queues(priv);
5816
5817        switch (type) {
5818        case TC_SETUP_CLSU32:
5819                ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5820                break;
5821        case TC_SETUP_CLSFLOWER:
5822                ret = stmmac_tc_setup_cls(priv, priv, type_data);
5823                break;
5824        default:
5825                break;
5826        }
5827
5828        stmmac_enable_all_queues(priv);
5829        return ret;
5830}
5831
5832static LIST_HEAD(stmmac_block_cb_list);
5833
5834static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5835                           void *type_data)
5836{
5837        struct stmmac_priv *priv = netdev_priv(ndev);
5838
5839        switch (type) {
5840        case TC_SETUP_BLOCK:
5841                return flow_block_cb_setup_simple(type_data,
5842                                                  &stmmac_block_cb_list,
5843                                                  stmmac_setup_tc_block_cb,
5844                                                  priv, priv, true);
5845        case TC_SETUP_QDISC_CBS:
5846                return stmmac_tc_setup_cbs(priv, priv, type_data);
5847        case TC_SETUP_QDISC_TAPRIO:
5848                return stmmac_tc_setup_taprio(priv, priv, type_data);
5849        case TC_SETUP_QDISC_ETF:
5850                return stmmac_tc_setup_etf(priv, priv, type_data);
5851        default:
5852                return -EOPNOTSUPP;
5853        }
5854}
5855
5856static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5857                               struct net_device *sb_dev)
5858{
5859        int gso = skb_shinfo(skb)->gso_type;
5860
5861        if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
                /*
                 * There is no way to determine the number of TSO/USO
                 * capable queues, so always use queue 0: if TSO/USO
                 * is supported at all, then at least queue 0 is
                 * guaranteed to be capable.
                 */
5868                return 0;
5869        }
5870
5871        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5872}
5873
5874static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5875{
5876        struct stmmac_priv *priv = netdev_priv(ndev);
5877        int ret = 0;
5878
5879        ret = pm_runtime_get_sync(priv->device);
5880        if (ret < 0) {
5881                pm_runtime_put_noidle(priv->device);
5882                return ret;
5883        }
5884
5885        ret = eth_mac_addr(ndev, addr);
5886        if (ret)
5887                goto set_mac_error;
5888
5889        stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5890
5891set_mac_error:
5892        pm_runtime_put(priv->device);
5893
5894        return ret;
5895}
5896
5897#ifdef CONFIG_DEBUG_FS
5898static struct dentry *stmmac_fs_dir;
5899
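/* Dump @size descriptors starting at @head, one line per descriptor:
 * the descriptor bus address followed by the raw des0..des3 words.
 * @extend_desc selects the extended vs. basic descriptor layout.
 */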
5900static void sysfs_display_ring(void *head, int size, int extend_desc,
5901                               struct seq_file *seq, dma_addr_t dma_phy_addr)
5902{
5903        int i;
5904        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5905        struct dma_desc *p = (struct dma_desc *)head;
5906        dma_addr_t dma_addr;
5907
5908        for (i = 0; i < size; i++) {
5909                if (extend_desc) {
5910                        dma_addr = dma_phy_addr + i * sizeof(*ep);
5911                        seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5912                                   i, &dma_addr,
5913                                   le32_to_cpu(ep->basic.des0),
5914                                   le32_to_cpu(ep->basic.des1),
5915                                   le32_to_cpu(ep->basic.des2),
5916                                   le32_to_cpu(ep->basic.des3));
5917                        ep++;
5918                } else {
5919                        dma_addr = dma_phy_addr + i * sizeof(*p);
5920                        seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5921                                   i, &dma_addr,
5922                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5923                                   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5924                        p++;
5925                }
                seq_putc(seq, '\n');
5927        }
5928}
5929
5930static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5931{
5932        struct net_device *dev = seq->private;
5933        struct stmmac_priv *priv = netdev_priv(dev);
5934        u32 rx_count = priv->plat->rx_queues_to_use;
5935        u32 tx_count = priv->plat->tx_queues_to_use;
5936        u32 queue;
5937
5938        if ((dev->flags & IFF_UP) == 0)
5939                return 0;
5940
5941        for (queue = 0; queue < rx_count; queue++) {
5942                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5943
5944                seq_printf(seq, "RX Queue %d:\n", queue);
5945
5946                if (priv->extend_desc) {
5947                        seq_printf(seq, "Extended descriptor ring:\n");
5948                        sysfs_display_ring((void *)rx_q->dma_erx,
5949                                           priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5950                } else {
5951                        seq_printf(seq, "Descriptor ring:\n");
5952                        sysfs_display_ring((void *)rx_q->dma_rx,
5953                                           priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5954                }
5955        }
5956
5957        for (queue = 0; queue < tx_count; queue++) {
5958                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5959
5960                seq_printf(seq, "TX Queue %d:\n", queue);
5961
5962                if (priv->extend_desc) {
5963                        seq_printf(seq, "Extended descriptor ring:\n");
5964                        sysfs_display_ring((void *)tx_q->dma_etx,
5965                                           priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
5966                } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
5967                        seq_printf(seq, "Descriptor ring:\n");
5968                        sysfs_display_ring((void *)tx_q->dma_tx,
5969                                           priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
5970                }
5971        }
5972
5973        return 0;
5974}
5975DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
5976
5977static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
5978{
5979        struct net_device *dev = seq->private;
5980        struct stmmac_priv *priv = netdev_priv(dev);
5981
5982        if (!priv->hw_cap_support) {
5983                seq_printf(seq, "DMA HW features not supported\n");
5984                return 0;
5985        }
5986
5987        seq_printf(seq, "==============================\n");
5988        seq_printf(seq, "\tDMA HW features\n");
5989        seq_printf(seq, "==============================\n");
5990
5991        seq_printf(seq, "\t10/100 Mbps: %s\n",
5992                   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
5993        seq_printf(seq, "\t1000 Mbps: %s\n",
5994                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
5995        seq_printf(seq, "\tHalf duplex: %s\n",
5996                   (priv->dma_cap.half_duplex) ? "Y" : "N");
5997        seq_printf(seq, "\tHash Filter: %s\n",
5998                   (priv->dma_cap.hash_filter) ? "Y" : "N");
5999        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6000                   (priv->dma_cap.multi_addr) ? "Y" : "N");
6001        seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6002                   (priv->dma_cap.pcs) ? "Y" : "N");
6003        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6004                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6005        seq_printf(seq, "\tPMT Remote wake up: %s\n",
6006                   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6007        seq_printf(seq, "\tPMT Magic Frame: %s\n",
6008                   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6009        seq_printf(seq, "\tRMON module: %s\n",
6010                   (priv->dma_cap.rmon) ? "Y" : "N");
6011        seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6012                   (priv->dma_cap.time_stamp) ? "Y" : "N");
6013        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6014                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6015        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6016                   (priv->dma_cap.eee) ? "Y" : "N");
6017        seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6018        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6019                   (priv->dma_cap.tx_coe) ? "Y" : "N");
6020        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6021                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6022                           (priv->dma_cap.rx_coe) ? "Y" : "N");
6023        } else {
6024                seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6025                           (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6026                seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6027                           (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6028        }
6029        seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6030                   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6031        seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6032                   priv->dma_cap.number_rx_channel);
6033        seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6034                   priv->dma_cap.number_tx_channel);
6035        seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6036                   priv->dma_cap.number_rx_queues);
6037        seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6038                   priv->dma_cap.number_tx_queues);
6039        seq_printf(seq, "\tEnhanced descriptors: %s\n",
6040                   (priv->dma_cap.enh_desc) ? "Y" : "N");
6041        seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6042        seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6043        seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6044        seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6045        seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6046                   priv->dma_cap.pps_out_num);
6047        seq_printf(seq, "\tSafety Features: %s\n",
6048                   priv->dma_cap.asp ? "Y" : "N");
6049        seq_printf(seq, "\tFlexible RX Parser: %s\n",
6050                   priv->dma_cap.frpsel ? "Y" : "N");
6051        seq_printf(seq, "\tEnhanced Addressing: %d\n",
6052                   priv->dma_cap.addr64);
6053        seq_printf(seq, "\tReceive Side Scaling: %s\n",
6054                   priv->dma_cap.rssen ? "Y" : "N");
6055        seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6056                   priv->dma_cap.vlhash ? "Y" : "N");
6057        seq_printf(seq, "\tSplit Header: %s\n",
6058                   priv->dma_cap.sphen ? "Y" : "N");
6059        seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6060                   priv->dma_cap.vlins ? "Y" : "N");
6061        seq_printf(seq, "\tDouble VLAN: %s\n",
6062                   priv->dma_cap.dvlan ? "Y" : "N");
6063        seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6064                   priv->dma_cap.l3l4fnum);
6065        seq_printf(seq, "\tARP Offloading: %s\n",
6066                   priv->dma_cap.arpoffsel ? "Y" : "N");
6067        seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6068                   priv->dma_cap.estsel ? "Y" : "N");
6069        seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6070                   priv->dma_cap.fpesel ? "Y" : "N");
6071        seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6072                   priv->dma_cap.tbssel ? "Y" : "N");
6073        return 0;
6074}
6075DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6076
6077/* Use network device events to rename debugfs file entries.
6078 */
6079static int stmmac_device_event(struct notifier_block *unused,
6080                               unsigned long event, void *ptr)
6081{
6082        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6083        struct stmmac_priv *priv = netdev_priv(dev);
6084
6085        if (dev->netdev_ops != &stmmac_netdev_ops)
6086                goto done;
6087
6088        switch (event) {
6089        case NETDEV_CHANGENAME:
6090                if (priv->dbgfs_dir)
6091                        priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6092                                                         priv->dbgfs_dir,
6093                                                         stmmac_fs_dir,
6094                                                         dev->name);
6095                break;
6096        }
6097done:
6098        return NOTIFY_DONE;
6099}
6100
6101static struct notifier_block stmmac_notifier = {
6102        .notifier_call = stmmac_device_event,
6103};
6104
6105static void stmmac_init_fs(struct net_device *dev)
6106{
6107        struct stmmac_priv *priv = netdev_priv(dev);
6108
6109        rtnl_lock();
6110
6111        /* Create per netdev entries */
6112        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6113
6114        /* Entry to report DMA RX/TX rings */
6115        debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6116                            &stmmac_rings_status_fops);
6117
6118        /* Entry to report the DMA HW features */
6119        debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6120                            &stmmac_dma_cap_fops);
6121
6122        rtnl_unlock();
6123}
6124
6125static void stmmac_exit_fs(struct net_device *dev)
6126{
6127        struct stmmac_priv *priv = netdev_priv(dev);
6128
6129        debugfs_remove_recursive(priv->dbgfs_dir);
6130}
6131#endif /* CONFIG_DEBUG_FS */
6132
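/* Bit-serial CRC-32 (little-endian bit order, reflected polynomial
 * 0xedb88320) over the valid VID bits.  stmmac_vlan_update() below uses
 * the top nibble of the bit-reversed result to select one of the 16
 * VLAN hash bins.
 */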
6133static u32 stmmac_vid_crc32_le(__le16 vid_le)
6134{
6135        unsigned char *data = (unsigned char *)&vid_le;
6136        unsigned char data_byte = 0;
6137        u32 crc = ~0x0;
6138        u32 temp = 0;
6139        int i, bits;
6140
6141        bits = get_bitmask_order(VLAN_VID_MASK);
6142        for (i = 0; i < bits; i++) {
6143                if ((i % 8) == 0)
6144                        data_byte = data[i / 8];
6145
6146                temp = ((crc & 1) ^ data_byte) & 1;
6147                crc >>= 1;
6148                data_byte >>= 1;
6149
6150                if (temp)
6151                        crc ^= 0xedb88320;
6152        }
6153
6154        return crc;
6155}
6156
6157static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6158{
        u32 crc, hash = 0;
        __le16 pmatch = 0;
        u16 vid, last_vid = 0;
        int count = 0;

        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
                __le16 vid_le = cpu_to_le16(vid);
                crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
                hash |= (1 << crc);
                count++;
                last_vid = vid;
        }

        if (!priv->dma_cap.vlhash) {
                if (count > 2) /* VID = 0 always passes filter */
                        return -EOPNOTSUPP;

                /* Use the last configured VID as the perfect-match filter;
                 * the loop iterator itself is VLAN_N_VID once the loop ends.
                 */
                pmatch = cpu_to_le16(last_vid);
                hash = 0;
        }

        return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6180}
6181
6182static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6183{
6184        struct stmmac_priv *priv = netdev_priv(ndev);
6185        bool is_double = false;
6186        int ret;
6187
6188        if (be16_to_cpu(proto) == ETH_P_8021AD)
6189                is_double = true;
6190
6191        set_bit(vid, priv->active_vlans);
6192        ret = stmmac_vlan_update(priv, is_double);
6193        if (ret) {
6194                clear_bit(vid, priv->active_vlans);
6195                return ret;
6196        }
6197
6198        if (priv->hw->num_vlan) {
6199                ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6200                if (ret)
6201                        return ret;
6202        }
6203
6204        return 0;
6205}
6206
6207static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6208{
6209        struct stmmac_priv *priv = netdev_priv(ndev);
6210        bool is_double = false;
6211        int ret;
6212
6213        ret = pm_runtime_get_sync(priv->device);
6214        if (ret < 0) {
6215                pm_runtime_put_noidle(priv->device);
6216                return ret;
6217        }
6218
6219        if (be16_to_cpu(proto) == ETH_P_8021AD)
6220                is_double = true;
6221
6222        clear_bit(vid, priv->active_vlans);
6223
6224        if (priv->hw->num_vlan) {
6225                ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6226                if (ret)
6227                        goto del_vlan_error;
6228        }
6229
6230        ret = stmmac_vlan_update(priv, is_double);
6231
6232del_vlan_error:
6233        pm_runtime_put(priv->device);
6234
6235        return ret;
6236}
6237
6238static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6239{
6240        struct stmmac_priv *priv = netdev_priv(dev);
6241
6242        switch (bpf->command) {
6243        case XDP_SETUP_PROG:
6244                return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6245        case XDP_SETUP_XSK_POOL:
6246                return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6247                                             bpf->xsk.queue_id);
6248        default:
6249                return -EOPNOTSUPP;
6250        }
6251}
6252
6253static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6254                           struct xdp_frame **frames, u32 flags)
6255{
6256        struct stmmac_priv *priv = netdev_priv(dev);
6257        int cpu = smp_processor_id();
6258        struct netdev_queue *nq;
6259        int i, nxmit = 0;
6260        int queue;
6261
6262        if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6263                return -ENETDOWN;
6264
6265        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6266                return -EINVAL;
6267
6268        queue = stmmac_xdp_get_tx_queue(priv, cpu);
6269        nq = netdev_get_tx_queue(priv->dev, queue);
6270
6271        __netif_tx_lock(nq, cpu);
6272        /* Avoids TX time-out as we are sharing with slow path */
6273        nq->trans_start = jiffies;
6274
6275        for (i = 0; i < num_frames; i++) {
6276                int res;
6277
6278                res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6279                if (res == STMMAC_XDP_CONSUMED)
6280                        break;
6281
6282                nxmit++;
6283        }
6284
6285        if (flags & XDP_XMIT_FLUSH) {
6286                stmmac_flush_tx_descriptors(priv, queue);
6287                stmmac_tx_timer_arm(priv, queue);
6288        }
6289
6290        __netif_tx_unlock(nq);
6291
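        /* Per the ndo_xdp_xmit() contract, return the number of frames
         * actually queued; the caller keeps ownership of, and frees, any
         * frames beyond nxmit.
         */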
6292        return nxmit;
6293}
6294
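/**
 * stmmac_disable_rx_queue - quiesce one RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: masks the channel's RX DMA interrupt, stops the RX DMA
 * and frees the queue's descriptor resources, e.g. when an AF_XDP
 * buffer pool is attached or detached at runtime.
 */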
6295void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6296{
6297        struct stmmac_channel *ch = &priv->channel[queue];
6298        unsigned long flags;
6299
6300        spin_lock_irqsave(&ch->lock, flags);
6301        stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6302        spin_unlock_irqrestore(&ch->lock, flags);
6303
6304        stmmac_stop_rx_dma(priv, queue);
6305        __free_dma_rx_desc_resources(priv, queue);
6306}
6307
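/**
 * stmmac_enable_rx_queue - bring one RX queue back up
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: reallocates and initializes the RX descriptor ring,
 * reprograms the DMA channel (tail pointer and buffer size, honouring
 * any attached XSK pool's frame size), restarts the RX DMA and unmasks
 * the channel's RX interrupt.
 */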
6308void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6309{
6310        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6311        struct stmmac_channel *ch = &priv->channel[queue];
6312        unsigned long flags;
6313        u32 buf_size;
6314        int ret;
6315
6316        ret = __alloc_dma_rx_desc_resources(priv, queue);
6317        if (ret) {
6318                netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6319                return;
6320        }
6321
6322        ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6323        if (ret) {
6324                __free_dma_rx_desc_resources(priv, queue);
6325                netdev_err(priv->dev, "Failed to init RX desc.\n");
6326                return;
6327        }
6328
6329        stmmac_clear_rx_descriptors(priv, queue);
6330
6331        stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6332                            rx_q->dma_rx_phy, rx_q->queue_index);
6333
6334        rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6335                             sizeof(struct dma_desc));
6336        stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6337                               rx_q->rx_tail_addr, rx_q->queue_index);
6338
6339        if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6340                buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6341                stmmac_set_dma_bfsize(priv, priv->ioaddr,
6342                                      buf_size,
6343                                      rx_q->queue_index);
6344        } else {
6345                stmmac_set_dma_bfsize(priv, priv->ioaddr,
6346                                      priv->dma_buf_sz,
6347                                      rx_q->queue_index);
6348        }
6349
6350        stmmac_start_rx_dma(priv, queue);
6351
6352        spin_lock_irqsave(&ch->lock, flags);
6353        stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6354        spin_unlock_irqrestore(&ch->lock, flags);
6355}
6356
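/**
 * stmmac_disable_tx_queue - quiesce one TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: masks the channel's TX DMA interrupt, stops the TX DMA
 * and frees the queue's descriptor resources.
 */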
6357void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6358{
6359        struct stmmac_channel *ch = &priv->channel[queue];
6360        unsigned long flags;
6361
6362        spin_lock_irqsave(&ch->lock, flags);
6363        stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6364        spin_unlock_irqrestore(&ch->lock, flags);
6365
6366        stmmac_stop_tx_dma(priv, queue);
6367        __free_dma_tx_desc_resources(priv, queue);
6368}
6369
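/**
 * stmmac_enable_tx_queue - bring one TX queue back up
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: reallocates and initializes the TX descriptor ring,
 * reprograms the DMA channel (enabling TBS when available), restarts
 * the TX DMA and unmasks the channel's TX interrupt.
 */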
6370void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6371{
6372        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6373        struct stmmac_channel *ch = &priv->channel[queue];
6374        unsigned long flags;
6375        int ret;
6376
6377        ret = __alloc_dma_tx_desc_resources(priv, queue);
6378        if (ret) {
6379                netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6380                return;
6381        }
6382
6383        ret = __init_dma_tx_desc_rings(priv, queue);
6384        if (ret) {
6385                __free_dma_tx_desc_resources(priv, queue);
6386                netdev_err(priv->dev, "Failed to init TX desc.\n");
6387                return;
6388        }
6389
6390        stmmac_clear_tx_descriptors(priv, queue);
6391
6392        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6393                            tx_q->dma_tx_phy, tx_q->queue_index);
6394
6395        if (tx_q->tbs & STMMAC_TBS_AVAIL)
6396                stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6397
6398        tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6399        stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6400                               tx_q->tx_tail_addr, tx_q->queue_index);
6401
6402        stmmac_start_tx_dma(priv, queue);
6403
6404        spin_lock_irqsave(&ch->lock, flags);
6405        stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6406        spin_unlock_irqrestore(&ch->lock, flags);
6407}
6408
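/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup handler
 * @dev: network device pointer
 * @queue: index of the queue to wake
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (unused here: both
 * directions are served by the same rxtx NAPI instance)
 * Description: kicks the per-channel rxtx NAPI so pending AF_XDP RX
 * fill and TX work is processed.
 */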
6409int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6410{
6411        struct stmmac_priv *priv = netdev_priv(dev);
6412        struct stmmac_rx_queue *rx_q;
6413        struct stmmac_tx_queue *tx_q;
6414        struct stmmac_channel *ch;
6415
6416        if (test_bit(STMMAC_DOWN, &priv->state) ||
6417            !netif_carrier_ok(priv->dev))
6418                return -ENETDOWN;
6419
6420        if (!stmmac_xdp_is_enabled(priv))
6421                return -ENXIO;
6422
6423        if (queue >= priv->plat->rx_queues_to_use ||
6424            queue >= priv->plat->tx_queues_to_use)
6425                return -EINVAL;
6426
6427        rx_q = &priv->rx_queue[queue];
6428        tx_q = &priv->tx_queue[queue];
6429        ch = &priv->channel[queue];
6430
6431        if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6432                return -ENXIO;
6433
6434        if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
                /* EQoS does not have a per-DMA channel SW interrupt,
                 * so we schedule the RX/TX NAPI straight away.
                 */
6438                if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6439                        __napi_schedule(&ch->rxtx_napi);
6440        }
6441
6442        return 0;
6443}
6444
6445static const struct net_device_ops stmmac_netdev_ops = {
6446        .ndo_open = stmmac_open,
6447        .ndo_start_xmit = stmmac_xmit,
6448        .ndo_stop = stmmac_release,
6449        .ndo_change_mtu = stmmac_change_mtu,
6450        .ndo_fix_features = stmmac_fix_features,
6451        .ndo_set_features = stmmac_set_features,
6452        .ndo_set_rx_mode = stmmac_set_rx_mode,
6453        .ndo_tx_timeout = stmmac_tx_timeout,
6454        .ndo_do_ioctl = stmmac_ioctl,
6455        .ndo_setup_tc = stmmac_setup_tc,
6456        .ndo_select_queue = stmmac_select_queue,
6457#ifdef CONFIG_NET_POLL_CONTROLLER
6458        .ndo_poll_controller = stmmac_poll_controller,
6459#endif
6460        .ndo_set_mac_address = stmmac_set_mac_address,
6461        .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6462        .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6463        .ndo_bpf = stmmac_bpf,
6464        .ndo_xdp_xmit = stmmac_xdp_xmit,
6465        .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6466};
6467
6468static void stmmac_reset_subtask(struct stmmac_priv *priv)
6469{
6470        if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6471                return;
6472        if (test_bit(STMMAC_DOWN, &priv->state))
6473                return;
6474
6475        netdev_err(priv->dev, "Reset adapter.\n");
6476
6477        rtnl_lock();
6478        netif_trans_update(priv->dev);
6479        while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6480                usleep_range(1000, 2000);
6481
6482        set_bit(STMMAC_DOWN, &priv->state);
6483        dev_close(priv->dev);
6484        dev_open(priv->dev, NULL);
6485        clear_bit(STMMAC_DOWN, &priv->state);
6486        clear_bit(STMMAC_RESETING, &priv->state);
6487        rtnl_unlock();
6488}
6489
6490static void stmmac_service_task(struct work_struct *work)
6491{
6492        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6493                        service_task);
6494
6495        stmmac_reset_subtask(priv);
6496        clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6497}
6498
6499/**
6500 *  stmmac_hw_init - Init the MAC device
6501 *  @priv: driver private structure
6502 *  Description: this function is to configure the MAC device according to
6503 *  some platform parameters or the HW capability register. It prepares the
6504 *  driver to use either ring or chain modes and to setup either enhanced or
6505 *  normal descriptors.
6506 */
6507static int stmmac_hw_init(struct stmmac_priv *priv)
6508{
6509        int ret;
6510
        /* dwmac-sun8i only works in chain mode */
6512        if (priv->plat->has_sun8i)
6513                chain_mode = 1;
6514        priv->chain_mode = chain_mode;
6515
6516        /* Initialize HW Interface */
6517        ret = stmmac_hwif_init(priv);
6518        if (ret)
6519                return ret;
6520
        /* Get the HW capability (GMAC cores newer than 3.50a) */
6522        priv->hw_cap_support = stmmac_get_hw_features(priv);
6523        if (priv->hw_cap_support) {
6524                dev_info(priv->device, "DMA HW capability register supported\n");
6525
                /* We can override some gmac/dma configuration fields that
                 * are passed through the platform (e.g. enh_desc, tx_coe)
                 * with the values from the HW capability register (if
                 * supported).
                 */
6531                priv->plat->enh_desc = priv->dma_cap.enh_desc;
6532                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6533                                !priv->plat->use_phy_wol;
6534                priv->hw->pmt = priv->plat->pmt;
6535                if (priv->dma_cap.hash_tb_sz) {
6536                        priv->hw->multicast_filter_bins =
6537                                        (BIT(priv->dma_cap.hash_tb_sz) << 5);
6538                        priv->hw->mcast_bits_log2 =
6539                                        ilog2(priv->hw->multicast_filter_bins);
6540                }
6541
6542                /* TXCOE doesn't work in thresh DMA mode */
6543                if (priv->plat->force_thresh_dma_mode)
6544                        priv->plat->tx_coe = 0;
6545                else
6546                        priv->plat->tx_coe = priv->dma_cap.tx_coe;
6547
                /* In case of GMAC4, rx_coe comes from the HW cap register. */
6549                priv->plat->rx_coe = priv->dma_cap.rx_coe;
6550
6551                if (priv->dma_cap.rx_coe_type2)
6552                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6553                else if (priv->dma_cap.rx_coe_type1)
6554                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6555
6556        } else {
6557                dev_info(priv->device, "No HW DMA feature register supported\n");
6558        }
6559
6560        if (priv->plat->rx_coe) {
6561                priv->hw->rx_csum = priv->plat->rx_coe;
6562                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6563                if (priv->synopsys_id < DWMAC_CORE_4_00)
6564                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6565        }
6566        if (priv->plat->tx_coe)
6567                dev_info(priv->device, "TX Checksum insertion supported\n");
6568
6569        if (priv->plat->pmt) {
6570                dev_info(priv->device, "Wake-Up On Lan supported\n");
6571                device_set_wakeup_capable(priv->device, 1);
6572        }
6573
6574        if (priv->dma_cap.tsoen)
6575                dev_info(priv->device, "TSO supported\n");
6576
6577        priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6578        priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6579
6580        /* Run HW quirks, if any */
6581        if (priv->hwif_quirks) {
6582                ret = priv->hwif_quirks(priv);
6583                if (ret)
6584                        return ret;
6585        }
6586
        /* The Rx watchdog is available on cores newer than 3.40.
         * In some cases, for example on buggy HW, this feature
         * has to be disabled; this can be done by setting the
         * riwt_off field from the platform.
         */
6592        if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6593            (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6594                priv->use_riwt = 1;
6595                dev_info(priv->device,
6596                         "Enable RX Mitigation via HW Watchdog Timer\n");
6597        }
6598
6599        return 0;
6600}
6601
6602static void stmmac_napi_add(struct net_device *dev)
6603{
6604        struct stmmac_priv *priv = netdev_priv(dev);
6605        u32 queue, maxq;
6606
6607        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6608
6609        for (queue = 0; queue < maxq; queue++) {
6610                struct stmmac_channel *ch = &priv->channel[queue];
6611
6612                ch->priv_data = priv;
6613                ch->index = queue;
6614                spin_lock_init(&ch->lock);
6615
6616                if (queue < priv->plat->rx_queues_to_use) {
6617                        netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6618                                       NAPI_POLL_WEIGHT);
6619                }
6620                if (queue < priv->plat->tx_queues_to_use) {
6621                        netif_tx_napi_add(dev, &ch->tx_napi,
6622                                          stmmac_napi_poll_tx,
6623                                          NAPI_POLL_WEIGHT);
6624                }
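                /* Channels with both an RX and a TX queue also get a
                 * combined rxtx NAPI instance, used by the XSK zero-copy
                 * path (see stmmac_xsk_wakeup()).
                 */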
6625                if (queue < priv->plat->rx_queues_to_use &&
6626                    queue < priv->plat->tx_queues_to_use) {
6627                        netif_napi_add(dev, &ch->rxtx_napi,
6628                                       stmmac_napi_poll_rxtx,
6629                                       NAPI_POLL_WEIGHT);
6630                }
6631        }
6632}
6633
6634static void stmmac_napi_del(struct net_device *dev)
6635{
6636        struct stmmac_priv *priv = netdev_priv(dev);
6637        u32 queue, maxq;
6638
6639        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6640
6641        for (queue = 0; queue < maxq; queue++) {
6642                struct stmmac_channel *ch = &priv->channel[queue];
6643
6644                if (queue < priv->plat->rx_queues_to_use)
6645                        netif_napi_del(&ch->rx_napi);
6646                if (queue < priv->plat->tx_queues_to_use)
6647                        netif_napi_del(&ch->tx_napi);
6648                if (queue < priv->plat->rx_queues_to_use &&
6649                    queue < priv->plat->tx_queues_to_use) {
6650                        netif_napi_del(&ch->rxtx_napi);
6651                }
6652        }
6653}
6654
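/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: brings the interface down if it is running, re-registers
 * the NAPI instances for the new queue counts and restarts the
 * interface; typically driven by the ethtool channels configuration.
 */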
6655int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6656{
6657        struct stmmac_priv *priv = netdev_priv(dev);
6658        int ret = 0;
6659
6660        if (netif_running(dev))
6661                stmmac_release(dev);
6662
6663        stmmac_napi_del(dev);
6664
6665        priv->plat->rx_queues_to_use = rx_cnt;
6666        priv->plat->tx_queues_to_use = tx_cnt;
6667
6668        stmmac_napi_add(dev);
6669
6670        if (netif_running(dev))
6671                ret = stmmac_open(dev);
6672
6673        return ret;
6674}
6675
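/**
 * stmmac_reinit_ringparam - change the DMA descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: brings the interface down if it is running, records the
 * new ring sizes and restarts the interface so the rings are
 * reallocated; typically driven by the ethtool ring configuration.
 */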
6676int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6677{
6678        struct stmmac_priv *priv = netdev_priv(dev);
6679        int ret = 0;
6680
6681        if (netif_running(dev))
6682                stmmac_release(dev);
6683
6684        priv->dma_rx_size = rx_size;
6685        priv->dma_tx_size = tx_size;
6686
6687        if (netif_running(dev))
6688                ret = stmmac_open(dev);
6689
6690        return ret;
6691}
6692
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6694static void stmmac_fpe_lp_task(struct work_struct *work)
6695{
6696        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6697                                                fpe_task);
6698        struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6699        enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6700        enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6701        bool *hs_enable = &fpe_cfg->hs_enable;
6702        bool *enable = &fpe_cfg->enable;
6703        int retries = 20;
6704
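        /* Poll for up to 20 * 500ms (~10 seconds) for both stations to
         * reach the ON state.
         */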
6705        while (retries-- > 0) {
6706                /* Bail out immediately if FPE handshake is OFF */
6707                if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6708                        break;
6709
6710                if (*lo_state == FPE_STATE_ENTERING_ON &&
6711                    *lp_state == FPE_STATE_ENTERING_ON) {
6712                        stmmac_fpe_configure(priv, priv->ioaddr,
6713                                             priv->plat->tx_queues_to_use,
6714                                             priv->plat->rx_queues_to_use,
6715                                             *enable);
6716
6717                        netdev_info(priv->dev, "configured FPE\n");
6718
6719                        *lo_state = FPE_STATE_ON;
6720                        *lp_state = FPE_STATE_ON;
                        netdev_info(priv->dev, "FPE handshake complete: both stations ON\n");
6722                        break;
6723                }
6724
6725                if ((*lo_state == FPE_STATE_CAPABLE ||
6726                     *lo_state == FPE_STATE_ENTERING_ON) &&
6727                     *lp_state != FPE_STATE_ON) {
                        netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
6729                                    *lo_state, *lp_state);
6730                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6731                                                MPACKET_VERIFY);
6732                }
6733                /* Sleep then retry */
6734                msleep(500);
6735        }
6736
6737        clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6738}
6739
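/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start verification, false to tear it down
 * Description: on enable, a verify mPacket is sent to the link partner
 * and the rest of the exchange is driven from stmmac_fpe_lp_task()
 * above; on disable, the local and link-partner states fall back to
 * FPE_STATE_OFF.
 */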
6740void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6741{
6742        if (priv->plat->fpe_cfg->hs_enable != enable) {
6743                if (enable) {
6744                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6745                                                MPACKET_VERIFY);
6746                } else {
6747                        priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6748                        priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6749                }
6750
6751                priv->plat->fpe_cfg->hs_enable = enable;
6752        }
6753}
6754
6755/**
6756 * stmmac_dvr_probe
6757 * @device: device pointer
6758 * @plat_dat: platform data pointer
6759 * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the network
 * device via alloc_etherdev, sets up the private structure and registers
 * the new interface.
 * Return:
 * 0 on success, a negative errno otherwise.
6764 */
6765int stmmac_dvr_probe(struct device *device,
6766                     struct plat_stmmacenet_data *plat_dat,
6767                     struct stmmac_resources *res)
6768{
6769        struct net_device *ndev = NULL;
6770        struct stmmac_priv *priv;
6771        u32 rxq;
6772        int i, ret = 0;
6773
6774        ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6775                                       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6776        if (!ndev)
6777                return -ENOMEM;
6778
6779        SET_NETDEV_DEV(ndev, device);
6780
6781        priv = netdev_priv(ndev);
6782        priv->device = device;
6783        priv->dev = ndev;
6784
6785        stmmac_set_ethtool_ops(ndev);
6786        priv->pause = pause;
6787        priv->plat = plat_dat;
6788        priv->ioaddr = res->addr;
6789        priv->dev->base_addr = (unsigned long)res->addr;
6790        priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6791
6792        priv->dev->irq = res->irq;
6793        priv->wol_irq = res->wol_irq;
6794        priv->lpi_irq = res->lpi_irq;
6795        priv->sfty_ce_irq = res->sfty_ce_irq;
6796        priv->sfty_ue_irq = res->sfty_ue_irq;
6797        for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6798                priv->rx_irq[i] = res->rx_irq[i];
6799        for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6800                priv->tx_irq[i] = res->tx_irq[i];
6801
6802        if (!is_zero_ether_addr(res->mac))
6803                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
6804
6805        dev_set_drvdata(device, priv->dev);
6806
6807        /* Verify driver arguments */
6808        stmmac_verify_args();
6809
6810        priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6811        if (!priv->af_xdp_zc_qps)
6812                return -ENOMEM;
6813
6814        /* Allocate workqueue */
6815        priv->wq = create_singlethread_workqueue("stmmac_wq");
6816        if (!priv->wq) {
6817                dev_err(priv->device, "failed to create workqueue\n");
6818                return -ENOMEM;
6819        }
6820
6821        INIT_WORK(&priv->service_task, stmmac_service_task);
6822
6823        /* Initialize Link Partner FPE workqueue */
6824        INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
6825
6826        /* Override with kernel parameters if supplied XXX CRS XXX
6827         * this needs to have multiple instances
6828         */
6829        if ((phyaddr >= 0) && (phyaddr <= 31))
6830                priv->plat->phy_addr = phyaddr;
6831
6832        if (priv->plat->stmmac_rst) {
6833                ret = reset_control_assert(priv->plat->stmmac_rst);
6834                reset_control_deassert(priv->plat->stmmac_rst);
                /* Some reset controllers provide only a reset callback
                 * instead of the assert + deassert callback pair.
                 */
6838                if (ret == -ENOTSUPP)
6839                        reset_control_reset(priv->plat->stmmac_rst);
6840        }
6841
6842        ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6843        if (ret == -ENOTSUPP)
6844                dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6845                        ERR_PTR(ret));
6846
6847        /* Init MAC and get the capabilities */
6848        ret = stmmac_hw_init(priv);
6849        if (ret)
6850                goto error_hw_init;
6851
6852        /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
6853         */
6854        if (priv->synopsys_id < DWMAC_CORE_5_20)
6855                priv->plat->dma_cfg->dche = false;
6856
6857        stmmac_check_ether_addr(priv);
6858
6859        ndev->netdev_ops = &stmmac_netdev_ops;
6860
6861        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6862                            NETIF_F_RXCSUM;
6863
6864        ret = stmmac_tc_init(priv, priv);
6865        if (!ret) {
6866                ndev->hw_features |= NETIF_F_HW_TC;
6867        }
6868
6869        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
6870                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
6871                if (priv->plat->has_gmac4)
6872                        ndev->hw_features |= NETIF_F_GSO_UDP_L4;
6873                priv->tso = true;
6874                dev_info(priv->device, "TSO feature enabled\n");
6875        }
6876
6877        if (priv->dma_cap.sphen) {
6878                ndev->hw_features |= NETIF_F_GRO;
6879                priv->sph_cap = true;
6880                priv->sph = priv->sph_cap;
6881                dev_info(priv->device, "SPH feature enabled\n");
6882        }
6883
        /* The current IP register MAC_HW_Feature1[ADDR64] only defines
         * 32/40/64-bit widths, but some SoCs support other widths: e.g. the
         * i.MX8MP supports 34 bits, which is reported as the 40-bit width
         * in MAC_HW_Feature1[ADDR64].  So override dma_cap.addr64 according
         * to the real HW design.
         */
6889        if (priv->plat->addr64)
6890                priv->dma_cap.addr64 = priv->plat->addr64;
6891
6892        if (priv->dma_cap.addr64) {
6893                ret = dma_set_mask_and_coherent(device,
6894                                DMA_BIT_MASK(priv->dma_cap.addr64));
6895                if (!ret) {
6896                        dev_info(priv->device, "Using %d bits DMA width\n",
6897                                 priv->dma_cap.addr64);
6898
6899                        /*
6900                         * If more than 32 bits can be addressed, make sure to
6901                         * enable enhanced addressing mode.
6902                         */
6903                        if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
6904                                priv->plat->dma_cfg->eame = true;
6905                } else {
6906                        ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
6907                        if (ret) {
6908                                dev_err(priv->device, "Failed to set DMA Mask\n");
6909                                goto error_hw_init;
6910                        }
6911
6912                        priv->dma_cap.addr64 = 32;
6913                }
6914        }
6915
6916        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
6917        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
6918#ifdef STMMAC_VLAN_TAG_USED
6919        /* Both mac100 and gmac support receive VLAN tag detection */
6920        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
6921        if (priv->dma_cap.vlhash) {
6922                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6923                ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
6924        }
6925        if (priv->dma_cap.vlins) {
6926                ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
6927                if (priv->dma_cap.dvlan)
6928                        ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
6929        }
6930#endif
6931        priv->msg_enable = netif_msg_init(debug, default_msg_level);
6932
6933        /* Initialize RSS */
6934        rxq = priv->plat->rx_queues_to_use;
6935        netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
6936        for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
6937                priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
6938
6939        if (priv->dma_cap.rssen && priv->plat->rss_en)
6940                ndev->features |= NETIF_F_RXHASH;
6941
6942        /* MTU range: 46 - hw-specific max */
6943        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
6944        if (priv->plat->has_xgmac)
6945                ndev->max_mtu = XGMAC_JUMBO_LEN;
6946        else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
6947                ndev->max_mtu = JUMBO_LEN;
6948        else
6949                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
        /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
         * ndev->max_mtu, or if plat->maxmtu is less than ndev->min_mtu,
         * which would be an invalid range.
         */
6953        if ((priv->plat->maxmtu < ndev->max_mtu) &&
6954            (priv->plat->maxmtu >= ndev->min_mtu))
6955                ndev->max_mtu = priv->plat->maxmtu;
6956        else if (priv->plat->maxmtu < ndev->min_mtu)
6957                dev_warn(priv->device,
6958                         "%s: warning: maxmtu having invalid value (%d)\n",
6959                         __func__, priv->plat->maxmtu);
6960
6961        if (flow_ctrl)
6962                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
6963
6964        /* Setup channels NAPI */
6965        stmmac_napi_add(ndev);
6966
6967        mutex_init(&priv->lock);
6968
        /* If a specific clk_csr value is passed from the platform, the CSR
         * Clock Range selection is fixed and cannot be changed at run-time.
         * Otherwise, the driver will try to set the MDC clock dynamically
         * according to the actual csr clock input.
         */
6975        if (priv->plat->clk_csr >= 0)
6976                priv->clk_csr = priv->plat->clk_csr;
6977        else
6978                stmmac_clk_csr_set(priv);
6979
6980        stmmac_check_pcs_mode(priv);
6981
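        /* Mark the device active and enable runtime PM now; the matching
         * pm_runtime_put() at the end of probe then lets the core gate
         * the clocks once probing has completed.
         */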
6982        pm_runtime_get_noresume(device);
6983        pm_runtime_set_active(device);
6984        pm_runtime_enable(device);
6985
6986        if (priv->hw->pcs != STMMAC_PCS_TBI &&
6987            priv->hw->pcs != STMMAC_PCS_RTBI) {
6988                /* MDIO bus Registration */
6989                ret = stmmac_mdio_register(ndev);
6990                if (ret < 0) {
6991                        dev_err(priv->device,
                                "%s: MDIO bus (id: %d) registration failed\n",
6993                                __func__, priv->plat->bus_id);
6994                        goto error_mdio_register;
6995                }
6996        }
6997
6998        if (priv->plat->speed_mode_2500)
6999                priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7000
7001        if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7002                ret = stmmac_xpcs_setup(priv->mii);
7003                if (ret)
7004                        goto error_xpcs_setup;
7005        }
7006
7007        ret = stmmac_phy_setup(priv);
7008        if (ret) {
7009                netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7010                goto error_phy_setup;
7011        }
7012
7013        ret = register_netdev(ndev);
7014        if (ret) {
7015                dev_err(priv->device, "%s: ERROR %i registering the device\n",
7016                        __func__, ret);
7017                goto error_netdev_register;
7018        }
7019
7020        if (priv->plat->serdes_powerup) {
7021                ret = priv->plat->serdes_powerup(ndev,
7022                                                 priv->plat->bsp_priv);
7023
7024                if (ret < 0)
7025                        goto error_serdes_powerup;
7026        }
7027
7028#ifdef CONFIG_DEBUG_FS
7029        stmmac_init_fs(ndev);
7030#endif
7031
7032        /* Let pm_runtime_put() disable the clocks.
7033         * If CONFIG_PM is not enabled, the clocks will stay powered.
7034         */
7035        pm_runtime_put(device);
7036
7037        return ret;
7038
7039error_serdes_powerup:
7040        unregister_netdev(ndev);
7041error_netdev_register:
7042        phylink_destroy(priv->phylink);
7043error_xpcs_setup:
7044error_phy_setup:
7045        if (priv->hw->pcs != STMMAC_PCS_TBI &&
7046            priv->hw->pcs != STMMAC_PCS_RTBI)
7047                stmmac_mdio_unregister(ndev);
7048error_mdio_register:
7049        stmmac_napi_del(ndev);
7050error_hw_init:
7051        destroy_workqueue(priv->wq);
7052        bitmap_free(priv->af_xdp_zc_qps);
7053
7054        return ret;
7055}
7056EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7057
7058/**
7059 * stmmac_dvr_remove
7060 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
7063 */
7064int stmmac_dvr_remove(struct device *dev)
7065{
7066        struct net_device *ndev = dev_get_drvdata(dev);
7067        struct stmmac_priv *priv = netdev_priv(ndev);
7068
        netdev_info(priv->dev, "%s: removing driver\n", __func__);
7070
7071        stmmac_stop_all_dma(priv);
7072        stmmac_mac_set(priv, priv->ioaddr, false);
7073        netif_carrier_off(ndev);
7074        unregister_netdev(ndev);
7075
7076        /* Serdes power down needs to happen after VLAN filter
7077         * is deleted that is triggered by unregister_netdev().
7078         */
7079        if (priv->plat->serdes_powerdown)
7080                priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7081
7082#ifdef CONFIG_DEBUG_FS
7083        stmmac_exit_fs(ndev);
7084#endif
7085        phylink_destroy(priv->phylink);
7086        if (priv->plat->stmmac_rst)
7087                reset_control_assert(priv->plat->stmmac_rst);
7088        reset_control_assert(priv->plat->stmmac_ahb_rst);
7089        pm_runtime_put(dev);
7090        pm_runtime_disable(dev);
7091        if (priv->hw->pcs != STMMAC_PCS_TBI &&
7092            priv->hw->pcs != STMMAC_PCS_RTBI)
7093                stmmac_mdio_unregister(ndev);
7094        destroy_workqueue(priv->wq);
7095        mutex_destroy(&priv->lock);
7096        bitmap_free(priv->af_xdp_zc_qps);
7097
7098        return 0;
7099}
7100EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7101
7102/**
7103 * stmmac_suspend - suspend callback
7104 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), and clean up driver resources.
7108 */
7109int stmmac_suspend(struct device *dev)
7110{
7111        struct net_device *ndev = dev_get_drvdata(dev);
7112        struct stmmac_priv *priv = netdev_priv(ndev);
7113        u32 chan;
7114        int ret;
7115
7116        if (!ndev || !netif_running(ndev))
7117                return 0;
7118
7119        phylink_mac_change(priv->phylink, false);
7120
7121        mutex_lock(&priv->lock);
7122
7123        netif_device_detach(ndev);
7124
7125        stmmac_disable_all_queues(priv);
7126
7127        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7128                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7129
7130        if (priv->eee_enabled) {
7131                priv->tx_path_in_lpi_mode = false;
7132                del_timer_sync(&priv->eee_ctrl_timer);
7133        }
7134
7135        /* Stop TX/RX DMA */
7136        stmmac_stop_all_dma(priv);
7137
7138        if (priv->plat->serdes_powerdown)
7139                priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7140
7141        /* Enable Power down mode by programming the PMT regs */
7142        if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7143                stmmac_pmt(priv, priv->hw, priv->wolopts);
7144                priv->irq_wake = 1;
7145        } else {
7146                mutex_unlock(&priv->lock);
7147                rtnl_lock();
7148                if (device_may_wakeup(priv->device))
7149                        phylink_speed_down(priv->phylink, false);
7150                phylink_stop(priv->phylink);
7151                rtnl_unlock();
7152                mutex_lock(&priv->lock);
7153
7154                stmmac_mac_set(priv, priv->ioaddr, false);
7155                pinctrl_pm_select_sleep_state(priv->device);
7156                /* Disable the PTP ref clock; PM wakeup is not in use */
7157                clk_disable_unprepare(priv->plat->clk_ptp_ref);
7158                ret = pm_runtime_force_suspend(dev);
7159                if (ret) {
7160                        mutex_unlock(&priv->lock);
7161                        return ret;
7162                }
7163        }
7164
7165        mutex_unlock(&priv->lock);
7166
7167        if (priv->dma_cap.fpesel) {
7168                /* Disable FPE */
7169                stmmac_fpe_configure(priv, priv->ioaddr,
7170                                     priv->plat->tx_queues_to_use,
7171                                     priv->plat->rx_queues_to_use, false);
7172
7173                stmmac_fpe_handshake(priv, false);
7174                stmmac_fpe_stop_wq(priv);
7175        }
7176
7177        priv->speed = SPEED_UNKNOWN;
7178        return 0;
7179}
7180EXPORT_SYMBOL_GPL(stmmac_suspend);
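/* For context: the wolopts programmed into the PMT above are set from
 * userspace via ethtool, e.g. "ethtool -s eth0 wol g" arms magic-packet
 * wakeup (illustrative command, not code from this driver).
 */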
7181
7182/**
7183 * stmmac_reset_queues_param - reset queue parameters
7184 * @priv: driver private structure
7185 */
7186static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7187{
7188        u32 rx_cnt = priv->plat->rx_queues_to_use;
7189        u32 tx_cnt = priv->plat->tx_queues_to_use;
7190        u32 queue;
7191
7192        for (queue = 0; queue < rx_cnt; queue++) {
7193                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7194
7195                rx_q->cur_rx = 0;
7196                rx_q->dirty_rx = 0;
7197        }
7198
7199        for (queue = 0; queue < tx_cnt; queue++) {
7200                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7201
7202                tx_q->cur_tx = 0;
7203                tx_q->dirty_tx = 0;
7204                tx_q->mss = 0;
7205
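                    /* netdev_tx_reset_queue() below also resets the
                     * queue's BQL (byte queue limit) accounting so it
                     * starts fresh on resume.
                     */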
7206                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7207        }
7208}
7209
7210/**
7211 * stmmac_resume - resume callback
7212 * @dev: device pointer
7213 * Description: on resume, this function is invoked to bring the DMA and
7214 * the core back into a usable state.
7215 */
7216int stmmac_resume(struct device *dev)
7217{
7218        struct net_device *ndev = dev_get_drvdata(dev);
7219        struct stmmac_priv *priv = netdev_priv(ndev);
7220        int ret;
7221
7222        if (!netif_running(ndev))
7223                return 0;
7224
7225        /* The Power Down bit in the PMT register is cleared
7226         * automatically as soon as a magic packet or a Wake-up frame
7227         * is received. It is still better to clear the bit manually,
7228         * because resuming can otherwise misbehave when the wakeup
7229         * comes from another device (e.g. a serial console).
7230         */
7231        if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7232                mutex_lock(&priv->lock);
7233                stmmac_pmt(priv, priv->hw, 0);
7234                mutex_unlock(&priv->lock);
7235                priv->irq_wake = 0;
7236        } else {
7237                pinctrl_pm_select_default_state(priv->device);
7238                /* Re-enable the clocks previously disabled on suspend */
7239                ret = pm_runtime_force_resume(dev);
7240                if (ret)
7241                        return ret;
7242                if (priv->plat->clk_ptp_ref)
7243                        clk_prepare_enable(priv->plat->clk_ptp_ref);
7244                /* Reset the PHY so that it is ready */
7245                if (priv->mii)
7246                        stmmac_mdio_reset(priv->mii);
7247        }
7248
7249        if (priv->plat->serdes_powerup) {
7250                ret = priv->plat->serdes_powerup(ndev,
7251                                                 priv->plat->bsp_priv);
7252
7253                if (ret < 0)
7254                        return ret;
7255        }
7256
7257        if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
7258                rtnl_lock();
7259                phylink_start(priv->phylink);
7260                /* We may have called phylink_speed_down() in stmmac_suspend() */
7261                phylink_speed_up(priv->phylink);
7262                rtnl_unlock();
7263        }
7264
7265        rtnl_lock();
7266        mutex_lock(&priv->lock);
7267
7268        stmmac_reset_queues_param(priv);
7269
7270        stmmac_free_tx_skbufs(priv);
7271        stmmac_clear_descriptors(priv);
7272
7273        stmmac_hw_setup(ndev, false);
7274        stmmac_init_coalesce(priv);
7275        stmmac_set_rx_mode(ndev);
7276
7277        stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7278
7279        stmmac_enable_all_queues(priv);
7280
7281        mutex_unlock(&priv->lock);
7282        rtnl_unlock();
7283
7284        phylink_mac_change(priv->phylink, true);
7285
7286        netif_device_attach(ndev);
7287
7288        return 0;
7289}
7290EXPORT_SYMBOL_GPL(stmmac_resume);
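/* Illustrative sketch: bus glue normally exposes the two callbacks above
 * through a dev_pm_ops table. The name foo_dwmac_pm_ops is hypothetical,
 * and real glue drivers often wrap stmmac_suspend()/stmmac_resume() with
 * their own clock handling first.
 */
static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);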
7291
7292#ifndef MODULE
7293static int __init stmmac_cmdline_opt(char *str)
7294{
7295        char *opt;
7296
7297        if (!str || !*str)
7298                return -EINVAL;
7299        while ((opt = strsep(&str, ",")) != NULL) {
7300                if (!strncmp(opt, "debug:", 6)) {
7301                        if (kstrtoint(opt + 6, 0, &debug))
7302                                goto err;
7303                } else if (!strncmp(opt, "phyaddr:", 8)) {
7304                        if (kstrtoint(opt + 8, 0, &phyaddr))
7305                                goto err;
7306                } else if (!strncmp(opt, "buf_sz:", 7)) {
7307                        if (kstrtoint(opt + 7, 0, &buf_sz))
7308                                goto err;
7309                } else if (!strncmp(opt, "tc:", 3)) {
7310                        if (kstrtoint(opt + 3, 0, &tc))
7311                                goto err;
7312                } else if (!strncmp(opt, "watchdog:", 9)) {
7313                        if (kstrtoint(opt + 9, 0, &watchdog))
7314                                goto err;
7315                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7316                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
7317                                goto err;
7318                } else if (!strncmp(opt, "pause:", 6)) {
7319                        if (kstrtoint(opt + 6, 0, &pause))
7320                                goto err;
7321                } else if (!strncmp(opt, "eee_timer:", 10)) {
7322                        if (kstrtoint(opt + 10, 0, &eee_timer))
7323                                goto err;
7324                } else if (!strncmp(opt, "chain_mode:", 11)) {
7325                        if (kstrtoint(opt + 11, 0, &chain_mode))
7326                                goto err;
7327                }
7328        }
7329        return 0;
7330
7331err:
7332        pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7333        return -EINVAL;
7334}
7335
7336__setup("stmmaceth=", stmmac_cmdline_opt);
7337#endif /* MODULE */
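/* When the driver is built in, the parser above takes a comma-separated
 * key:value list on the kernel command line, for example (values are
 * illustrative only):
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:4096,tc:256,watchdog:8000
 */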
7338
7339static int __init stmmac_init(void)
7340{
7341#ifdef CONFIG_DEBUG_FS
7342        /* Create debugfs main directory if it doesn't exist yet */
7343        if (!stmmac_fs_dir)
7344                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7345        register_netdevice_notifier(&stmmac_notifier);
7346#endif
7347
7348        return 0;
7349}
7350
7351static void __exit stmmac_exit(void)
7352{
7353#ifdef CONFIG_DEBUG_FS
7354        unregister_netdevice_notifier(&stmmac_notifier);
7355        debugfs_remove_recursive(stmmac_fs_dir);
7356#endif
7357}
7358
7359module_init(stmmac_init)
7360module_exit(stmmac_exit)
7361
7362MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7363MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7364MODULE_LICENSE("GPL");
7365