linux/drivers/net/ethernet/ti/cpsw_new.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Texas Instruments Ethernet Switch Driver
   4 *
   5 * Copyright (C) 2019 Texas Instruments
   6 */
   7
   8#include <linux/io.h>
   9#include <linux/clk.h>
  10#include <linux/timer.h>
  11#include <linux/module.h>
  12#include <linux/irqreturn.h>
  13#include <linux/interrupt.h>
  14#include <linux/if_ether.h>
  15#include <linux/etherdevice.h>
  16#include <linux/net_tstamp.h>
  17#include <linux/phy.h>
  18#include <linux/phy/phy.h>
  19#include <linux/delay.h>
  20#include <linux/pinctrl/consumer.h>
  21#include <linux/pm_runtime.h>
  22#include <linux/gpio/consumer.h>
  23#include <linux/of.h>
  24#include <linux/of_mdio.h>
  25#include <linux/of_net.h>
  26#include <linux/of_device.h>
  27#include <linux/if_vlan.h>
  28#include <linux/kmemleak.h>
  29#include <linux/sys_soc.h>
  30
  31#include <net/page_pool.h>
  32#include <net/pkt_cls.h>
  33#include <net/devlink.h>
  34
  35#include "cpsw.h"
  36#include "cpsw_ale.h"
  37#include "cpsw_priv.h"
  38#include "cpsw_sl.h"
  39#include "cpsw_switchdev.h"
  40#include "cpts.h"
  41#include "davinci_cpdma.h"
  42
  43#include <net/pkt_sched.h>
  44
  45static int debug_level;
  46static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
  47static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
  48static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
  49
  50struct cpsw_devlink {
  51        struct cpsw_common *cpsw;
  52};
  53
  54enum cpsw_devlink_param_id {
  55        CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
  56        CPSW_DL_PARAM_SWITCH_MODE,
  57        CPSW_DL_PARAM_ALE_BYPASS,
  58};
  59
  60/* struct cpsw_common is not needed, kept here for compatibility
   61 * reasons with the old driver
  62 */
  63static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
  64                                 struct cpsw_priv *priv)
  65{
  66        if (priv->emac_port == HOST_PORT_NUM)
  67                return -1;
  68
  69        return priv->emac_port - 1;
  70}
  71
  72static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
  73{
  74        return !cpsw->data.dual_emac;
  75}
  76
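/* Promiscuous mode is emulated in dual_emac mode by toggling unknown-unicast
 * flooding to the host port (ALE_P0_UNI_FLOOD); in switch mode this helper is
 * a no-op and flooding is configured when the host port is initialized.
 */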
  77static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
  78{
  79        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
  80        bool enable_uni = false;
  81        int i;
  82
  83        if (cpsw_is_switch_en(cpsw))
  84                return;
  85
   86        /* Enabling promiscuous mode for one interface will be
   87         * common for both interfaces as they share the same
   88         * hardware resource.
   89         */
  90        for (i = 0; i < cpsw->data.slaves; i++)
  91                if (cpsw->slaves[i].ndev &&
  92                    (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
  93                        enable_uni = true;
  94
  95        if (!enable && enable_uni) {
  96                enable = enable_uni;
  97                dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
  98        }
  99
 100        if (enable) {
 101                /* Enable unknown unicast, reg/unreg mcast */
 102                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 103                                     ALE_P0_UNI_FLOOD, 1);
 104
 105                dev_dbg(cpsw->dev, "promiscuity enabled\n");
 106        } else {
 107                /* Disable unknown unicast */
 108                cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 109                                     ALE_P0_UNI_FLOOD, 0);
 110                dev_dbg(cpsw->dev, "promiscuity disabled\n");
 111        }
 112}
 113
 114/**
 115 * cpsw_set_mc - add a multicast entry to the ALE table, or delete it,
 116 * depending on the @add flag
 117 * @ndev: device to sync
 118 * @addr: address to be added or deleted
 119 * @vid: vlan id, if vid < 0 set/unset address for real device
 120 * @add: add address if the flag is set or remove otherwise
 121 */
 122static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
 123                       int vid, int add)
 124{
 125        struct cpsw_priv *priv = netdev_priv(ndev);
 126        struct cpsw_common *cpsw = priv->cpsw;
 127        int mask, flags, ret, slave_no;
 128
 129        slave_no = cpsw_slave_index(cpsw, priv);
 130        if (vid < 0)
 131                vid = cpsw->slaves[slave_no].port_vlan;
 132
 133        mask =  ALE_PORT_HOST;
 134        flags = vid ? ALE_VLAN : 0;
 135
 136        if (add)
 137                ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
 138        else
 139                ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
 140
 141        return ret;
 142}
 143
 144static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 145{
 146        struct addr_sync_ctx *sync_ctx = ctx;
 147        struct netdev_hw_addr *ha;
 148        int found = 0, ret = 0;
 149
 150        if (!vdev || !(vdev->flags & IFF_UP))
 151                return 0;
 152
 153        /* vlan address is relevant if its sync_cnt != 0 */
 154        netdev_for_each_mc_addr(ha, vdev) {
 155                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 156                        found = ha->sync_cnt;
 157                        break;
 158                }
 159        }
 160
 161        if (found)
 162                sync_ctx->consumed++;
 163
 164        if (sync_ctx->flush) {
 165                if (!found)
 166                        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 167                return 0;
 168        }
 169
 170        if (found)
 171                ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
 172
 173        return ret;
 174}
 175
 176static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 177{
 178        struct addr_sync_ctx sync_ctx;
 179        int ret;
 180
 181        sync_ctx.consumed = 0;
 182        sync_ctx.addr = addr;
 183        sync_ctx.ndev = ndev;
 184        sync_ctx.flush = 0;
 185
 186        ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 187        if (sync_ctx.consumed < num && !ret)
 188                ret = cpsw_set_mc(ndev, addr, -1, 1);
 189
 190        return ret;
 191}
 192
 193static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
 194{
 195        struct addr_sync_ctx sync_ctx;
 196
 197        sync_ctx.consumed = 0;
 198        sync_ctx.addr = addr;
 199        sync_ctx.ndev = ndev;
 200        sync_ctx.flush = 1;
 201
 202        vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
 203        if (sync_ctx.consumed == num)
 204                cpsw_set_mc(ndev, addr, -1, 0);
 205
 206        return 0;
 207}
 208
 209static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
 210{
 211        struct addr_sync_ctx *sync_ctx = ctx;
 212        struct netdev_hw_addr *ha;
 213        int found = 0;
 214
 215        if (!vdev || !(vdev->flags & IFF_UP))
 216                return 0;
 217
 218        /* vlan address is relevant if its sync_cnt != 0 */
 219        netdev_for_each_mc_addr(ha, vdev) {
 220                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
 221                        found = ha->sync_cnt;
 222                        break;
 223                }
 224        }
 225
 226        if (!found)
 227                return 0;
 228
 229        sync_ctx->consumed++;
 230        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
 231        return 0;
 232}
 233
 234static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
 235{
 236        struct addr_sync_ctx sync_ctx;
 237
 238        sync_ctx.addr = addr;
 239        sync_ctx.ndev = ndev;
 240        sync_ctx.consumed = 0;
 241
 242        vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
 243        if (sync_ctx.consumed < num)
 244                cpsw_set_mc(ndev, addr, -1, 0);
 245
 246        return 0;
 247}
 248
 249static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 250{
 251        struct cpsw_priv *priv = netdev_priv(ndev);
 252        struct cpsw_common *cpsw = priv->cpsw;
 253
 254        if (ndev->flags & IFF_PROMISC) {
 255                /* Enable promiscuous mode */
 256                cpsw_set_promiscious(ndev, true);
 257                cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
 258                return;
 259        }
 260
 261        /* Disable promiscuous mode */
 262        cpsw_set_promiscious(ndev, false);
 263
 264        /* Restore allmulti on vlans if necessary */
 265        cpsw_ale_set_allmulti(cpsw->ale,
 266                              ndev->flags & IFF_ALLMULTI, priv->emac_port);
 267
 268        /* add/remove mcast address either for real netdev or for vlan */
 269        __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
 270                               cpsw_del_mc_addr);
 271}
 272
 273static unsigned int cpsw_rxbuf_total_len(unsigned int len)
 274{
 275        len += CPSW_HEADROOM;
 276        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 277
 278        return SKB_DATA_ALIGN(len);
 279}
 280
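/* CPDMA rx completion handler. @token is the page that was queued to the rx
 * channel, @len the received length and @status carries the source port and
 * CPDMA flags. The payload is either run through the attached XDP program or
 * wrapped in an skb for the netstack, and a replacement page is requeued.
 */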
 281static void cpsw_rx_handler(void *token, int len, int status)
 282{
 283        struct page *new_page, *page = token;
 284        void *pa = page_address(page);
 285        int headroom = CPSW_HEADROOM;
 286        struct cpsw_meta_xdp *xmeta;
 287        struct cpsw_common *cpsw;
 288        struct net_device *ndev;
 289        int port, ch, pkt_size;
 290        struct cpsw_priv *priv;
 291        struct page_pool *pool;
 292        struct sk_buff *skb;
 293        struct xdp_buff xdp;
 294        int ret = 0;
 295        dma_addr_t dma;
 296
 297        xmeta = pa + CPSW_XMETA_OFFSET;
 298        cpsw = ndev_to_cpsw(xmeta->ndev);
 299        ndev = xmeta->ndev;
 300        pkt_size = cpsw->rx_packet_max;
 301        ch = xmeta->ch;
 302
 303        if (status >= 0) {
 304                port = CPDMA_RX_SOURCE_PORT(status);
 305                if (port)
 306                        ndev = cpsw->slaves[--port].ndev;
 307        }
 308
 309        priv = netdev_priv(ndev);
 310        pool = cpsw->page_pool[ch];
 311
 312        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
 313                /* In dual emac mode check for all interfaces */
 314                if (cpsw->usage_count && status >= 0) {
  315                        /* The packet was received for an interface which
  316                         * is already down while the other interface is up
  317                         * and running. Instead of freeing it, which would
  318                         * reduce the number of rx descriptors in the DMA
  319                         * engine, requeue the page back to cpdma.
  320                         */
 321                        new_page = page;
 322                        goto requeue;
 323                }
 324
 325                /* the interface is going down, pages are purged */
 326                page_pool_recycle_direct(pool, page);
 327                return;
 328        }
 329
 330        new_page = page_pool_dev_alloc_pages(pool);
 331        if (unlikely(!new_page)) {
 332                new_page = page;
 333                ndev->stats.rx_dropped++;
 334                goto requeue;
 335        }
 336
 337        if (priv->xdp_prog) {
 338                int headroom = CPSW_HEADROOM, size = len;
 339
 340                xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
 341                if (status & CPDMA_RX_VLAN_ENCAP) {
 342                        headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 343                        size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
 344                }
 345
 346                xdp_prepare_buff(&xdp, pa, headroom, size, false);
 347
 348                ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
 349                if (ret != CPSW_XDP_PASS)
 350                        goto requeue;
 351
 352                headroom = xdp.data - xdp.data_hard_start;
 353
 354                /* XDP prog can modify vlan tag, so can't use encap header */
 355                status &= ~CPDMA_RX_VLAN_ENCAP;
 356        }
 357
 358        /* pass skb to netstack if no XDP prog or returned XDP_PASS */
 359        skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
 360        if (!skb) {
 361                ndev->stats.rx_dropped++;
 362                page_pool_recycle_direct(pool, page);
 363                goto requeue;
 364        }
 365
 366        skb->offload_fwd_mark = priv->offload_fwd_mark;
 367        skb_reserve(skb, headroom);
 368        skb_put(skb, len);
 369        skb->dev = ndev;
 370        if (status & CPDMA_RX_VLAN_ENCAP)
 371                cpsw_rx_vlan_encap(skb);
 372        if (priv->rx_ts_enabled)
 373                cpts_rx_timestamp(cpsw->cpts, skb);
 374        skb->protocol = eth_type_trans(skb, ndev);
 375
 376        /* unmap page as no netstack skb page recycling */
 377        page_pool_release_page(pool, page);
 378        netif_receive_skb(skb);
 379
 380        ndev->stats.rx_bytes += len;
 381        ndev->stats.rx_packets++;
 382
 383requeue:
 384        xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
 385        xmeta->ndev = ndev;
 386        xmeta->ch = ch;
 387
 388        dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
 389        ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
 390                                       pkt_size, 0);
 391        if (ret < 0) {
 392                WARN_ON(ret == -ENOMEM);
 393                page_pool_recycle_direct(pool, new_page);
 394        }
 395}
 396
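/* Install the ALE entries needed for a VLAN on a dual_emac port: the VLAN
 * itself (slave port + host), a host unicast entry for the port MAC and a
 * broadcast multicast entry, unwinding already added entries on failure.
 */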
 397static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
 398                                   unsigned short vid)
 399{
 400        struct cpsw_common *cpsw = priv->cpsw;
 401        int unreg_mcast_mask = 0;
 402        int mcast_mask;
 403        u32 port_mask;
 404        int ret;
 405
 406        port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
 407
 408        mcast_mask = ALE_PORT_HOST;
 409        if (priv->ndev->flags & IFF_ALLMULTI)
 410                unreg_mcast_mask = mcast_mask;
 411
 412        ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
 413                                unreg_mcast_mask);
 414        if (ret != 0)
 415                return ret;
 416
 417        ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 418                                 HOST_PORT_NUM, ALE_VLAN, vid);
 419        if (ret != 0)
 420                goto clean_vid;
 421
 422        ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 423                                 mcast_mask, ALE_VLAN, vid, 0);
 424        if (ret != 0)
 425                goto clean_vlan_ucast;
 426        return 0;
 427
 428clean_vlan_ucast:
 429        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
 430                           HOST_PORT_NUM, ALE_VLAN, vid);
 431clean_vid:
 432        cpsw_ale_del_vlan(cpsw->ale, vid, 0);
 433        return ret;
 434}
 435
 436static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 437                                    __be16 proto, u16 vid)
 438{
 439        struct cpsw_priv *priv = netdev_priv(ndev);
 440        struct cpsw_common *cpsw = priv->cpsw;
 441        int ret, i;
 442
 443        if (cpsw_is_switch_en(cpsw)) {
 444                dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
 445                return 0;
 446        }
 447
 448        if (vid == cpsw->data.default_vlan)
 449                return 0;
 450
 451        ret = pm_runtime_get_sync(cpsw->dev);
 452        if (ret < 0) {
 453                pm_runtime_put_noidle(cpsw->dev);
 454                return ret;
 455        }
 456
 457        /* In dual EMAC, reserved VLAN id should not be used for
 458         * creating VLAN interfaces as this can break the dual
 459         * EMAC port separation
 460         */
 461        for (i = 0; i < cpsw->data.slaves; i++) {
 462                if (cpsw->slaves[i].ndev &&
 463                    vid == cpsw->slaves[i].port_vlan) {
 464                        ret = -EINVAL;
 465                        goto err;
 466                }
 467        }
 468
 469        dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 470        ret = cpsw_add_vlan_ale_entry(priv, vid);
 471err:
 472        pm_runtime_put(cpsw->dev);
 473        return ret;
 474}
 475
 476static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
 477{
 478        struct cpsw_priv *priv = arg;
 479
 480        if (!vdev || !vid)
 481                return 0;
 482
 483        cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
 484        return 0;
 485}
 486
 487/* restore resources after port reset */
 488static void cpsw_restore(struct cpsw_priv *priv)
 489{
 490        struct cpsw_common *cpsw = priv->cpsw;
 491
 492        /* restore vlan configurations */
 493        vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
 494
 495        /* restore MQPRIO offload */
 496        cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 497
 498        /* restore CBS offload */
 499        cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
 500}
 501
 502static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
 503{
 504        char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
 505
 506        cpsw_ale_add_mcast(cpsw->ale, stpa,
 507                           ALE_PORT_HOST, ALE_SUPER, 0,
 508                           ALE_MCAST_BLOCK_LEARN_FWD);
 509}
 510
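/* Host port setup for switch mode: normal FIFO mode, default port VLAN with
 * all ports as members, the STP multicast entry, unknown-unicast flooding to
 * the host port and learning left enabled on port 0.
 */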
 511static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
 512{
 513        int vlan = cpsw->data.default_vlan;
 514
 515        writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
 516
 517        writel(vlan, &cpsw->host_port_regs->port_vlan);
 518
 519        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
 520                          ALE_ALL_PORTS, ALE_ALL_PORTS,
 521                          ALE_PORT_1 | ALE_PORT_2);
 522
 523        cpsw_init_stp_ale_entry(cpsw);
 524
 525        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
 526        dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
 527        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
 528}
 529
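/* Host port setup for dual_emac mode: dual MAC FIFO mode, no unknown-unicast
 * flooding, the default port VLAN and learning disabled on the host port.
 */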
 530static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
 531{
 532        int vlan = cpsw->data.default_vlan;
 533
 534        writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
 535
 536        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
 537        dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
 538
 539        writel(vlan, &cpsw->host_port_regs->port_vlan);
 540
 541        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 542        /* learning makes no sense in dual_mac mode */
 543        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
 544}
 545
 546static void cpsw_init_host_port(struct cpsw_priv *priv)
 547{
 548        struct cpsw_common *cpsw = priv->cpsw;
 549        u32 control_reg;
 550
 551        /* soft reset the controller and initialize ale */
 552        soft_reset("cpsw", &cpsw->regs->soft_reset);
 553        cpsw_ale_start(cpsw->ale);
 554
 555        /* switch to vlan aware mode */
 556        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
 557                             CPSW_ALE_VLAN_AWARE);
 558        control_reg = readl(&cpsw->regs->control);
 559        control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
 560        writel(control_reg, &cpsw->regs->control);
 561
 562        /* setup host port priority mapping */
 563        writel_relaxed(CPDMA_TX_PRIORITY_MAP,
 564                       &cpsw->host_port_regs->cpdma_tx_pri_map);
 565        writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
 566
 567        /* disable priority elevation */
 568        writel_relaxed(0, &cpsw->regs->ptype);
 569
 570        /* enable statistics collection on all ports */
 571        writel_relaxed(0x7, &cpsw->regs->stat_port_en);
 572
 573        /* Enable internal fifo flow control */
 574        writel(0x7, &cpsw->regs->flow_control);
 575
 576        if (cpsw_is_switch_en(cpsw))
 577                cpsw_init_host_port_switch(cpsw);
 578        else
 579                cpsw_init_host_port_dual_mac(cpsw);
 580
 581        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 582                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 583}
 584
 585static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
 586                                                    struct cpsw_slave *slave)
 587{
 588        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 589        struct cpsw_common *cpsw = priv->cpsw;
 590        u32 reg;
 591
 592        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 593               CPSW2_PORT_VLAN;
 594        slave_write(slave, slave->port_vlan, reg);
 595
 596        cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
 597                          port_mask, port_mask, 0);
 598        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 599                           ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
 600                           ALE_MCAST_FWD);
 601        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 602                           HOST_PORT_NUM, ALE_VLAN |
 603                           ALE_SECURE, slave->port_vlan);
 604        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 605                             ALE_PORT_DROP_UNKNOWN_VLAN, 1);
 606        /* learning makes no sense in dual_mac mode */
 607        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 608                             ALE_PORT_NOLEARN, 1);
 609}
 610
 611static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
 612                                                 struct cpsw_slave *slave)
 613{
 614        u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
 615        struct cpsw_common *cpsw = priv->cpsw;
 616        u32 reg;
 617
 618        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 619                             ALE_PORT_DROP_UNKNOWN_VLAN, 0);
 620        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 621                             ALE_PORT_NOLEARN, 0);
 622        /* Disabling SA_UPDATE is required to make STP work; without this
 623         * setting, host MAC addresses will jump between ports.
 624         * As per the TRM, a MAC address can be defined as unicast supervisory
 625         * (super) by setting both (ALE_BLOCKED | ALE_SECURE), which should
 626         * prevent SA_UPDATE, but the HW seems to work incorrectly and setting
 627         * ALE_SECURE causes STP packets to be dropped due to the ingress filter:
 628         *      if (source address found) and (secure) and
 629         *         (receive port number != port_number)
 630         *         then discard the packet
 631         */
 632        cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 633                             ALE_PORT_NO_SA_UPDATE, 1);
 634
 635        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
 636                           port_mask, ALE_VLAN, slave->port_vlan,
 637                           ALE_MCAST_FWD_2);
 638        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
 639                           HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
 640
 641        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
 642               CPSW2_PORT_VLAN;
 643        slave_write(slave, slave->port_vlan, reg);
 644}
 645
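/* PHY link state callback. Translates speed/duplex/pause into CPSW_SL MAC
 * control bits, opens or closes forwarding on the ALE port and wakes or stops
 * the TX queues accordingly.
 */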
 646static void cpsw_adjust_link(struct net_device *ndev)
 647{
 648        struct cpsw_priv *priv = netdev_priv(ndev);
 649        struct cpsw_common *cpsw = priv->cpsw;
 650        struct cpsw_slave *slave;
 651        struct phy_device *phy;
 652        u32 mac_control = 0;
 653
 654        slave = &cpsw->slaves[priv->emac_port - 1];
 655        phy = slave->phy;
 656
 657        if (!phy)
 658                return;
 659
 660        if (phy->link) {
 661                mac_control = CPSW_SL_CTL_GMII_EN;
 662
 663                if (phy->speed == 1000)
 664                        mac_control |= CPSW_SL_CTL_GIG;
 665                if (phy->duplex)
 666                        mac_control |= CPSW_SL_CTL_FULLDUPLEX;
 667
 668                /* set speed_in input in case RMII mode is used in 100Mbps */
 669                if (phy->speed == 100)
 670                        mac_control |= CPSW_SL_CTL_IFCTL_A;
 671                /* in band mode only works in 10Mbps RGMII mode */
 672                else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
 673                        mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
 674
 675                if (priv->rx_pause)
 676                        mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
 677
 678                if (priv->tx_pause)
 679                        mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
 680
 681                if (mac_control != slave->mac_control)
 682                        cpsw_sl_ctl_set(slave->mac_sl, mac_control);
 683
 684                /* enable forwarding */
 685                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 686                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
 687
 688                netif_tx_wake_all_queues(ndev);
 689
 690                if (priv->shp_cfg_speed &&
 691                    priv->shp_cfg_speed != slave->phy->speed &&
 692                    !cpsw_shp_is_off(priv))
 693                        dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
 694        } else {
 695                netif_tx_stop_all_queues(ndev);
 696
 697                mac_control = 0;
 698                /* disable forwarding */
 699                cpsw_ale_control_set(cpsw->ale, priv->emac_port,
 700                                     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
 701
 702                cpsw_sl_wait_for_idle(slave->mac_sl, 100);
 703
 704                cpsw_sl_ctl_reset(slave->mac_sl);
 705        }
 706
 707        if (mac_control != slave->mac_control)
 708                phy_print_status(phy);
 709
 710        slave->mac_control = mac_control;
 711
 712        if (phy->link && cpsw_need_resplit(cpsw))
 713                cpsw_split_res(cpsw);
 714}
 715
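/* Bring up one slave port: reset the MAC sliver, program priority maps and
 * RX_MAXLEN, install the default ALE entries for the current mode, then
 * connect and start the PHY.
 */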
 716static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 717{
 718        struct cpsw_common *cpsw = priv->cpsw;
 719        struct phy_device *phy;
 720
 721        cpsw_sl_reset(slave->mac_sl, 100);
 722        cpsw_sl_ctl_reset(slave->mac_sl);
 723
 724        /* setup priority mapping */
 725        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
 726                          RX_PRIORITY_MAPPING);
 727
 728        switch (cpsw->version) {
 729        case CPSW_VERSION_1:
 730                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
 731                /* Increase RX FIFO size to 5 for supporting full duplex
 732                 * flow control mode
 733                 */
 734                slave_write(slave,
 735                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 736                            CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
 737                break;
 738        case CPSW_VERSION_2:
 739        case CPSW_VERSION_3:
 740        case CPSW_VERSION_4:
 741                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
 742                /* Increase RX FIFO size to 5 for supporting full duplex
 743                 * flow control mode
 744                 */
 745                slave_write(slave,
 746                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
 747                            CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
 748                break;
 749        }
 750
 751        /* setup max packet size, and mac address */
 752        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
 753                          cpsw->rx_packet_max);
 754        cpsw_set_slave_mac(slave, priv);
 755
 756        slave->mac_control = 0; /* no link yet */
 757
 758        if (cpsw_is_switch_en(cpsw))
 759                cpsw_port_add_switch_def_ale_entries(priv, slave);
 760        else
 761                cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
 762
 763        if (!slave->data->phy_node)
 764                dev_err(priv->dev, "no phy found on slave %d\n",
 765                        slave->slave_num);
 766        phy = of_phy_connect(priv->ndev, slave->data->phy_node,
 767                             &cpsw_adjust_link, 0, slave->data->phy_if);
 768        if (!phy) {
 769                dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
 770                        slave->data->phy_node,
 771                        slave->slave_num);
 772                return;
 773        }
 774        slave->phy = phy;
 775
 776        phy_attached_info(slave->phy);
 777
 778        phy_start(slave->phy);
 779
 780        /* Configure GMII_SEL register */
 781        phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
 782                         slave->data->phy_if);
 783}
 784
 785static int cpsw_ndo_stop(struct net_device *ndev)
 786{
 787        struct cpsw_priv *priv = netdev_priv(ndev);
 788        struct cpsw_common *cpsw = priv->cpsw;
 789        struct cpsw_slave *slave;
 790
 791        cpsw_info(priv, ifdown, "shutting down ndev\n");
 792        slave = &cpsw->slaves[priv->emac_port - 1];
 793        if (slave->phy)
 794                phy_stop(slave->phy);
 795
 796        netif_tx_stop_all_queues(priv->ndev);
 797
 798        if (slave->phy) {
 799                phy_disconnect(slave->phy);
 800                slave->phy = NULL;
 801        }
 802
 803        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
 804
 805        if (cpsw->usage_count <= 1) {
 806                napi_disable(&cpsw->napi_rx);
 807                napi_disable(&cpsw->napi_tx);
 808                cpts_unregister(cpsw->cpts);
 809                cpsw_intr_disable(cpsw);
 810                cpdma_ctlr_stop(cpsw->dma);
 811                cpsw_ale_stop(cpsw->ale);
 812                cpsw_destroy_xdp_rxqs(cpsw);
 813        }
 814
 815        if (cpsw_need_resplit(cpsw))
 816                cpsw_split_res(cpsw);
 817
 818        cpsw->usage_count--;
 819        pm_runtime_put_sync(cpsw->dev);
 820        return 0;
 821}
 822
 823static int cpsw_ndo_open(struct net_device *ndev)
 824{
 825        struct cpsw_priv *priv = netdev_priv(ndev);
 826        struct cpsw_common *cpsw = priv->cpsw;
 827        int ret;
 828
 829        dev_info(priv->dev, "starting ndev. mode: %s\n",
 830                 cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
 831        ret = pm_runtime_get_sync(cpsw->dev);
 832        if (ret < 0) {
 833                pm_runtime_put_noidle(cpsw->dev);
 834                return ret;
 835        }
 836
 837        /* Notify the stack of the actual queue counts. */
 838        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
 839        if (ret) {
 840                dev_err(priv->dev, "cannot set real number of tx queues\n");
 841                goto pm_cleanup;
 842        }
 843
 844        ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
 845        if (ret) {
 846                dev_err(priv->dev, "cannot set real number of rx queues\n");
 847                goto pm_cleanup;
 848        }
 849
 850        /* Initialize host and slave ports */
 851        if (!cpsw->usage_count)
 852                cpsw_init_host_port(priv);
 853        cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
 854
 855        /* initialize shared resources for every ndev */
 856        if (!cpsw->usage_count) {
 857                /* create rxqs for both interfaces in dual mac mode as they use
 858                 * the same pool and must be destroyed together when there are no users.
 859                 */
 860                ret = cpsw_create_xdp_rxqs(cpsw);
 861                if (ret < 0)
 862                        goto err_cleanup;
 863
 864                ret = cpsw_fill_rx_channels(priv);
 865                if (ret < 0)
 866                        goto err_cleanup;
 867
 868                if (cpsw->cpts) {
 869                        if (cpts_register(cpsw->cpts))
 870                                dev_err(priv->dev, "error registering cpts device\n");
 871                        else
 872                                writel(0x10, &cpsw->wr_regs->misc_en);
 873                }
 874
 875                napi_enable(&cpsw->napi_rx);
 876                napi_enable(&cpsw->napi_tx);
 877
 878                if (cpsw->tx_irq_disabled) {
 879                        cpsw->tx_irq_disabled = false;
 880                        enable_irq(cpsw->irqs_table[1]);
 881                }
 882
 883                if (cpsw->rx_irq_disabled) {
 884                        cpsw->rx_irq_disabled = false;
 885                        enable_irq(cpsw->irqs_table[0]);
 886                }
 887        }
 888
 889        cpsw_restore(priv);
 890
 891        /* Enable Interrupt pacing if configured */
 892        if (cpsw->coal_intvl != 0) {
 893                struct ethtool_coalesce coal;
 894
 895                coal.rx_coalesce_usecs = cpsw->coal_intvl;
 896                cpsw_set_coalesce(ndev, &coal);
 897        }
 898
 899        cpdma_ctlr_start(cpsw->dma);
 900        cpsw_intr_enable(cpsw);
 901        cpsw->usage_count++;
 902
 903        return 0;
 904
 905err_cleanup:
 906        cpsw_ndo_stop(ndev);
 907
 908pm_cleanup:
 909        pm_runtime_put_sync(cpsw->dev);
 910        return ret;
 911}
 912
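/* Transmit path: pad short frames, flag HW timestamping if requested, map the
 * skb queue to a CPDMA TX channel and submit the descriptor, stopping the
 * queue when no free descriptors remain.
 */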
 913static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 914                                       struct net_device *ndev)
 915{
 916        struct cpsw_priv *priv = netdev_priv(ndev);
 917        struct cpsw_common *cpsw = priv->cpsw;
 918        struct cpts *cpts = cpsw->cpts;
 919        struct netdev_queue *txq;
 920        struct cpdma_chan *txch;
 921        int ret, q_idx;
 922
 923        if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
 924                cpsw_err(priv, tx_err, "packet pad failed\n");
 925                ndev->stats.tx_dropped++;
 926                return NET_XMIT_DROP;
 927        }
 928
 929        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 930            priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
 931                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 932
 933        q_idx = skb_get_queue_mapping(skb);
 934        if (q_idx >= cpsw->tx_ch_num)
 935                q_idx = q_idx % cpsw->tx_ch_num;
 936
 937        txch = cpsw->txv[q_idx].ch;
 938        txq = netdev_get_tx_queue(ndev, q_idx);
 939        skb_tx_timestamp(skb);
 940        ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
 941                                priv->emac_port);
 942        if (unlikely(ret != 0)) {
 943                cpsw_err(priv, tx_err, "desc submit failed\n");
 944                goto fail;
 945        }
 946
 947        /* If there are no free tx descriptors left then we need to
 948         * tell the kernel to stop sending us tx frames.
 949         */
 950        if (unlikely(!cpdma_check_free_tx_desc(txch))) {
 951                netif_tx_stop_queue(txq);
 952
 953                /* Barrier, so that stop_queue is visible to other CPUs */
 954                smp_mb__after_atomic();
 955
 956                if (cpdma_check_free_tx_desc(txch))
 957                        netif_tx_wake_queue(txq);
 958        }
 959
 960        return NETDEV_TX_OK;
 961fail:
 962        ndev->stats.tx_dropped++;
 963        netif_tx_stop_queue(txq);
 964
 965        /* Barrier, so that stop_queue is visible to other CPUs */
 966        smp_mb__after_atomic();
 967
 968        if (cpdma_check_free_tx_desc(txch))
 969                netif_tx_wake_queue(txq);
 970
 971        return NETDEV_TX_BUSY;
 972}
 973
 974static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
 975{
 976        struct sockaddr *addr = (struct sockaddr *)p;
 977        struct cpsw_priv *priv = netdev_priv(ndev);
 978        struct cpsw_common *cpsw = priv->cpsw;
 979        int ret, slave_no;
 980        int flags = 0;
 981        u16 vid = 0;
 982
 983        slave_no = cpsw_slave_index(cpsw, priv);
 984        if (!is_valid_ether_addr(addr->sa_data))
 985                return -EADDRNOTAVAIL;
 986
 987        ret = pm_runtime_get_sync(cpsw->dev);
 988        if (ret < 0) {
 989                pm_runtime_put_noidle(cpsw->dev);
 990                return ret;
 991        }
 992
 993        vid = cpsw->slaves[slave_no].port_vlan;
 994        flags = ALE_VLAN | ALE_SECURE;
 995
 996        cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
 997                           flags, vid);
 998        cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
 999                           flags, vid);
1000
1001        ether_addr_copy(priv->mac_addr, addr->sa_data);
1002        ether_addr_copy(ndev->dev_addr, priv->mac_addr);
1003        cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
1004
1005        pm_runtime_put(cpsw->dev);
1006
1007        return 0;
1008}
1009
1010static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1011                                     __be16 proto, u16 vid)
1012{
1013        struct cpsw_priv *priv = netdev_priv(ndev);
1014        struct cpsw_common *cpsw = priv->cpsw;
1015        int ret;
1016        int i;
1017
1018        if (cpsw_is_switch_en(cpsw)) {
1019                dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
1020                return 0;
1021        }
1022
1023        if (vid == cpsw->data.default_vlan)
1024                return 0;
1025
1026        ret = pm_runtime_get_sync(cpsw->dev);
1027        if (ret < 0) {
1028                pm_runtime_put_noidle(cpsw->dev);
1029                return ret;
1030        }
1031
1032        /* reset the return code as pm_runtime_get_sync() can return
1033         * non-zero values as well.
1034         */
1035        ret = 0;
1036        for (i = 0; i < cpsw->data.slaves; i++) {
1037                if (cpsw->slaves[i].ndev &&
1038                    vid == cpsw->slaves[i].port_vlan) {
1039                        ret = -EINVAL;
1040                        goto err;
1041                }
1042        }
1043
1044        dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1045        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1046        if (ret)
1047                dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
1048        ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1049                                 HOST_PORT_NUM, ALE_VLAN, vid);
1050        if (ret)
1051                dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
1052                        ret);
1053        ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1054                                 0, ALE_VLAN, vid);
1055        if (ret)
1056                dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
1057                        ret);
1058        cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
1059        ret = 0;
1060err:
1061        pm_runtime_put(cpsw->dev);
1062        return ret;
1063}
1064
1065static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
1066                                       size_t len)
1067{
1068        struct cpsw_priv *priv = netdev_priv(ndev);
1069        int err;
1070
1071        err = snprintf(name, len, "p%d", priv->emac_port);
1072
1073        if (err >= len)
1074                return -EINVAL;
1075
1076        return 0;
1077}
1078
1079#ifdef CONFIG_NET_POLL_CONTROLLER
1080static void cpsw_ndo_poll_controller(struct net_device *ndev)
1081{
1082        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1083
1084        cpsw_intr_disable(cpsw);
1085        cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1086        cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1087        cpsw_intr_enable(cpsw);
1088}
1089#endif
1090
1091static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
1092                             struct xdp_frame **frames, u32 flags)
1093{
1094        struct cpsw_priv *priv = netdev_priv(ndev);
1095        struct xdp_frame *xdpf;
1096        int i, nxmit = 0;
1097
1098        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1099                return -EINVAL;
1100
1101        for (i = 0; i < n; i++) {
1102                xdpf = frames[i];
1103                if (xdpf->len < READ_ONCE(priv->tx_packet_min))
1104                        break;
1105
1106                if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
1107                        break;
1108                nxmit++;
1109        }
1110
1111        return nxmit;
1112}
1113
1114static int cpsw_get_port_parent_id(struct net_device *ndev,
1115                                   struct netdev_phys_item_id *ppid)
1116{
1117        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1118
1119        ppid->id_len = sizeof(cpsw->base_mac);
1120        memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
1121
1122        return 0;
1123}
1124
1125static const struct net_device_ops cpsw_netdev_ops = {
1126        .ndo_open               = cpsw_ndo_open,
1127        .ndo_stop               = cpsw_ndo_stop,
1128        .ndo_start_xmit         = cpsw_ndo_start_xmit,
1129        .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
1130        .ndo_do_ioctl           = cpsw_ndo_ioctl,
1131        .ndo_validate_addr      = eth_validate_addr,
1132        .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
1133        .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
1134        .ndo_set_tx_maxrate     = cpsw_ndo_set_tx_maxrate,
1135#ifdef CONFIG_NET_POLL_CONTROLLER
1136        .ndo_poll_controller    = cpsw_ndo_poll_controller,
1137#endif
1138        .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
1139        .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
1140        .ndo_setup_tc           = cpsw_ndo_setup_tc,
1141        .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
1142        .ndo_bpf                = cpsw_ndo_bpf,
1143        .ndo_xdp_xmit           = cpsw_ndo_xdp_xmit,
1144        .ndo_get_port_parent_id = cpsw_get_port_parent_id,
1145};
1146
1147static void cpsw_get_drvinfo(struct net_device *ndev,
1148                             struct ethtool_drvinfo *info)
1149{
1150        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1151        struct platform_device *pdev;
1152
1153        pdev = to_platform_device(cpsw->dev);
1154        strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
1155        strlcpy(info->version, "2.0", sizeof(info->version));
1156        strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1157}
1158
1159static int cpsw_set_pauseparam(struct net_device *ndev,
1160                               struct ethtool_pauseparam *pause)
1161{
1162        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1163        struct cpsw_priv *priv = netdev_priv(ndev);
1164        int slave_no;
1165
1166        slave_no = cpsw_slave_index(cpsw, priv);
1167        if (!cpsw->slaves[slave_no].phy)
1168                return -EINVAL;
1169
1170        if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
1171                return -EINVAL;
1172
1173        priv->rx_pause = pause->rx_pause ? true : false;
1174        priv->tx_pause = pause->tx_pause ? true : false;
1175
1176        phy_set_asym_pause(cpsw->slaves[slave_no].phy,
1177                           priv->rx_pause, priv->tx_pause);
1178
1179        return 0;
1180}
1181
1182static int cpsw_set_channels(struct net_device *ndev,
1183                             struct ethtool_channels *chs)
1184{
1185        return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
1186}
1187
1188static const struct ethtool_ops cpsw_ethtool_ops = {
1189        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
1190        .get_drvinfo            = cpsw_get_drvinfo,
1191        .get_msglevel           = cpsw_get_msglevel,
1192        .set_msglevel           = cpsw_set_msglevel,
1193        .get_link               = ethtool_op_get_link,
1194        .get_ts_info            = cpsw_get_ts_info,
1195        .get_coalesce           = cpsw_get_coalesce,
1196        .set_coalesce           = cpsw_set_coalesce,
1197        .get_sset_count         = cpsw_get_sset_count,
1198        .get_strings            = cpsw_get_strings,
1199        .get_ethtool_stats      = cpsw_get_ethtool_stats,
1200        .get_pauseparam         = cpsw_get_pauseparam,
1201        .set_pauseparam         = cpsw_set_pauseparam,
1202        .get_wol                = cpsw_get_wol,
1203        .set_wol                = cpsw_set_wol,
1204        .get_regs_len           = cpsw_get_regs_len,
1205        .get_regs               = cpsw_get_regs,
1206        .begin                  = cpsw_ethtool_op_begin,
1207        .complete               = cpsw_ethtool_op_complete,
1208        .get_channels           = cpsw_get_channels,
1209        .set_channels           = cpsw_set_channels,
1210        .get_link_ksettings     = cpsw_get_link_ksettings,
1211        .set_link_ksettings     = cpsw_set_link_ksettings,
1212        .get_eee                = cpsw_get_eee,
1213        .set_eee                = cpsw_set_eee,
1214        .nway_reset             = cpsw_nway_reset,
1215        .get_ringparam          = cpsw_get_ringparam,
1216        .set_ringparam          = cpsw_set_ringparam,
1217};
1218
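/* Parse the "ethernet-ports" DT subnodes: each port node provides its id via
 * "reg", a PHY (fixed-link or phy-handle), phy-mode, an optional MAC address
 * and the "ti,dual-emac-pvid" reserved VLAN.
 */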
1219static int cpsw_probe_dt(struct cpsw_common *cpsw)
1220{
1221        struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
1222        struct cpsw_platform_data *data = &cpsw->data;
1223        struct device *dev = cpsw->dev;
1224        int ret;
1225        u32 prop;
1226
1227        if (!node)
1228                return -EINVAL;
1229
1230        tmp_node = of_get_child_by_name(node, "ethernet-ports");
1231        if (!tmp_node)
1232                return -ENOENT;
1233        data->slaves = of_get_child_count(tmp_node);
1234        if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
1235                of_node_put(tmp_node);
1236                return -ENOENT;
1237        }
1238
1239        data->active_slave = 0;
1240        data->channels = CPSW_MAX_QUEUES;
1241        data->dual_emac = true;
1242        data->bd_ram_size = CPSW_BD_RAM_SIZE;
1243        data->mac_control = 0;
1244
1245        data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
1246                                        sizeof(struct cpsw_slave_data),
1247                                        GFP_KERNEL);
1248        if (!data->slave_data)
1249                return -ENOMEM;
1250
1251        /* Populate all the child nodes here...
1252         */
1253        ret = devm_of_platform_populate(dev);
1254        /* We do not want to force this, as in some cases there may be no child nodes */
1255        if (ret)
1256                dev_warn(dev, "Doesn't have any child node\n");
1257
1258        for_each_child_of_node(tmp_node, port_np) {
1259                struct cpsw_slave_data *slave_data;
1260                u32 port_id;
1261
1262                ret = of_property_read_u32(port_np, "reg", &port_id);
1263                if (ret < 0) {
1264                        dev_err(dev, "%pOF error reading port_id %d\n",
1265                                port_np, ret);
1266                        goto err_node_put;
1267                }
1268
1269                if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
1270                        dev_err(dev, "%pOF has invalid port_id %u\n",
1271                                port_np, port_id);
1272                        ret = -EINVAL;
1273                        goto err_node_put;
1274                }
1275
1276                slave_data = &data->slave_data[port_id - 1];
1277
1278                slave_data->disabled = !of_device_is_available(port_np);
1279                if (slave_data->disabled)
1280                        continue;
1281
1282                slave_data->slave_node = port_np;
1283                slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
1284                if (IS_ERR(slave_data->ifphy)) {
1285                        ret = PTR_ERR(slave_data->ifphy);
1286                        dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
1287                                port_np, ret);
1288                        goto err_node_put;
1289                }
1290
1291                if (of_phy_is_fixed_link(port_np)) {
1292                        ret = of_phy_register_fixed_link(port_np);
1293                        if (ret) {
1294                                if (ret != -EPROBE_DEFER)
1295                                        dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
1296                                                port_np, ret);
1297                                goto err_node_put;
1298                        }
1299                        slave_data->phy_node = of_node_get(port_np);
1300                } else {
1301                        slave_data->phy_node =
1302                                of_parse_phandle(port_np, "phy-handle", 0);
1303                }
1304
1305                if (!slave_data->phy_node) {
1306                        dev_err(dev, "%pOF no phy found\n", port_np);
1307                        ret = -ENODEV;
1308                        goto err_node_put;
1309                }
1310
1311                ret = of_get_phy_mode(port_np, &slave_data->phy_if);
1312                if (ret) {
1313                        dev_err(dev, "%pOF read phy-mode err %d\n",
1314                                port_np, ret);
1315                        goto err_node_put;
1316                }
1317
1318                ret = of_get_mac_address(port_np, slave_data->mac_addr);
1319                if (ret) {
1320                        ret = ti_cm_get_macid(dev, port_id - 1,
1321                                              slave_data->mac_addr);
1322                        if (ret)
1323                                goto err_node_put;
1324                }
1325
1326                if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
1327                                         &prop)) {
1328                        dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
1329                                port_np);
1330                        slave_data->dual_emac_res_vlan = port_id;
1331                        dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
1332                                port_np, slave_data->dual_emac_res_vlan);
1333                } else {
1334                        slave_data->dual_emac_res_vlan = prop;
1335                }
1336        }
1337
1338        of_node_put(tmp_node);
1339        return 0;
1340
1341err_node_put:
1342        of_node_put(port_np);
1343        return ret;
1344}
1345
1346static void cpsw_remove_dt(struct cpsw_common *cpsw)
1347{
1348        struct cpsw_platform_data *data = &cpsw->data;
1349        int i = 0;
1350
1351        for (i = 0; i < cpsw->data.slaves; i++) {
1352                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1353                struct device_node *port_np = slave_data->phy_node;
1354
1355                if (port_np) {
1356                        if (of_phy_is_fixed_link(port_np))
1357                                of_phy_deregister_fixed_link(port_np);
1358
1359                        of_node_put(port_np);
1360                }
1361        }
1362}
1363
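/* Allocate a net_device per enabled slave and set its MAC address (from DT or
 * random), netdev/ethtool ops and features. NAPI contexts are attached only
 * once since the CPDMA channels and IRQs are shared between ports.
 */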
1364static int cpsw_create_ports(struct cpsw_common *cpsw)
1365{
1366        struct cpsw_platform_data *data = &cpsw->data;
1367        struct net_device *ndev, *napi_ndev = NULL;
1368        struct device *dev = cpsw->dev;
1369        struct cpsw_priv *priv;
1370        int ret = 0, i = 0;
1371
1372        for (i = 0; i < cpsw->data.slaves; i++) {
1373                struct cpsw_slave_data *slave_data = &data->slave_data[i];
1374
1375                if (slave_data->disabled)
1376                        continue;
1377
1378                ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
1379                                               CPSW_MAX_QUEUES,
1380                                               CPSW_MAX_QUEUES);
1381                if (!ndev) {
1382                        dev_err(dev, "error allocating net_device\n");
1383                        return -ENOMEM;
1384                }
1385
1386                priv = netdev_priv(ndev);
1387                priv->cpsw = cpsw;
1388                priv->ndev = ndev;
1389                priv->dev  = dev;
1390                priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1391                priv->emac_port = i + 1;
1392                priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
1393
1394                if (is_valid_ether_addr(slave_data->mac_addr)) {
1395                        ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1396                        dev_info(cpsw->dev, "Detected MACID = %pM\n",
1397                                 priv->mac_addr);
1398                } else {
1399                        eth_random_addr(slave_data->mac_addr);
1400                        dev_info(cpsw->dev, "Random MACID = %pM\n",
1401                                 priv->mac_addr);
1402                }
1403                ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
1404                ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
1405
1406                cpsw->slaves[i].ndev = ndev;
1407
1408                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
1409                                  NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL;
1410
1411                ndev->netdev_ops = &cpsw_netdev_ops;
1412                ndev->ethtool_ops = &cpsw_ethtool_ops;
1413                SET_NETDEV_DEV(ndev, dev);
1414
1415                if (!napi_ndev) {
1416                        /* CPSW Host port CPDMA interface is shared between
1417                         * ports and there is only one TX and one RX IRQ
1418                         * available for all possible TX and RX channels
1419                         * accordingly.
1420                         */
1421                        netif_napi_add(ndev, &cpsw->napi_rx,
1422                                       cpsw->quirk_irq ?
1423                                       cpsw_rx_poll : cpsw_rx_mq_poll,
1424                                       CPSW_POLL_WEIGHT);
1425                        netif_tx_napi_add(ndev, &cpsw->napi_tx,
1426                                          cpsw->quirk_irq ?
1427                                          cpsw_tx_poll : cpsw_tx_mq_poll,
1428                                          CPSW_POLL_WEIGHT);
1429                }
1430
1431                napi_ndev = ndev;
1432        }
1433
1434        return ret;
1435}
1436
1437static void cpsw_unregister_ports(struct cpsw_common *cpsw)
1438{
1439        int i = 0;
1440
1441        for (i = 0; i < cpsw->data.slaves; i++) {
1442                if (!cpsw->slaves[i].ndev)
1443                        continue;
1444
1445                unregister_netdev(cpsw->slaves[i].ndev);
1446        }
1447}
1448
1449static int cpsw_register_ports(struct cpsw_common *cpsw)
1450{
1451        int ret = 0, i = 0;
1452
1453        for (i = 0; i < cpsw->data.slaves; i++) {
1454                if (!cpsw->slaves[i].ndev)
1455                        continue;
1456
1457                /* register the network device */
1458                ret = register_netdev(cpsw->slaves[i].ndev);
1459                if (ret) {
1460                        dev_err(cpsw->dev,
1461                                "cpsw: error registering net device %d\n", i);
1462                        cpsw->slaves[i].ndev = NULL;
1463                        break;
1464                }
1465        }
1466
1467        if (ret)
1468                cpsw_unregister_ports(cpsw);
1469        return ret;
1470}
1471
1472bool cpsw_port_dev_check(const struct net_device *ndev)
1473{
1474        if (ndev->netdev_ops == &cpsw_netdev_ops) {
1475                struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1476
1477                return !cpsw->data.dual_emac;
1478        }
1479
1480        return false;
1481}
1482
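/* offload_fwd_mark tells the bridge that forwarding between the two ports was
 * already done in hardware; it is set only when both ports are members of the
 * same bridge and ALE bypass is disabled.
 */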
1483static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
1484{
1485        int set_val = 0;
1486        int i;
1487
1488        if (!cpsw->ale_bypass &&
1489            (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
1490                set_val = 1;
1491
1492        dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
1493
1494        for (i = 0; i < cpsw->data.slaves; i++) {
1495                struct net_device *sl_ndev = cpsw->slaves[i].ndev;
1496                struct cpsw_priv *priv = netdev_priv(sl_ndev);
1497
1498                priv->offload_fwd_mark = set_val;
1499        }
1500}
1501
1502static int cpsw_netdevice_port_link(struct net_device *ndev,
1503                                    struct net_device *br_ndev)
1504{
1505        struct cpsw_priv *priv = netdev_priv(ndev);
1506        struct cpsw_common *cpsw = priv->cpsw;
1507
1508        if (!cpsw->br_members) {
1509                cpsw->hw_bridge_dev = br_ndev;
1510        } else {
1511                /* Adding the port to a second bridge is not
1512                 * supported.
1513                 */
1514                if (cpsw->hw_bridge_dev != br_ndev)
1515                        return -EOPNOTSUPP;
1516        }
1517
1518        cpsw->br_members |= BIT(priv->emac_port);
1519
1520        cpsw_port_offload_fwd_mark_update(cpsw);
1521
1522        return NOTIFY_DONE;
1523}
1524
1525static void cpsw_netdevice_port_unlink(struct net_device *ndev)
1526{
1527        struct cpsw_priv *priv = netdev_priv(ndev);
1528        struct cpsw_common *cpsw = priv->cpsw;
1529
1530        cpsw->br_members &= ~BIT(priv->emac_port);
1531
1532        cpsw_port_offload_fwd_mark_update(cpsw);
1533
1534        if (!cpsw->br_members)
1535                cpsw->hw_bridge_dev = NULL;
1536}
1537
1538/* netdev notifier */
1539static int cpsw_netdevice_event(struct notifier_block *unused,
1540                                unsigned long event, void *ptr)
1541{
1542        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
1543        struct netdev_notifier_changeupper_info *info;
1544        int ret = NOTIFY_DONE;
1545
1546        if (!cpsw_port_dev_check(ndev))
1547                return NOTIFY_DONE;
1548
1549        switch (event) {
1550        case NETDEV_CHANGEUPPER:
1551                info = ptr;
1552
1553                if (netif_is_bridge_master(info->upper_dev)) {
1554                        if (info->linking)
1555                                ret = cpsw_netdevice_port_link(ndev,
1556                                                               info->upper_dev);
1557                        else
1558                                cpsw_netdevice_port_unlink(ndev);
1559                }
1560                break;
1561        default:
1562                return NOTIFY_DONE;
1563        }
1564
1565        return notifier_from_errno(ret);
1566}
1567
1568static struct notifier_block cpsw_netdevice_nb __read_mostly = {
1569        .notifier_call = cpsw_netdevice_event,
1570};
1571
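    /* Register the netdevice notifier (bridge join/leave tracking above) and
     * the switchdev notifiers; the former is rolled back if the latter fails.
     */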
1572static int cpsw_register_notifiers(struct cpsw_common *cpsw)
1573{
1574        int ret = 0;
1575
1576        ret = register_netdevice_notifier(&cpsw_netdevice_nb);
1577        if (ret) {
1578                dev_err(cpsw->dev, "can't register netdevice notifier\n");
1579                return ret;
1580        }
1581
1582        ret = cpsw_switchdev_register_notifiers(cpsw);
1583        if (ret)
1584                unregister_netdevice_notifier(&cpsw_netdevice_nb);
1585
1586        return ret;
1587}
1588
1589static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
1590{
1591        cpsw_switchdev_unregister_notifiers(cpsw);
1592        unregister_netdevice_notifier(&cpsw_netdevice_nb);
1593}
1594
1595static const struct devlink_ops cpsw_devlink_ops = {
1596};
1597
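    /* devlink "switch_mode" runtime parameter: true selects switch mode,
     * false selects dual EMAC (one MAC per port) mode.
     */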
1598static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
1599                                   struct devlink_param_gset_ctx *ctx)
1600{
1601        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1602        struct cpsw_common *cpsw = dl_priv->cpsw;
1603
1604        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1605
1606        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1607                return -EOPNOTSUPP;
1608
1609        ctx->val.vbool = !cpsw->data.dual_emac;
1610
1611        return 0;
1612}
1613
1614static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
1615                                   struct devlink_param_gset_ctx *ctx)
1616{
1617        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1618        struct cpsw_common *cpsw = dl_priv->cpsw;
1619        int vlan = cpsw->data.default_vlan;
1620        bool switch_en = ctx->val.vbool;
1621        bool if_running = false;
1622        int i;
1623
1624        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1625
1626        if (id != CPSW_DL_PARAM_SWITCH_MODE)
1627                return -EOPNOTSUPP;
1628
1629        if (switch_en == !cpsw->data.dual_emac)
1630                return 0;
1631
1632        if (!switch_en && cpsw->br_members) {
1633                dev_err(cpsw->dev, "Remove ports from the bridge before disabling switch mode\n");
1634                return -EINVAL;
1635        }
1636
1637        rtnl_lock();
1638
1639        for (i = 0; i < cpsw->data.slaves; i++) {
1640                struct cpsw_slave *slave = &cpsw->slaves[i];
1641                struct net_device *sl_ndev = slave->ndev;
1642
1643                if (!sl_ndev || !netif_running(sl_ndev))
1644                        continue;
1645
1646                if_running = true;
1647        }
1648
1649        if (!if_running) {
1650                /* all ndevs are down */
1651                cpsw->data.dual_emac = !switch_en;
1652                for (i = 0; i < cpsw->data.slaves; i++) {
1653                        struct cpsw_slave *slave = &cpsw->slaves[i];
1654                        struct net_device *sl_ndev = slave->ndev;
1655
1656                        if (!sl_ndev)
1657                                continue;
1658
1659                        if (switch_en)
1660                                vlan = cpsw->data.default_vlan;
1661                        else
1662                                vlan = slave->data->dual_emac_res_vlan;
1663                        slave->port_vlan = vlan;
1664                }
1665                goto exit;
1666        }
1667
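            /* Some ports are up: change modes live. Put the ALE into bypass
             * so nothing is forwarded while its table is cleared and the host
             * and slave ports are reprogrammed, then re-enable forwarding in
             * the new mode.
             */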
1668        if (switch_en) {
1669                dev_info(cpsw->dev, "Enable switch mode\n");
1670
1671                /* enable bypass - no forwarding; all traffic goes to Host */
1672                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1673
1674                /* clean up ALE table */
1675                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1676                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1677
1678                cpsw_init_host_port_switch(cpsw);
1679
1680                for (i = 0; i < cpsw->data.slaves; i++) {
1681                        struct cpsw_slave *slave = &cpsw->slaves[i];
1682                        struct net_device *sl_ndev = slave->ndev;
1683                        struct cpsw_priv *priv;
1684
1685                        if (!sl_ndev)
1686                                continue;
1687
1688                        priv = netdev_priv(sl_ndev);
1689                        slave->port_vlan = vlan;
1690                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
1691                        if (netif_running(sl_ndev))
1692                                cpsw_port_add_switch_def_ale_entries(priv,
1693                                                                     slave);
1694                }
1695
1696                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1697                cpsw->data.dual_emac = false;
1698        } else {
1699                dev_info(cpsw->dev, "Disable switch mode\n");
1700
1701                /* enable bypass - no forwarding; all traffic goes to Host */
1702                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
1703
1704                cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
1705                cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
1706
1707                cpsw_init_host_port_dual_mac(cpsw);
1708
1709                for (i = 0; i < cpsw->data.slaves; i++) {
1710                        struct cpsw_slave *slave = &cpsw->slaves[i];
1711                        struct net_device *sl_ndev = slave->ndev;
1712                        struct cpsw_priv *priv;
1713
1714                        if (!sl_ndev)
1715                                continue;
1716
1717                        priv = netdev_priv(slave->ndev);
1718                        slave->port_vlan = slave->data->dual_emac_res_vlan;
1719                        WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
1720                        cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
1721                }
1722
1723                cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
1724                cpsw->data.dual_emac = true;
1725        }
1726exit:
1727        rtnl_unlock();
1728
1729        return 0;
1730}
1731
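    /* devlink "ale_bypass" runtime parameter: when enabled the ALE forwards
     * nothing between ports and all received traffic goes to the host port.
     */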
1732static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
1733                                struct devlink_param_gset_ctx *ctx)
1734{
1735        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1736        struct cpsw_common *cpsw = dl_priv->cpsw;
1737
1738        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1739
1740        switch (id) {
1741        case CPSW_DL_PARAM_ALE_BYPASS:
1742                ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
1743                break;
1744        default:
1745                return -EOPNOTSUPP;
1746        }
1747
1748        return 0;
1749}
1750
1751static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
1752                                struct devlink_param_gset_ctx *ctx)
1753{
1754        struct cpsw_devlink *dl_priv = devlink_priv(dl);
1755        struct cpsw_common *cpsw = dl_priv->cpsw;
1756        int ret = -EOPNOTSUPP;
1757
1758        dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
1759
1760        switch (id) {
1761        case CPSW_DL_PARAM_ALE_BYPASS:
1762                ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
1763                                           ctx->val.vbool);
1764                if (!ret) {
1765                        cpsw->ale_bypass = ctx->val.vbool;
1766                        cpsw_port_offload_fwd_mark_update(cpsw);
1767                }
1768                break;
1769        default:
1770                return -EOPNOTSUPP;
1771        }
1772
1773        return ret;
1774}
1775
1776static const struct devlink_param cpsw_devlink_params[] = {
1777        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
1778                             "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
1779                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1780                             cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
1781                             NULL),
1782        DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
1783                             "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
1784                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1785                             cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
1786};
1787
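    /* Expose the parameters above through a devlink instance. They can be
     * changed at runtime, e.g. (the device handle is board specific and only
     * illustrative here):
     *   devlink dev param set platform/<switch> name switch_mode \
     *           value true cmode runtime
     */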
1788static int cpsw_register_devlink(struct cpsw_common *cpsw)
1789{
1790        struct device *dev = cpsw->dev;
1791        struct cpsw_devlink *dl_priv;
1792        int ret = 0;
1793
1794        cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv));
1795        if (!cpsw->devlink)
1796                return -ENOMEM;
1797
1798        dl_priv = devlink_priv(cpsw->devlink);
1799        dl_priv->cpsw = cpsw;
1800
1801        ret = devlink_register(cpsw->devlink, dev);
1802        if (ret) {
1803                dev_err(dev, "DL reg fail ret:%d\n", ret);
1804                goto dl_free;
1805        }
1806
1807        ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
1808                                      ARRAY_SIZE(cpsw_devlink_params));
1809        if (ret) {
1810                dev_err(dev, "DL params reg fail ret:%d\n", ret);
1811                goto dl_unreg;
1812        }
1813
1814        devlink_params_publish(cpsw->devlink);
1815        return ret;
1816
1817dl_unreg:
1818        devlink_unregister(cpsw->devlink);
1819dl_free:
1820        devlink_free(cpsw->devlink);
1821        return ret;
1822}
1823
1824static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
1825{
1826        devlink_params_unpublish(cpsw->devlink);
1827        devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
1828                                  ARRAY_SIZE(cpsw_devlink_params));
1829        devlink_unregister(cpsw->devlink);
1830        devlink_free(cpsw->devlink);
1831}
1832
1833static const struct of_device_id cpsw_of_mtable[] = {
1834        { .compatible = "ti,cpsw-switch"},
1835        { .compatible = "ti,am335x-cpsw-switch"},
1836        { .compatible = "ti,am4372-cpsw-switch"},
1837        { .compatible = "ti,dra7-cpsw-switch"},
1838        { /* sentinel */ },
1839};
1840MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
1841
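    /* SoC revisions that need the interrupt quirk: a match sets
     * cpsw->quirk_irq in probe, which makes the driver use the single-NAPI
     * cpsw_rx_poll/cpsw_tx_poll handlers instead of the per-queue variants.
     */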
1842static const struct soc_device_attribute cpsw_soc_devices[] = {
1843        { .family = "AM33xx", .revision = "ES1.0"},
1844        { /* sentinel */ }
1845};
1846
1847static int cpsw_probe(struct platform_device *pdev)
1848{
1849        const struct soc_device_attribute *soc;
1850        struct device *dev = &pdev->dev;
1851        struct cpsw_common *cpsw;
1852        struct resource *ss_res;
1853        struct gpio_descs *mode;
1854        void __iomem *ss_regs;
1855        int ret = 0, ch;
1856        struct clk *clk;
1857        int irq;
1858
1859        cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
1860        if (!cpsw)
1861                return -ENOMEM;
1862
1863        cpsw_slave_index = cpsw_slave_index_priv;
1864
1865        cpsw->dev = dev;
1866
1867        cpsw->slaves = devm_kcalloc(dev,
1868                                    CPSW_SLAVE_PORTS_NUM,
1869                                    sizeof(struct cpsw_slave),
1870                                    GFP_KERNEL);
1871        if (!cpsw->slaves)
1872                return -ENOMEM;
1873
1874        mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
1875        if (IS_ERR(mode)) {
1876                ret = PTR_ERR(mode);
1877                dev_err(dev, "gpio request failed, ret %d\n", ret);
1878                return ret;
1879        }
1880
1881        clk = devm_clk_get(dev, "fck");
1882        if (IS_ERR(clk)) {
1883                ret = PTR_ERR(clk);
1884                dev_err(dev, "fck clock not found, ret %d\n", ret);
1885                return ret;
1886        }
1887        cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
1888
1889        ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1890        ss_regs = devm_ioremap_resource(dev, ss_res);
1891        if (IS_ERR(ss_regs)) {
1892                ret = PTR_ERR(ss_regs);
1893                return ret;
1894        }
1895        cpsw->regs = ss_regs;
1896
1897        irq = platform_get_irq_byname(pdev, "rx");
1898        if (irq < 0)
1899                return irq;
1900        cpsw->irqs_table[0] = irq;
1901
1902        irq = platform_get_irq_byname(pdev, "tx");
1903        if (irq < 0)
1904                return irq;
1905        cpsw->irqs_table[1] = irq;
1906
1907        irq = platform_get_irq_byname(pdev, "misc");
1908        if (irq <= 0)
1909                return irq;
1910        cpsw->misc_irq = irq;
1911
1912        platform_set_drvdata(pdev, cpsw);
1913        /* Enable runtime PM early; child devices may rely on it. */
1914        pm_runtime_enable(dev);
1915
1916        /* Need to enable clocks with runtime PM api to access module
1917         * registers
1918         */
1919        ret = pm_runtime_get_sync(dev);
1920        if (ret < 0) {
1921                pm_runtime_put_noidle(dev);
1922                pm_runtime_disable(dev);
1923                return ret;
1924        }
1925
1926        ret = cpsw_probe_dt(cpsw);
1927        if (ret)
1928                goto clean_dt_ret;
1929
1930        soc = soc_device_match(cpsw_soc_devices);
1931        if (soc)
1932                cpsw->quirk_irq = true;
1933
1934        cpsw->rx_packet_max = rx_packet_max;
1935        cpsw->descs_pool_size = descs_pool_size;
1936        eth_random_addr(cpsw->base_mac);
1937
1938        ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
1939                               (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
1940                               descs_pool_size);
1941        if (ret)
1942                goto clean_dt_ret;
1943
1944        cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
1945                        ss_regs + CPSW1_WR_OFFSET :
1946                        ss_regs + CPSW2_WR_OFFSET;
1947
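            /* Default TX traffic to CPDMA channel 7; with the IRQ quirk only
             * channel 0 is used.
             */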
1948        ch = cpsw->quirk_irq ? 0 : 7;
1949        cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
1950        if (IS_ERR(cpsw->txv[0].ch)) {
1951                dev_err(dev, "error initializing tx dma channel\n");
1952                ret = PTR_ERR(cpsw->txv[0].ch);
1953                goto clean_cpts;
1954        }
1955
1956        cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
1957        if (IS_ERR(cpsw->rxv[0].ch)) {
1958                dev_err(dev, "error initializing rx dma channel\n");
1959                ret = PTR_ERR(cpsw->rxv[0].ch);
1960                goto clean_cpts;
1961        }
1962        cpsw_split_res(cpsw);
1963
1964        /* setup netdevs */
1965        ret = cpsw_create_ports(cpsw);
1966        if (ret)
1967                goto clean_unregister_netdev;
1968
1969        /* Grab the RX and TX IRQs. Note that we also have RX_THRESHOLD and
1970         * MISC IRQs, which are kept disabled with this driver, so we do not
1971         * request them here (the MISC IRQ is requested below when CPTS is in use).
1972         *
1973         * If anyone wants to implement support for those, make sure to
1974         * first request and append them to the irqs_table array.
1975         */
1976
1977        ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
1978                               0, dev_name(dev), cpsw);
1979        if (ret < 0) {
1980                dev_err(dev, "error attaching irq (%d)\n", ret);
1981                goto clean_unregister_netdev;
1982        }
1983
1984        ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
1985                               0, dev_name(dev), cpsw);
1986        if (ret < 0) {
1987                dev_err(dev, "error attaching irq (%d)\n", ret);
1988                goto clean_unregister_netdev;
1989        }
1990
1991        if (!cpsw->cpts)
1992                goto skip_cpts;
1993
1994        ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
1995                               0, dev_name(&pdev->dev), cpsw);
1996        if (ret < 0) {
1997                dev_err(dev, "error attaching misc irq (%d)\n", ret);
1998                goto clean_unregister_netdev;
1999        }
2000
2001        /* Enable misc CPTS evnt_pend IRQ */
2002        cpts_set_irqpoll(cpsw->cpts, false);
2003
2004skip_cpts:
2005        ret = cpsw_register_notifiers(cpsw);
2006        if (ret)
2007                goto clean_unregister_netdev;
2008
2009        ret = cpsw_register_devlink(cpsw);
2010        if (ret)
2011                goto clean_unregister_notifiers;
2012
2013        ret = cpsw_register_ports(cpsw);
2014        if (ret)
2015                goto clean_unregister_notifiers;
2016
2017        dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
2018                   &ss_res->start, descs_pool_size,
2019                   cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
2020                   CPSW_MINOR_VERSION(cpsw->version),
2021                   CPSW_RTL_VERSION(cpsw->version));
2022
2023        pm_runtime_put(dev);
2024
2025        return 0;
2026
2027clean_unregister_notifiers:
2028        cpsw_unregister_notifiers(cpsw);
2029clean_unregister_netdev:
2030        cpsw_unregister_ports(cpsw);
2031clean_cpts:
2032        cpts_release(cpsw->cpts);
2033        cpdma_ctlr_destroy(cpsw->dma);
2034clean_dt_ret:
2035        cpsw_remove_dt(cpsw);
2036        pm_runtime_put_sync(dev);
2037        pm_runtime_disable(dev);
2038        return ret;
2039}
2040
2041static int cpsw_remove(struct platform_device *pdev)
2042{
2043        struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2044        int ret;
2045
2046        ret = pm_runtime_get_sync(&pdev->dev);
2047        if (ret < 0) {
2048                pm_runtime_put_noidle(&pdev->dev);
2049                return ret;
2050        }
2051
2052        cpsw_unregister_notifiers(cpsw);
2053        cpsw_unregister_devlink(cpsw);
2054        cpsw_unregister_ports(cpsw);
2055
2056        cpts_release(cpsw->cpts);
2057        cpdma_ctlr_destroy(cpsw->dma);
2058        cpsw_remove_dt(cpsw);
2059        pm_runtime_put_sync(&pdev->dev);
2060        pm_runtime_disable(&pdev->dev);
2061        return 0;
2062}
2063
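    /* System suspend/resume: close every running port before sleeping and
     * reopen it on resume, selecting the pinctrl sleep/default states around
     * that.
     */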
2064static int __maybe_unused cpsw_suspend(struct device *dev)
2065{
2066        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2067        int i;
2068
2069        rtnl_lock();
2070
2071        for (i = 0; i < cpsw->data.slaves; i++) {
2072                struct net_device *ndev = cpsw->slaves[i].ndev;
2073
2074                if (!(ndev && netif_running(ndev)))
2075                        continue;
2076
2077                cpsw_ndo_stop(ndev);
2078        }
2079
2080        rtnl_unlock();
2081
2082        /* Select sleep pin state */
2083        pinctrl_pm_select_sleep_state(dev);
2084
2085        return 0;
2086}
2087
2088static int __maybe_unused cpsw_resume(struct device *dev)
2089{
2090        struct cpsw_common *cpsw = dev_get_drvdata(dev);
2091        int i;
2092
2093        /* Select default pin state */
2094        pinctrl_pm_select_default_state(dev);
2095
2096        /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2097        rtnl_lock();
2098
2099        for (i = 0; i < cpsw->data.slaves; i++) {
2100                struct net_device *ndev = cpsw->slaves[i].ndev;
2101
2102                if (!(ndev && netif_running(ndev)))
2103                        continue;
2104
2105                cpsw_ndo_open(ndev);
2106        }
2107
2108        rtnl_unlock();
2109
2110        return 0;
2111}
2112
2113static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2114
2115static struct platform_driver cpsw_driver = {
2116        .driver = {
2117                .name    = "cpsw-switch",
2118                .pm      = &cpsw_pm_ops,
2119                .of_match_table = cpsw_of_mtable,
2120        },
2121        .probe = cpsw_probe,
2122        .remove = cpsw_remove,
2123};
2124
2125module_platform_driver(cpsw_driver);
2126
2127MODULE_LICENSE("GPL");
2128MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
2129