linux/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME	"Marvell-CGX/RPM"
#define DRV_STRING	"Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
	[CGX_LINK_NONE] = 0,
	[CGX_LINK_10M] = 10,
	[CGX_LINK_100M] = 100,
	[CGX_LINK_1G] = 1000,
	[CGX_LINK_2HG] = 2500,
	[CGX_LINK_5G] = 5000,
	[CGX_LINK_10G] = 10000,
	[CGX_LINK_20G] = 20000,
	[CGX_LINK_25G] = 25000,
	[CGX_LINK_40G] = 40000,
	[CGX_LINK_50G] = 50000,
	[CGX_LINK_80G] = 80000,
	[CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
	[LMAC_MODE_SGMII] = "SGMII",
	[LMAC_MODE_XAUI] = "XAUI",
	[LMAC_MODE_RXAUI] = "RXAUI",
	[LMAC_MODE_10G_R] = "10G_R",
	[LMAC_MODE_40G_R] = "40G_R",
	[LMAC_MODE_QSGMII] = "QSGMII",
	[LMAC_MODE_25G_R] = "25G_R",
	[LMAC_MODE_50G_R] = "50G_R",
	[LMAC_MODE_100G_R] = "100G_R",
	[LMAC_MODE_USXGMII] = "USXGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
		return false;
	return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get the sequential index of an enabled LMAC within
 * a CGX; e.g. with lmac_bmap = 0b1101, lmac_id 2 maps to index 1.
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
	int tmp, id = 0;

	for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		if (tmp == lmac_id)
			break;
		id++;
	}

	return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return cgxd;

	return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an
 * asynchronous event (with the latest link status) can reach the
 * destination before this function returns and make the link status
 * appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

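/* Pack a 6-byte MAC address into the 48-bit big-endian layout used by
 * the DMAC CAM registers, e.g. 01:02:03:04:05:06 -> 0x010203040506.
 */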
static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

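/* Inverse of mac2u64(): unpack the low 48 bits of a DMAC CAM entry
 * back into a 6-byte MAC address.
 */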
static void cfg2mac(u64 cfg, u8 *mac_addr)
{
	int i, index = 0;

	for (i = ETH_ALEN - 1; i >= 0; i--, index++)
		mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	/* access mac_ops to know csr_offset */
	mac_ops = cgx_dev->mac_ops;

	/* copy 6 bytes from macaddr */
	cfg = mac2u64(mac_addr);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
		CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
	struct mac_ops *mac_ops;
	struct cgx *cgx = cgxd;

	if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
		return 0;

	/* Get mac_ops to know csr offset */
	mac_ops = cgx->mac_ops;

	return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
	struct mac_ops *mac_ops;
	struct cgx *cgx;

	if (!cgxd)
		return 0;

	cgx = cgxd;
	mac_ops = cgx->mac_ops;
	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

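/* Install an additional DMAC filter for this LMAC. The CAM is shared
 * by all LMACs of a CGX: each LMAC owns a contiguous slice of
 * mac_to_index_bmap.max entries starting at its sequence id * max,
 * and the returned idx is the offset within that slice.
 */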
int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, idx;
	u64 cfg = 0;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Get available index where entry is to be installed */
	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
	if (idx < 0)
		return idx;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + idx;

	cfg = mac2u64(mac_addr);
	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
	cfg |= ((u64)lmac_id << 49);
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

	if (is_multicast_ether_addr(mac_addr)) {
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE_CAM;
		lmac->mcast_filters_count++;
	} else if (!lmac->mcast_filters_count) {
		cfg |= CGX_DMAC_MCAST_MODE;
	}

	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 index = 0, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Restore index 0 to its default init value as done during
	 * cgx_lmac_init
	 */
	set_bit(0, lmac->mac_to_index_bmap.bmap);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg &= ~CGX_DMAC_CAM_ACCEPT;
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for
 * the interface MAC address.
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	struct lmac *lmac;
	u64 cfg;
	int id;

	lmac = lmac_pdata(lmac_id, cgx_dev);
	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Validate the index */
	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* ensure index is already set */
	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
		return -EINVAL;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
	cfg &= ~CGX_RX_DMAC_ADR_MASK;
	cfg |= mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
	return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 mac[ETH_ALEN];
	u64 cfg;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Validate the index */
	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* Skip deletion for reserved index i.e. index 0 */
	if (index == 0)
		return 0;

	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	/* Read MAC address to check whether it is ucast or mcast */
	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

	cfg2mac(cfg, mac);
	if (is_multicast_ether_addr(mac))
		lmac->mcast_filters_count--;

	if (!lmac->mcast_filters_count) {
		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE;
		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
	}

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

	if (lmac)
		return lmac->mac_to_index_bmap.max;

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index;
	u64 cfg;
	int id;

	if (!lmac)
		return 0;

	mac_ops = cgx_dev->mac_ops;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

/* Configure CGX LMAC in internal loopback mode */
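/* SGMII/QSGMII LMACs loop back at the GMP PCS; all other LMAC types
 * loop back at the SPU, hence the two register paths below.
 */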
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

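/* Promiscuous mode: when enabled, stop rejecting unmatched DMACs and
 * disable every CAM entry owned by this LMAC; when disabled, restore
 * CAM-based filtering and re-enable the entries that still hold an
 * address.
 */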
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
	struct mac_ops *mac_ops;
	u16 max_dmac;
	int index, i;
	u64 cfg = 0;
	int id;

	if (!cgx || !lmac)
		return;

	max_dmac = lmac->mac_to_index_bmap.max;
	id = get_sequence_id_of_lmac(cgx, lmac_id);

	mac_ops = cgx->mac_ops;
	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~CGX_DMAC_CAM_ACCEPT;
		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
			cgx_write(cgx, 0,
				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
		}
	} else {
		/* Disable promiscuous mode */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
				cgx_write(cgx, 0,
					  (CGXX_CMRX_RX_DMAC_CAM0 +
					   index * 0x8),
					  cfg);
			}
		}
	}
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

u64 cgx_features_get(void *cgxd)
{
	return ((struct cgx *)cgxd)->hw_features;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
	if (!linfo->fec)
		return 0;

	switch (linfo->lmac_type_id) {
	case LMAC_MODE_SGMII:
	case LMAC_MODE_XAUI:
	case LMAC_MODE_RXAUI:
	case LMAC_MODE_QSGMII:
		return 0;
	case LMAC_MODE_10G_R:
	case LMAC_MODE_25G_R:
	case LMAC_MODE_100G_R:
	case LMAC_MODE_USXGMII:
		return 1;
	case LMAC_MODE_40G_R:
		return 4;
	case LMAC_MODE_50G_R:
		if (linfo->fec == OTX2_FEC_BASER)
			return 2;
		else
			return 1;
	default:
		return 0;
	}
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count = 0;
	int corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return;
	if (enable) {
		/* Enable receive pause frames */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Enable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	} else {
		/* ALL pause frames received are completely ignored */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		/* Disable pause frames transmission */
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
	}
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (is_dev_rpm(cgx))
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP stamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

/* CGX Firmware interface low level support */
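/* Command/response protocol: the kernel owns CGX_COMMAND_REG while
 * CMDREG_OWN reads CGX_CMD_OWN_NS; writing a request hands ownership
 * to firmware, and cgx_fwi_event_handler() posts the response into
 * lmac->resp and wakes the waiter.
 */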
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

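/* Map an ethtool SPEED_* value to the firmware CGX_LINK_* encoding;
 * the reverse of the cgx_speed_mbps[] table above.
 */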
static int cgx_link_usertable_index_map(int speed)
{
	switch (speed) {
	case SPEED_10:
		return CGX_LINK_10M;
	case SPEED_100:
		return CGX_LINK_100M;
	case SPEED_1000:
		return CGX_LINK_1G;
	case SPEED_2500:
		return CGX_LINK_2HG;
	case SPEED_5000:
		return CGX_LINK_5G;
	case SPEED_10000:
		return CGX_LINK_10G;
	case SPEED_20000:
		return CGX_LINK_20G;
	case SPEED_25000:
		return CGX_LINK_25G;
	case SPEED_40000:
		return CGX_LINK_40G;
	case SPEED_50000:
		return CGX_LINK_50G;
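	/* ethtool does not appear to define a SPEED_80000 macro, hence
	 * the raw value here
	 */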
	case 80000:
		return CGX_LINK_80G;
	case SPEED_100000:
		return CGX_LINK_100G;
	case SPEED_UNKNOWN:
		return CGX_LINK_NONE;
	}
	return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
	/* Fill in default values in case the user did not pass
	 * valid parameters
	 */
	if (args->duplex == DUPLEX_UNKNOWN)
		args->duplex = duplex;
	if (args->speed == SPEED_UNKNOWN)
		args->speed = speed;
	if (args->an == AUTONEG_UNKNOWN)
		args->an = autoneg;
	args->mode = mode;
	args->ports = 0;
}

static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
{
	switch (bitmask) {
	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
		break;
	case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
		break;
	case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
		break;
	case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
		break;
	case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
		break;
	case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
		break;
	case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
		break;
	case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
		break;
	case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
		break;
	case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
		break;
	case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
		break;
	case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
		break;
	case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
		break;
	case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
		break;
	case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
		break;
	case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
		break;
	case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
		break;
	case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
		break;
	case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
		break;
	case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
		break;
	default:
		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
		break;
	}
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	const char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
		return;

	/* Ensure callback doesn't get unregistered until we finish it */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

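/* Per-LMAC firmware interrupt handler; 'data' is the struct lmac
 * registered in cgx_configure_interrupt(). Handles both command
 * responses and asynchronous events such as link changes.
 */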
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset    = cgx->mac_ops->int_register;
	clear_bit = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link change initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}
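
/* A minimal usage sketch (hypothetical caller; my_link_handler and
 * my_priv are placeholder names):
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_handler,
 *		.data = my_priv,
 *	};
 *	err = cgx_lmac_evh_register(&cb, cgxd, lmac_id);
 */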

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int first_lmac;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	if (args.mode)
		otx2_map_ethtool_link_modes(args.mode, &args);
	if (!args.speed && args.duplex && !args.an)
		return -EINVAL;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
	req = FIELD_SET(CMDMODECHANGE_SPEED,
			cgx_link_usertable_index_map(args.speed), req);
	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err = 0;

	cgx = cgx_get_pdata(cgx_id);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
	req = FIELD_SET(CMDSETFEC, fec, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (err)
		return err;

	cgx->lmac_idmap[lmac_id]->link_info.fec =
			FIELD_GET(RESP_LINKSTAT_FEC, resp);
	return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 req = 0;
	u64 resp;

	if (enable)
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
	else
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
		return -EIO;
	else
		return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
	struct device *dev = &cgx->pdev->dev;
	int i, err;

	/* Do Link up for all the enabled lmacs */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		err = cgx_fwi_link_change(cgx, i, true);
		if (err)
			dev_info(dev, "cgx port %d:%d Link up command failed\n",
				 cgx->cgx_id, i);
	}
}

int cgx_lmac_linkup_start(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

	return 0;
}

static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}

static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
{
	struct mac_ops *mac_ops = cgx->mac_ops;
	u64 offset, ena_bit;
	unsigned int irq;
	int err;

	irq     = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
				 cnt * mac_ops->irq_offset);
	offset  = mac_ops->int_set_reg;
	ena_bit = mac_ops->int_ena_bit;

	if (req_free) {
		free_irq(irq, lmac);
		return 0;
	}

	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
	if (err)
		return err;

	/* Enable interrupt */
	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
	return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_bmap;
}

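/* Per-LMAC init: allocate the lmac struct and its DMAC index bitmap
 * (the shared CAM is split evenly across LMACs, with entry 0 of each
 * slice reserved for the default MAC address), hook up the firmware
 * interrupt and enable pause frames.
 */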
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	u64 lmac_list;
	int i, err;

	cgx_lmac_get_fifolen(cgx);

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	/* lmac_list specifies which lmacs are enabled
	 * when bit n is set to 1, LMAC[n] is enabled
	 */
	if (cgx->mac_ops->non_contiguous_serdes_lane)
		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;

	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		lmac->mac_to_index_bmap.max =
				MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
		if (err)
			goto err_name_free;

		/* Reserve first entry for default MAC address */
		set_bit(0, lmac->mac_to_index_bmap.bmap);

		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_bitmap_free;

		/* Add reference */
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_bitmap_free:
	kfree(lmac->mac_to_index_bmap.bmap);
err_name_free:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		flush_workqueue(cgx->cgx_cmd_workq);
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all lmac related resources */
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		kfree(lmac->mac_to_index_bmap.bmap);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
}

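/* Register layout and IRQ wiring differ between CGX (OcteonTx2) and
 * RPM (CN10K); mac_ops abstracts those differences, and the RPM
 * variant is supplied by rpm_get_mac_ops() at probe time.
 */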
static struct mac_ops cgx_mac_ops = {
	.name		= "cgx",
	.csr_offset	= 0,
	.lmac_offset	= 18,
	.int_register	= CGXX_CMRX_INT,
	.int_set_reg	= CGXX_CMRX_INT_ENA_W1S,
	.irq_offset	= 9,
	.int_ena_bit	= FW_CGX_INT,
	.lmac_fwi	= CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt	= 9,
	.tx_stats_cnt	= 18,
	.get_nr_lmacs	= cgx_get_nr_lmacs,
	.get_lmac_type	= cgx_get_lmac_type,
	.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
	.mac_get_rx_stats  = cgx_get_rx_stats,
	.mac_get_tx_stats  = cgx_get_tx_stats,
	.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config = cgx_lmac_pause_frm_config,
};

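/* Probe: map BAR0 CSRs, allocate all MSI-X vectors, derive cgx_id from
 * the BAR address, set up the link-up workqueue and initialize LMACs.
 */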
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	/* Use mac_ops to get MAC specific features */
	if (pdev->device == PCI_DEVID_CN10K_RPM)
		cgx->mac_ops = rpm_get_mac_ops();
	else
		cgx->mac_ops = &cgx_mac_ops;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = pci_msix_vec_count(cgx->pdev);
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	/* init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};