/* linux/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c */
   1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
   2// Copyright (c) 2020 Mellanox Technologies
   3
   4#include "en/ptp.h"
   5#include "en/txrx.h"
   6#include "en/params.h"
   7#include "en/fs_tt_redirect.h"
   8
/* RX flow-steering state for redirecting PTP traffic to the PTP RQ.
 * Rules are installed lazily by mlx5e_ptp_rx_set_fs() and removed by
 * mlx5e_ptp_rx_unset_fs(); @valid tracks whether the rules below are
 * currently installed.
 */
struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;     /* L2 ethertype (ETH_P_1588) redirect */
	struct mlx5_flow_handle *udp_v4_rule; /* IPv4/UDP PTP event-port redirect */
	struct mlx5_flow_handle *udp_v6_rule; /* IPv6/UDP PTP event-port redirect */
	bool valid;
};
  15
/* The PTP channel is a single dedicated channel, always at index 0. */
#define MLX5E_PTP_CHANNEL_IX 0

/* Aggregated parameters used to create the PTP channel's SQs and RQ. */
struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};
  23
/* Per-skb state kept in skb->cb while a PTP TX skb waits for both of
 * its completions: the regular CQE timestamp and the port timestamp.
 * A zero value means the corresponding CQE has not arrived yet.
 */
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};
  28
  29void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
  30{
  31        memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
  32}
  33
/* View skb->cb as the PTP timestamp-tracking state.
 * BUILD_BUG_ON guarantees the state fits within the skb cb area.
 */
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}
  39
  40static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
  41                                     struct mlx5e_ptp_cq_stats *cq_stats)
  42{
  43        struct skb_shared_hwtstamps hwts = {};
  44        ktime_t diff;
  45
  46        diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
  47                   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
  48
  49        /* Maximal allowed diff is 1 / 128 second */
  50        if (diff > (NSEC_PER_SEC >> 7)) {
  51                cq_stats->abort++;
  52                cq_stats->abort_abs_diff_ns += diff;
  53                return;
  54        }
  55
  56        hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
  57        skb_tstamp_tx(skb, &hwts);
  58}
  59
  60void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
  61                                   ktime_t hwtstamp,
  62                                   struct mlx5e_ptp_cq_stats *cq_stats)
  63{
  64        switch (hwtstamp_type) {
  65        case (MLX5E_SKB_CB_CQE_HWTSTAMP):
  66                mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
  67                break;
  68        case (MLX5E_SKB_CB_PORT_HWTSTAMP):
  69                mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
  70                break;
  71        }
  72
  73        /* If both CQEs arrive, check and report the port tstamp, and clear skb cb as
  74         * skb soon to be released.
  75         */
  76        if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
  77            !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
  78                return;
  79
  80        mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
  81        memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
  82}
  83
/* Handle one completion from the port-timestamp CQ: pop the next
 * pending skb from the FIFO (pairing relies on completions arriving in
 * post order — NOTE(review): confirm against HW spec), record the port
 * timestamp, and release the skb.
 */
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	/* Translate the raw CQE timestamp into nanoseconds. */
	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	/* The skb is consumed even on error CQEs. */
	napi_consume_skb(skb, budget);
}
 105
/* Poll the port-timestamp CQ of a PTP SQ, consuming up to @budget CQEs.
 * Returns true when the full budget was used (more work may remain).
 */
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	/* Don't touch the CQ while the SQ is disabled (teardown in progress). */
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}
 133
/* NAPI poll for the PTP channel: service the per-TC traffic-SQ CQs and
 * port-timestamp CQs (TX state) and the PTP RQ CQ (RX state), then
 * re-arm all CQs once no more work remains.
 */
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	/* budget == 0 means netpoll TX-only context: skip RX processing. */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		/* Consume the whole budget so NAPI schedules us again. */
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	/* Re-arm interrupts only after napi_complete_done() succeeded. */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}
 186
/* Allocate the SW context and WQ memory of one PTP traffic SQ.
 * Mirrors regular txqsq allocation, but binds the SQ to the single PTP
 * channel and its dedicated stats, and links it to @ptpsq so that
 * completions can later be paired with port timestamps.
 */
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	/* Without HW VLAN insertion, the L2 header must be inlined in WQEs. */
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	/* Place WQ and doorbell memory on the device's NUMA node. */
	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db    = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
 238
/* Thin wrapper destroying the PTP SQ object in firmware. */
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
 243
 244static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 245{
 246        int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
 247
 248        ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
 249                                             GFP_KERNEL, numa);
 250        if (!ptpsq->skb_fifo.fifo)
 251                return -ENOMEM;
 252
 253        ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
 254        ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
 255        ptpsq->skb_fifo.mask = wq_sz - 1;
 256
 257        return 0;
 258}
 259
 260static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
 261{
 262        while (*skb_fifo->pc != *skb_fifo->cc) {
 263                struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
 264
 265                dev_kfree_skb_any(skb);
 266        }
 267}
 268
/* Drop any in-flight skbs, then release the FIFO storage itself. */
static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}
 274
 275static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
 276                                int txq_ix, struct mlx5e_ptp_params *cparams,
 277                                int tc, struct mlx5e_ptpsq *ptpsq)
 278{
 279        struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
 280        struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
 281        struct mlx5e_create_sq_param csp = {};
 282        int err;
 283
 284        err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
 285                                    txqsq, tc, ptpsq);
 286        if (err)
 287                return err;
 288
 289        csp.tisn            = tisn;
 290        csp.tis_lst_sz      = 1;
 291        csp.cqn             = txqsq->cq.mcq.cqn;
 292        csp.wq_ctrl         = &txqsq->wq_ctrl;
 293        csp.min_inline_mode = txqsq->min_inline_mode;
 294        csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
 295
 296        err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
 297        if (err)
 298                goto err_free_txqsq;
 299
 300        err = mlx5e_ptp_alloc_traffic_db(ptpsq,
 301                                         dev_to_node(mlx5_core_dma_dev(c->mdev)));
 302        if (err)
 303                goto err_free_txqsq;
 304
 305        return 0;
 306
 307err_free_txqsq:
 308        mlx5e_free_txqsq(txqsq);
 309
 310        return err;
 311}
 312
/* Tear down one traffic SQ in safe order: free the skb FIFO, stop the
 * recover work, destroy the HW SQ, then reclaim outstanding descriptors
 * and free the SW context.
 */
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
 324
/* Open one PTP traffic SQ per TC. PTP txq indices follow directly
 * after the regular channels' txqs (base = num_tc * num_channels).
 */
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	int ix_base;
	int err;
	int tc;

	ix_base = params->num_tc * params->num_channels;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	/* Unwind only the SQs opened so far. */
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
 352
 353static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
 354{
 355        int tc;
 356
 357        for (tc = 0; tc < c->num_tc; tc++)
 358                mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
 359}
 360
/* Open, per TC, the traffic-SQ CQ and the port-timestamp CQ. All CQs
 * share the PTP channel's NAPI and the txq SQ's CQ parameters, with no
 * interrupt moderation (ptp_moder zeroed).
 */
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	int err;
	int tc;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	/* Close the TS CQs opened so far ... */
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	/* ... then reset tc so the fall-through closes ALL txqsq CQs,
	 * which were fully opened by the first loop.
	 */
	tc = params->num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
 409
 410static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
 411                                struct mlx5e_ptp_params *cparams)
 412{
 413        struct mlx5e_create_cq_param ccp = {};
 414        struct dim_cq_moder ptp_moder = {};
 415        struct mlx5e_cq_param *cq_param;
 416        struct mlx5e_cq *cq = &c->rq.cq;
 417
 418        ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
 419        ccp.ch_stats = c->stats;
 420        ccp.napi     = &c->napi;
 421        ccp.ix       = MLX5E_PTP_CHANNEL_IX;
 422
 423        cq_param = &cparams->rq_param.cqp;
 424
 425        return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
 426}
 427
 428static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
 429{
 430        int tc;
 431
 432        for (tc = 0; tc < c->num_tc; tc++)
 433                mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
 434
 435        for (tc = 0; tc < c->num_tc; tc++)
 436                mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
 437}
 438
/* Build SQ creation parameters for the PTP traffic SQs. stop_room
 * reserves space for the largest possible WQE so the SQ is stopped
 * before it could overflow.
 */
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
 453
 454static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
 455                                     struct net_device *netdev,
 456                                     u16 q_counter,
 457                                     struct mlx5e_ptp_params *ptp_params)
 458{
 459        struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
 460        struct mlx5e_params *params = &ptp_params->params;
 461
 462        params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
 463        mlx5e_init_rq_type_params(mdev, params);
 464        params->sw_mtu = netdev->max_mtu;
 465        mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
 466}
 467
/* Derive the PTP channel's parameters from the netdev's current ones,
 * building SQ params only when TX port timestamping is enabled and RQ
 * params only when RX PTP steering is enabled.
 */
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->num_tc = orig->num_tc;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size = orig->log_sq_size;
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}
	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}
 491
/* Initialize the SW state of the PTP RQ (handlers, stats, clock) and
 * register its xdp_rxq info (NOTE(review): presumably required by the
 * shared RX path; no XDP program runs on this channel).
 */
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type      = params->rq_wq_type;
	rq->pdev         = c->pdev;
	rq->netdev       = priv->netdev;
	rq->priv         = priv;
	rq->clock        = &mdev->clock;
	rq->tstamp       = &priv->tstamp;
	rq->mdev         = mdev;
	rq->hw_mtu       = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats        = &c->priv->ptp_stats.rq;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
 515
 516static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
 517                             struct mlx5e_rq_param *rq_param)
 518{
 519        int node = dev_to_node(c->mdev->device);
 520        int err;
 521
 522        err = mlx5e_init_ptp_rq(c, params, &c->rq);
 523        if (err)
 524                return err;
 525
 526        return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
 527}
 528
/* Open the channel's queues according to its state bits: TX CQs, then
 * traffic SQs, then RX CQ and RQ; unwound in reverse order on failure.
 * The state-bit checks in the error path keep the labels correct for
 * both TX-only and RX-only configurations.
 */
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
 566
/* Tear down queues in reverse order of mlx5e_ptp_open_queues(). */
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}
 578
 579static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
 580{
 581        if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
 582                __set_bit(MLX5E_PTP_STATE_TX, c->state);
 583
 584        if (params->ptp_rx)
 585                __set_bit(MLX5E_PTP_STATE_RX, c->state);
 586
 587        return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
 588}
 589
/* Remove all PTP RX steering rules; no-op when none are installed.
 * Rules and tables are deleted in reverse order of creation.
 */
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(priv);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(priv);
	ptp_fs->valid = false;
}
 605
/* Install the PTP RX steering rules: UDP event-port redirect for IPv4
 * and IPv6, plus an L2 ETH_P_1588 redirect, all targeting the PTP TIR.
 * Idempotent via ptp_fs->valid; on failure everything created so far
 * is unwound in reverse order.
 */
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
	struct mlx5_flow_handle *rule;
	u32 tirn = priv->ptp_tir.tirn;
	int err;

	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(priv);
	if (err)
		goto out_free;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(priv);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(priv);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(priv);
out_free:
	return err;
}
 661
 662int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 663                   u8 lag_port, struct mlx5e_ptp **cp)
 664{
 665        struct net_device *netdev = priv->netdev;
 666        struct mlx5_core_dev *mdev = priv->mdev;
 667        struct mlx5e_ptp_params *cparams;
 668        struct mlx5e_ptp *c;
 669        int err;
 670
 671
 672        c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
 673        cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
 674        if (!c || !cparams)
 675                return -ENOMEM;
 676
 677        c->priv     = priv;
 678        c->mdev     = priv->mdev;
 679        c->tstamp   = &priv->tstamp;
 680        c->pdev     = mlx5_core_dma_dev(priv->mdev);
 681        c->netdev   = priv->netdev;
 682        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
 683        c->num_tc   = params->num_tc;
 684        c->stats    = &priv->ptp_stats.ch;
 685        c->lag_port = lag_port;
 686
 687        err = mlx5e_ptp_set_state(c, params);
 688        if (err)
 689                goto err_free;
 690
 691        netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
 692
 693        mlx5e_ptp_build_params(c, cparams, params);
 694
 695        err = mlx5e_ptp_open_queues(c, cparams);
 696        if (unlikely(err))
 697                goto err_napi_del;
 698
 699        if (test_bit(MLX5E_PTP_STATE_RX, c->state))
 700                priv->rx_ptp_opened = true;
 701
 702        *cp = c;
 703
 704        kvfree(cparams);
 705
 706        return 0;
 707
 708err_napi_del:
 709        netif_napi_del(&c->napi);
 710err_free:
 711        kvfree(cparams);
 712        kvfree(c);
 713        return err;
 714}
 715
/* Destroy the PTP channel. The caller must have deactivated it first
 * (see mlx5e_ptp_deactivate_channel()).
 */
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
 723
/* Activate the channel: enable NAPI first so completions can be
 * processed, then the traffic SQs, and finally install RX steering and
 * start the RQ.
 */
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		/* NOTE(review): return value ignored — on failure the RQ is
		 * activated without steering rules; confirm this is intended.
		 */
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
	}
}
 739
/* Deactivate in reverse order of activation: stop the RQ, then the
 * traffic SQs, and disable NAPI last so outstanding completions are
 * still handled.
 */
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}
 754
 755int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
 756{
 757        if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
 758                return -EINVAL;
 759
 760        *rqn = c->rq.rqn;
 761        return 0;
 762}
 763
 764int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
 765{
 766        struct mlx5e_ptp_fs *ptp_fs;
 767
 768        if (!priv->profile->rx_ptp_support)
 769                return 0;
 770
 771        ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
 772        if (!ptp_fs)
 773                return -ENOMEM;
 774
 775        priv->fs.ptp_fs = ptp_fs;
 776        return 0;
 777}
 778
 779void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
 780{
 781        struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
 782
 783        if (!priv->profile->rx_ptp_support)
 784                return;
 785
 786        mlx5e_ptp_rx_unset_fs(priv);
 787        kfree(ptp_fs);
 788}
 789
/* Add (set=true) or remove (set=false) the PTP RX steering rules.
 * No-op when the profile lacks RX PTP support or the netdev is closed.
 * The WARNs catch driver bugs where the request contradicts the PTP
 * channel's actual RX state.
 */
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!priv->profile->rx_ptp_support)
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		/* Adding rules requires an active PTP RX channel. */
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv);
	return 0;
}
 815