linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
   1/*
   2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/ip.h>
  34#include <linux/ipv6.h>
  35#include <linux/tcp.h>
  36#include <net/ip6_checksum.h>
  37#include <net/page_pool.h>
  38#include <net/inet_ecn.h>
  39#include "en.h"
  40#include "en/txrx.h"
  41#include "en_tc.h"
  42#include "eswitch.h"
  43#include "en_rep.h"
  44#include "en/rep/tc.h"
  45#include "ipoib/ipoib.h"
  46#include "accel/ipsec.h"
  47#include "fpga/ipsec.h"
  48#include "en_accel/ipsec_rxtx.h"
  49#include "en_accel/tls_rxtx.h"
  50#include "en/xdp.h"
  51#include "en/xsk/rx.h"
  52#include "en/health.h"
  53#include "en/params.h"
  54#include "devlink.h"
  55#include "en/devlink.h"
  56
  57static struct sk_buff *
  58mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  59                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
  60static struct sk_buff *
  61mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  62                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
  63static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  64static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  65
  66const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
  67        .handle_rx_cqe       = mlx5e_handle_rx_cqe,
  68        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
  69};
  70
  71static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
  72{
  73        return config->rx_filter == HWTSTAMP_FILTER_ALL;
  74}
  75
  76static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
  77                                       u32 cqcc, void *data)
  78{
  79        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
  80
  81        memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
  82}
  83
  84static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
  85                                         struct mlx5_cqwq *wq,
  86                                         u32 cqcc)
  87{
  88        struct mlx5e_cq_decomp *cqd = &rq->cqd;
  89        struct mlx5_cqe64 *title = &cqd->title;
  90
  91        mlx5e_read_cqe_slot(wq, cqcc, title);
  92        cqd->left        = be32_to_cpu(title->byte_cnt);
  93        cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
  94        rq->stats->cqe_compress_blks++;
  95}
  96
  97static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
  98                                            struct mlx5e_cq_decomp *cqd,
  99                                            u32 cqcc)
 100{
 101        mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
 102        cqd->mini_arr_idx = 0;
 103}
 104
 105static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
 106{
 107        u32 cqcc   = wq->cc;
 108        u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
 109        u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
 110        u32 wq_sz  = mlx5_cqwq_get_size(wq);
 111        u32 ci_top = min_t(u32, wq_sz, ci + n);
 112
 113        for (; ci < ci_top; ci++, n--) {
 114                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 115
 116                cqe->op_own = op_own;
 117        }
 118
 119        if (unlikely(ci == wq_sz)) {
 120                op_own = !op_own;
 121                for (ci = 0; ci < n; ci++) {
 122                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 123
 124                        cqe->op_own = op_own;
 125                }
 126        }
 127}
 128
 129static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 130                                        struct mlx5_cqwq *wq,
 131                                        u32 cqcc)
 132{
 133        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 134        struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
 135        struct mlx5_cqe64 *title = &cqd->title;
 136
 137        title->byte_cnt     = mini_cqe->byte_cnt;
 138        title->check_sum    = mini_cqe->checksum;
 139        title->op_own      &= 0xf0;
 140        title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
 141
 142        /* state bit set implies linked-list striding RQ wq type and
 143         * HW stride index capability supported
 144         */
 145        if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
 146                title->wqe_counter = mini_cqe->stridx;
 147                return;
 148        }
 149
 150        /* HW stride index capability not supported */
 151        title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
 152        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
 153                cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
 154        else
 155                cqd->wqe_counter =
 156                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
 157}
 158
 159static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
 160                                                struct mlx5_cqwq *wq,
 161                                                u32 cqcc)
 162{
 163        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 164
 165        mlx5e_decompress_cqe(rq, wq, cqcc);
 166        cqd->title.rss_hash_type   = 0;
 167        cqd->title.rss_hash_result = 0;
 168}
 169
 170static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 171                                             struct mlx5_cqwq *wq,
 172                                             int update_owner_only,
 173                                             int budget_rem)
 174{
 175        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 176        u32 cqcc = wq->cc + update_owner_only;
 177        u32 cqe_count;
 178        u32 i;
 179
 180        cqe_count = min_t(u32, cqd->left, budget_rem);
 181
 182        for (i = update_owner_only; i < cqe_count;
 183             i++, cqd->mini_arr_idx++, cqcc++) {
 184                if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
 185                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
 186
 187                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
 188                INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
 189                                mlx5e_handle_rx_cqe, rq, &cqd->title);
 190        }
 191        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
 192        wq->cc = cqcc;
 193        cqd->left -= cqe_count;
 194        rq->stats->cqe_compress_pkts += cqe_count;
 195
 196        return cqe_count;
 197}
 198
 199static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 200                                              struct mlx5_cqwq *wq,
 201                                              int budget_rem)
 202{
 203        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 204        u32 cc = wq->cc;
 205
 206        mlx5e_read_title_slot(rq, wq, cc);
 207        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
 208        mlx5e_decompress_cqe(rq, wq, cc);
 209        INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
 210                        mlx5e_handle_rx_cqe, rq, &cqd->title);
 211        cqd->mini_arr_idx++;
 212
 213        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 214}
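
/* Illustrative note, not part of the upstream source: the helpers above
 * expand a compressed CQE session. Assuming MLX5_MINI_CQE_ARRAY_SIZE == 8,
 * the session occupies consecutive CQ slots roughly as follows:
 *
 *   cc + 0: "title" CQE - holds the fields shared by the whole session
 *           (flow tag, VLAN, hash type, ...); its byte_cnt carries the
 *           number of compressed entries and its wqe_counter the first
 *           WQE index.
 *   cc + 1: array of 8 struct mlx5_mini_cqe8 - per-packet byte_cnt,
 *           checksum and (optionally) stride index only.
 *   cc + 2, ...: further mini arrays until cqd->left entries are consumed.
 *
 * mlx5e_decompress_cqes_start() rebuilds each packet's CQE in cqd->title
 * and dispatches it through rq->handle_rx_cqe; it returns the number of
 * handled entries minus one because the caller's poll loop also counts the
 * title slot it just consumed.
 */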
 215
 216static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 217                                      struct mlx5e_dma_info *dma_info)
 218{
 219        struct mlx5e_page_cache *cache = &rq->page_cache;
 220        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
 221        struct mlx5e_rq_stats *stats = rq->stats;
 222
 223        if (tail_next == cache->head) {
 224                stats->cache_full++;
 225                return false;
 226        }
 227
 228        if (!dev_page_is_reusable(dma_info->page)) {
 229                stats->cache_waive++;
 230                return false;
 231        }
 232
 233        cache->page_cache[cache->tail] = *dma_info;
 234        cache->tail = tail_next;
 235        return true;
 236}
 237
 238static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 239                                      struct mlx5e_dma_info *dma_info)
 240{
 241        struct mlx5e_page_cache *cache = &rq->page_cache;
 242        struct mlx5e_rq_stats *stats = rq->stats;
 243
 244        if (unlikely(cache->head == cache->tail)) {
 245                stats->cache_empty++;
 246                return false;
 247        }
 248
 249        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
 250                stats->cache_busy++;
 251                return false;
 252        }
 253
 254        *dma_info = cache->page_cache[cache->head];
 255        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
 256        stats->cache_reuse++;
 257
 258        dma_sync_single_for_device(rq->pdev, dma_info->addr,
 259                                   PAGE_SIZE,
 260                                   DMA_FROM_DEVICE);
 261        return true;
 262}
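
/* Illustrative sketch, not part of the driver: rq->page_cache is a small
 * ring indexed with a power-of-two mask, so its state is derived from
 * head/tail alone - empty when head == tail, full when advancing tail
 * would reach head (one slot is deliberately sacrificed). A cached page is
 * only handed back out once its refcount has dropped to 1, i.e. the stack
 * has released every reference taken via page_ref_inc() on the RX path.
 */
#if 0	/* not built; shows the occupancy math implied by put()/get() above */
static inline u32 mlx5e_rx_cache_occupancy(const struct mlx5e_page_cache *cache)
{
	/* head and tail are kept masked, unsigned wrap keeps this correct */
	return (cache->tail - cache->head) & (MLX5E_CACHE_SIZE - 1);
}
#endif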
 263
 264static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
 265                                        struct mlx5e_dma_info *dma_info)
 266{
 267        if (mlx5e_rx_cache_get(rq, dma_info))
 268                return 0;
 269
 270        dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
 271        if (unlikely(!dma_info->page))
 272                return -ENOMEM;
 273
 274        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 275                                      PAGE_SIZE, rq->buff.map_dir);
 276        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
 277                page_pool_recycle_direct(rq->page_pool, dma_info->page);
 278                dma_info->page = NULL;
 279                return -ENOMEM;
 280        }
 281
 282        return 0;
 283}
 284
 285static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
 286                                   struct mlx5e_dma_info *dma_info)
 287{
 288        if (rq->xsk_pool)
 289                return mlx5e_xsk_page_alloc_pool(rq, dma_info);
 290        else
 291                return mlx5e_page_alloc_pool(rq, dma_info);
 292}
 293
 294void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
 295{
 296        dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
 297}
 298
 299void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
 300                                struct mlx5e_dma_info *dma_info,
 301                                bool recycle)
 302{
 303        if (likely(recycle)) {
 304                if (mlx5e_rx_cache_put(rq, dma_info))
 305                        return;
 306
 307                mlx5e_page_dma_unmap(rq, dma_info);
 308                page_pool_recycle_direct(rq->page_pool, dma_info->page);
 309        } else {
 310                mlx5e_page_dma_unmap(rq, dma_info);
 311                page_pool_release_page(rq->page_pool, dma_info->page);
 312                put_page(dma_info->page);
 313        }
 314}
 315
 316static inline void mlx5e_page_release(struct mlx5e_rq *rq,
 317                                      struct mlx5e_dma_info *dma_info,
 318                                      bool recycle)
 319{
 320        if (rq->xsk_pool)
 321                /* The `recycle` parameter is ignored, and the page is always
 322                 * put into the Reuse Ring, because there is no way to return
 323                 * the page to the userspace when the interface goes down.
 324                 */
 325                xsk_buff_free(dma_info->xsk);
 326        else
 327                mlx5e_page_release_dynamic(rq, dma_info, recycle);
 328}
 329
 330static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 331                                    struct mlx5e_wqe_frag_info *frag)
 332{
 333        int err = 0;
 334
 335        if (!frag->offset)
 336                /* On first frag (offset == 0), replenish page (dma_info actually).
 337                 * Other frags that point to the same dma_info (with a different
 338                 * offset) should just use the new one without replenishing again
 339                 * by themselves.
 340                 */
 341                err = mlx5e_page_alloc(rq, frag->di);
 342
 343        return err;
 344}
 345
 346static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 347                                     struct mlx5e_wqe_frag_info *frag,
 348                                     bool recycle)
 349{
 350        if (frag->last_in_page)
 351                mlx5e_page_release(rq, frag->di, recycle);
 352}
 353
 354static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
 355{
 356        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
 357}
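
/* Worked example with assumed parameters (not taken from the code above):
 * for a profile with num_frags == 4, log_num_frags is 2 and WQE index 5
 * owns the four consecutive entries frags[20..23]:
 *
 *   get_frag(rq, 5) == &rq->wqe.frags[5 << 2] == &rq->wqe.frags[20]
 *
 * As the helpers above note, only the frag with offset == 0 allocates a
 * page; later frags sharing the same dma_info reuse it, and the page is
 * released again when the frag flagged last_in_page is put back.
 */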
 358
 359static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
 360                              u16 ix)
 361{
 362        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
 363        int err;
 364        int i;
 365
 366        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
 367                err = mlx5e_get_rx_frag(rq, frag);
 368                if (unlikely(err))
 369                        goto free_frags;
 370
 371                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
 372                                                frag->offset + rq->buff.headroom);
 373        }
 374
 375        return 0;
 376
 377free_frags:
 378        while (--i >= 0)
 379                mlx5e_put_rx_frag(rq, --frag, true);
 380
 381        return err;
 382}
 383
 384static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 385                                     struct mlx5e_wqe_frag_info *wi,
 386                                     bool recycle)
 387{
 388        int i;
 389
 390        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
 391                mlx5e_put_rx_frag(rq, wi, recycle);
 392}
 393
 394static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 395{
 396        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
 397
 398        mlx5e_free_rx_wqe(rq, wi, false);
 399}
 400
 401static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
 402{
 403        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 404        int err;
 405        int i;
 406
 407        if (rq->xsk_pool) {
 408                int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
 409
 410                /* Check in advance that we have enough frames, instead of
 411                 * allocating one-by-one, failing and moving frames to the
 412                 * Reuse Ring.
 413                 */
 414                if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
 415                        return -ENOMEM;
 416        }
 417
 418        for (i = 0; i < wqe_bulk; i++) {
 419                struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
 420
 421                err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
 422                if (unlikely(err))
 423                        goto free_wqes;
 424        }
 425
 426        return 0;
 427
 428free_wqes:
 429        while (--i >= 0)
 430                mlx5e_dealloc_rx_wqe(rq, ix + i);
 431
 432        return err;
 433}
 434
 435static inline void
 436mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 437                   struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
 438                   unsigned int truesize)
 439{
 440        dma_sync_single_for_cpu(rq->pdev,
 441                                di->addr + frag_offset,
 442                                len, DMA_FROM_DEVICE);
 443        page_ref_inc(di->page);
 444        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 445                        di->page, frag_offset, len, truesize);
 446}
 447
 448static inline void
 449mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
 450                      struct mlx5e_dma_info *dma_info,
 451                      int offset_from, u32 headlen)
 452{
 453        const void *from = page_address(dma_info->page) + offset_from;
 454        /* Aligning len to sizeof(long) optimizes memcpy performance */
 455        unsigned int len = ALIGN(headlen, sizeof(long));
 456
 457        dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
 458                                DMA_FROM_DEVICE);
 459        skb_copy_to_linear_data(skb, from, len);
 460}
 461
 462static void
 463mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
 464{
 465        bool no_xdp_xmit;
 466        struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
 467        int i;
 468
 469        /* A common case for AF_XDP. */
 470        if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
 471                return;
 472
 473        no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
 474                                   MLX5_MPWRQ_PAGES_PER_WQE);
 475
 476        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
 477                if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
 478                        mlx5e_page_release(rq, &dma_info[i], recycle);
 479}
 480
 481static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
 482{
 483        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 484
 485        do {
 486                u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
 487
 488                mlx5_wq_ll_push(wq, next_wqe_index);
 489        } while (--n);
 490
 491        /* ensure wqes are visible to device before updating doorbell record */
 492        dma_wmb();
 493
 494        mlx5_wq_ll_update_db_record(wq);
 495}
 496
 497static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 498{
 499        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 500        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 501        struct mlx5e_icosq *sq = rq->icosq;
 502        struct mlx5_wq_cyc *wq = &sq->wq;
 503        struct mlx5e_umr_wqe *umr_wqe;
 504        u16 pi;
 505        int err;
 506        int i;
 507
 508        /* Check in advance that we have enough frames, instead of allocating
 509         * one-by-one, failing and moving frames to the Reuse Ring.
 510         */
 511        if (rq->xsk_pool &&
 512            unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
 513                err = -ENOMEM;
 514                goto err;
 515        }
 516
 517        pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
 518        umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 519        memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
 520
 521        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 522                err = mlx5e_page_alloc(rq, dma_info);
 523                if (unlikely(err))
 524                        goto err_unmap;
 525                umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
 526        }
 527
 528        bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 529        wi->consumed_strides = 0;
 530
 531        umr_wqe->ctrl.opmod_idx_opcode =
 532                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
 533                            MLX5_OPCODE_UMR);
 534        umr_wqe->uctrl.xlt_offset =
 535                cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
 536
 537        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
 538                .wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
 539                .num_wqebbs = MLX5E_UMR_WQEBBS,
 540                .umr.rq     = rq,
 541        };
 542
 543        sq->pc += MLX5E_UMR_WQEBBS;
 544
 545        sq->doorbell_cseg = &umr_wqe->ctrl;
 546
 547        return 0;
 548
 549err_unmap:
 550        while (--i >= 0) {
 551                dma_info--;
 552                mlx5e_page_release(rq, dma_info, true);
 553        }
 554
 555err:
 556        rq->stats->buff_alloc_err++;
 557
 558        return err;
 559}
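
/* Illustrative note, not part of the upstream source: the UMR WQE built
 * above maps the MPWQE's pages into the device translation table before
 * the RX WQE is handed to hardware. Each inline MTT is just the page's DMA
 * address with the write-enable bit folded in:
 *
 *   inline_mtts[i].ptag = cpu_to_be64(dma_info[i].addr | MLX5_EN_WR);
 *
 * xlt_offset, derived from the WQE index ix, selects the fixed window of
 * the table that this WQE always reuses. The UMR completes on the ICOSQ
 * (MLX5E_ICOSQ_WQE_UMR_RX in mlx5e_poll_ico_cq()), and only then is the RX
 * WQE published to the NIC via mlx5e_post_rx_mpwqe().
 */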
 560
 561static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 562{
 563        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 564        /* Don't recycle, this function is called on rq/netdev close */
 565        mlx5e_free_rx_mpwqe(rq, wi, false);
 566}
 567
 568INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 569{
 570        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 571        u8 wqe_bulk;
 572        int err;
 573
 574        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 575                return false;
 576
 577        wqe_bulk = rq->wqe.info.wqe_bulk;
 578
 579        if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
 580                return false;
 581
 582        if (rq->page_pool)
 583                page_pool_nid_changed(rq->page_pool, numa_mem_id());
 584
 585        do {
 586                u16 head = mlx5_wq_cyc_get_head(wq);
 587
 588                err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
 589                if (unlikely(err)) {
 590                        rq->stats->buff_alloc_err++;
 591                        break;
 592                }
 593
 594                mlx5_wq_cyc_push_n(wq, wqe_bulk);
 595        } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
 596
 597        /* ensure wqes are visible to device before updating doorbell record */
 598        dma_wmb();
 599
 600        mlx5_wq_cyc_update_db_record(wq);
 601
 602        return !!err;
 603}
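
/* Illustrative note, not part of the upstream source: the tail of the
 * function above is the usual descriptor/doorbell ordering:
 *
 *   1. write the WQE contents into host memory (mlx5e_alloc_rx_wqes());
 *   2. dma_wmb() - order those writes before step 3;
 *   3. publish the new producer index in the doorbell record
 *      (mlx5_wq_cyc_update_db_record()), which the device reads over DMA.
 *
 * Without the barrier the NIC could observe the updated producer index
 * while still seeing stale WQE contents.
 */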
 604
 605void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
 606{
 607        u16 sqcc;
 608
 609        sqcc = sq->cc;
 610
 611        while (sqcc != sq->pc) {
 612                struct mlx5e_icosq_wqe_info *wi;
 613                u16 ci;
 614
 615                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 616                wi = &sq->db.wqe_info[ci];
 617                sqcc += wi->num_wqebbs;
 618#ifdef CONFIG_MLX5_EN_TLS
 619                switch (wi->wqe_type) {
 620                case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
 621                        mlx5e_ktls_handle_ctx_completion(wi);
 622                        break;
 623                case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
 624                        mlx5e_ktls_handle_get_psv_completion(wi, sq);
 625                        break;
 626                }
 627#endif
 628        }
 629        sq->cc = sqcc;
 630}
 631
 632int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 633{
 634        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 635        struct mlx5_cqe64 *cqe;
 636        u16 sqcc;
 637        int i;
 638
 639        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 640                return 0;
 641
 642        cqe = mlx5_cqwq_get_cqe(&cq->wq);
 643        if (likely(!cqe))
 644                return 0;
 645
 646        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 647         * otherwise a cq overrun may occur
 648         */
 649        sqcc = sq->cc;
 650
 651        i = 0;
 652        do {
 653                u16 wqe_counter;
 654                bool last_wqe;
 655
 656                mlx5_cqwq_pop(&cq->wq);
 657
 658                wqe_counter = be16_to_cpu(cqe->wqe_counter);
 659
 660                do {
 661                        struct mlx5e_icosq_wqe_info *wi;
 662                        u16 ci;
 663
 664                        last_wqe = (sqcc == wqe_counter);
 665
 666                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 667                        wi = &sq->db.wqe_info[ci];
 668                        sqcc += wi->num_wqebbs;
 669
 670                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
 671                                netdev_WARN_ONCE(cq->netdev,
 672                                                 "Bad OP in ICOSQ CQE: 0x%x\n",
 673                                                 get_cqe_opcode(cqe));
 674                                mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 675                                                     (struct mlx5_err_cqe *)cqe);
 676                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 677                                if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 678                                        queue_work(cq->priv->wq, &sq->recover_work);
 679                                break;
 680                        }
 681
 682                        switch (wi->wqe_type) {
 683                        case MLX5E_ICOSQ_WQE_UMR_RX:
 684                                wi->umr.rq->mpwqe.umr_completed++;
 685                                break;
 686                        case MLX5E_ICOSQ_WQE_NOP:
 687                                break;
 688#ifdef CONFIG_MLX5_EN_TLS
 689                        case MLX5E_ICOSQ_WQE_UMR_TLS:
 690                                break;
 691                        case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
 692                                mlx5e_ktls_handle_ctx_completion(wi);
 693                                break;
 694                        case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
 695                                mlx5e_ktls_handle_get_psv_completion(wi, sq);
 696                                break;
 697#endif
 698                        default:
 699                                netdev_WARN_ONCE(cq->netdev,
 700                                                 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
 701                                                 wi->wqe_type);
 702                        }
 703                } while (!last_wqe);
 704        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 705
 706        sq->cc = sqcc;
 707
 708        mlx5_cqwq_update_db_record(&cq->wq);
 709
 710        return i;
 711}
 712
 713INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 714{
 715        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 716        u8  umr_completed = rq->mpwqe.umr_completed;
 717        struct mlx5e_icosq *sq = rq->icosq;
 718        int alloc_err = 0;
 719        u8  missing, i;
 720        u16 head;
 721
 722        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 723                return false;
 724
 725        if (umr_completed) {
 726                mlx5e_post_rx_mpwqe(rq, umr_completed);
 727                rq->mpwqe.umr_in_progress -= umr_completed;
 728                rq->mpwqe.umr_completed = 0;
 729        }
 730
 731        missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
 732
 733        if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
 734                rq->stats->congst_umr++;
 735
 736#define UMR_WQE_BULK (2)
 737        if (likely(missing < UMR_WQE_BULK))
 738                return false;
 739
 740        if (rq->page_pool)
 741                page_pool_nid_changed(rq->page_pool, numa_mem_id());
 742
 743        head = rq->mpwqe.actual_wq_head;
 744        i = missing;
 745        do {
 746                alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
 747
 748                if (unlikely(alloc_err))
 749                        break;
 750                head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
 751        } while (--i);
 752
 753        rq->mpwqe.umr_last_bulk    = missing - i;
 754        if (sq->doorbell_cseg) {
 755                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
 756                sq->doorbell_cseg = NULL;
 757        }
 758
 759        rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
 760        rq->mpwqe.actual_wq_head   = head;
 761
 762        /* If XSK Fill Ring doesn't have enough frames, report the error, so
 763         * that one of the actions can be performed:
 764         * 1. If need_wakeup is used, signal that the application has to kick
 765         * the driver when it refills the Fill Ring.
 766         * 2. Otherwise, busy poll by rescheduling the NAPI poll.
 767         */
 768        if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
 769                return true;
 770
 771        return false;
 772}
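
/* Worked example with assumed numbers (not taken from the code above): if
 * the linked-list WQ is missing 8 MPWQEs and 3 UMR WQEs are already in
 * flight, then missing == 5 and up to 5 new UMRs are posted in one bulk
 * (UMR_WQE_BULK == 2 is the minimum worth ringing the ICOSQ doorbell for).
 * umr_in_progress then grows by umr_last_bulk and shrinks again in the
 * umr_completed branch above once the ICOSQ completions arrive.
 */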
 773
 774static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
 775{
 776        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
 777        u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
 778                         (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
 779
 780        tcp->check                      = 0;
 781        tcp->psh                        = get_cqe_lro_tcppsh(cqe);
 782
 783        if (tcp_ack) {
 784                tcp->ack                = 1;
 785                tcp->ack_seq            = cqe->lro_ack_seq_num;
 786                tcp->window             = cqe->lro_tcp_win;
 787        }
 788}
 789
 790static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 791                                 u32 cqe_bcnt)
 792{
 793        struct ethhdr   *eth = (struct ethhdr *)(skb->data);
 794        struct tcphdr   *tcp;
 795        int network_depth = 0;
 796        __wsum check;
 797        __be16 proto;
 798        u16 tot_len;
 799        void *ip_p;
 800
 801        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 802
 803        tot_len = cqe_bcnt - network_depth;
 804        ip_p = skb->data + network_depth;
 805
 806        if (proto == htons(ETH_P_IP)) {
 807                struct iphdr *ipv4 = ip_p;
 808
 809                tcp = ip_p + sizeof(struct iphdr);
 810                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 811
 812                ipv4->ttl               = cqe->lro_min_ttl;
 813                ipv4->tot_len           = cpu_to_be16(tot_len);
 814                ipv4->check             = 0;
 815                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
 816                                                       ipv4->ihl);
 817
 818                mlx5e_lro_update_tcp_hdr(cqe, tcp);
 819                check = csum_partial(tcp, tcp->doff * 4,
 820                                     csum_unfold((__force __sum16)cqe->check_sum));
 821                /* Almost done, don't forget the pseudo header */
 822                tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
 823                                               tot_len - sizeof(struct iphdr),
 824                                               IPPROTO_TCP, check);
 825        } else {
 826                u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 827                struct ipv6hdr *ipv6 = ip_p;
 828
 829                tcp = ip_p + sizeof(struct ipv6hdr);
 830                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 831
 832                ipv6->hop_limit         = cqe->lro_min_ttl;
 833                ipv6->payload_len       = cpu_to_be16(payload_len);
 834
 835                mlx5e_lro_update_tcp_hdr(cqe, tcp);
 836                check = csum_partial(tcp, tcp->doff * 4,
 837                                     csum_unfold((__force __sum16)cqe->check_sum));
 838                /* Almost done, don't forget the pseudo header */
 839                tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
 840                                             IPPROTO_TCP, check);
 841        }
 842}
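
/* Illustrative note, not part of the upstream source: after LRO
 * aggregation the headers in the SKB no longer match what arrived on the
 * wire, so the helper above rewrites them and rebuilds the TCP checksum in
 * two steps:
 *
 *   check      = csum_partial(tcp, tcp->doff * 4, csum from the CQE);
 *   tcp->check = csum_tcpudp_magic(saddr, daddr, l4_len, IPPROTO_TCP,
 *                                  check);   (csum_ipv6_magic() for IPv6)
 *
 * i.e. the 1's-complement sum seeded by hardware is extended over the
 * rewritten TCP header and then folded together with the pseudo-header,
 * while the IPv4 header checksum is recomputed separately with
 * ip_fast_csum().
 */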
 843
 844static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
 845                                      struct sk_buff *skb)
 846{
 847        u8 cht = cqe->rss_hash_type;
 848        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
 849                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
 850                                            PKT_HASH_TYPE_NONE;
 851        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 852}
 853
 854static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 855                                        __be16 *proto)
 856{
 857        *proto = ((struct ethhdr *)skb->data)->h_proto;
 858        *proto = __vlan_get_protocol(skb, *proto, network_depth);
 859
 860        if (*proto == htons(ETH_P_IP))
 861                return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
 862
 863        if (*proto == htons(ETH_P_IPV6))
 864                return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
 865
 866        return false;
 867}
 868
 869static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 870{
 871        int network_depth = 0;
 872        __be16 proto;
 873        void *ip;
 874        int rc;
 875
 876        if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
 877                return;
 878
 879        ip = skb->data + network_depth;
 880        rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
 881                                         IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
 882
 883        rq->stats->ecn_mark += !!rc;
 884}
 885
 886static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 887{
 888        void *ip_p = skb->data + network_depth;
 889
 890        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
 891                                            ((struct ipv6hdr *)ip_p)->nexthdr;
 892}
 893
 894#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 895
 896#define MAX_PADDING 8
 897
 898static void
 899tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
 900                       struct mlx5e_rq_stats *stats)
 901{
 902        stats->csum_complete_tail_slow++;
 903        skb->csum = csum_block_add(skb->csum,
 904                                   skb_checksum(skb, offset, len, 0),
 905                                   offset);
 906}
 907
 908static void
 909tail_padding_csum(struct sk_buff *skb, int offset,
 910                  struct mlx5e_rq_stats *stats)
 911{
 912        u8 tail_padding[MAX_PADDING];
 913        int len = skb->len - offset;
 914        void *tail;
 915
 916        if (unlikely(len > MAX_PADDING)) {
 917                tail_padding_csum_slow(skb, offset, len, stats);
 918                return;
 919        }
 920
 921        tail = skb_header_pointer(skb, offset, len, tail_padding);
 922        if (unlikely(!tail)) {
 923                tail_padding_csum_slow(skb, offset, len, stats);
 924                return;
 925        }
 926
 927        stats->csum_complete_tail++;
 928        skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
 929}
 930
 931static void
 932mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
 933                     struct mlx5e_rq_stats *stats)
 934{
 935        struct ipv6hdr *ip6;
 936        struct iphdr   *ip4;
 937        int pkt_len;
 938
 939        /* Fixup vlan headers, if any */
 940        if (network_depth > ETH_HLEN)
 941                /* CQE csum is calculated from the IP header and does
 942                 * not cover VLAN headers (if present). This will add
 943                 * the checksum manually.
 944                 */
 945                skb->csum = csum_partial(skb->data + ETH_HLEN,
 946                                         network_depth - ETH_HLEN,
 947                                         skb->csum);
 948
 949        /* Fixup tail padding, if any */
 950        switch (proto) {
 951        case htons(ETH_P_IP):
 952                ip4 = (struct iphdr *)(skb->data + network_depth);
 953                pkt_len = network_depth + ntohs(ip4->tot_len);
 954                break;
 955        case htons(ETH_P_IPV6):
 956                ip6 = (struct ipv6hdr *)(skb->data + network_depth);
 957                pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
 958                break;
 959        default:
 960                return;
 961        }
 962
 963        if (likely(pkt_len >= skb->len))
 964                return;
 965
 966        tail_padding_csum(skb, pkt_len, stats);
 967}
 968
 969static inline void mlx5e_handle_csum(struct net_device *netdev,
 970                                     struct mlx5_cqe64 *cqe,
 971                                     struct mlx5e_rq *rq,
 972                                     struct sk_buff *skb,
 973                                     bool   lro)
 974{
 975        struct mlx5e_rq_stats *stats = rq->stats;
 976        int network_depth = 0;
 977        __be16 proto;
 978
 979        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 980                goto csum_none;
 981
 982        if (lro) {
 983                skb->ip_summed = CHECKSUM_UNNECESSARY;
 984                stats->csum_unnecessary++;
 985                return;
 986        }
 987
 988        /* True when explicitly set via priv flag, or XDP prog is loaded */
 989        if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
 990                goto csum_unnecessary;
 991
 992        /* CQE csum doesn't cover padding octets in short ethernet
 993         * frames. And the pad field is appended prior to calculating
 994         * and appending the FCS field.
 995         *
 996         * Detecting these padded frames requires to verify and parse
 997         * IP headers, so we simply force all those small frames to be
 998         * CHECKSUM_UNNECESSARY even if they are not padded.
 999         */
1000        if (short_frame(skb->len))
1001                goto csum_unnecessary;
1002
1003        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1004                u8 ipproto = get_ip_proto(skb, network_depth, proto);
1005
1006                if (unlikely(ipproto == IPPROTO_SCTP))
1007                        goto csum_unnecessary;
1008
1009                if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1010                        goto csum_none;
1011
1012                stats->csum_complete++;
1013                skb->ip_summed = CHECKSUM_COMPLETE;
1014                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1015
1016                if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1017                        return; /* CQE csum covers all received bytes */
1018
1019                /* csum might need some fixups ...*/
1020                mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1021                return;
1022        }
1023
1024csum_unnecessary:
1025        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1026                   (cqe->hds_ip_ext & CQE_L4_OK))) {
1027                skb->ip_summed = CHECKSUM_UNNECESSARY;
1028                if (cqe_is_tunneled(cqe)) {
1029                        skb->csum_level = 1;
1030                        skb->encapsulation = 1;
1031                        stats->csum_unnecessary_inner++;
1032                        return;
1033                }
1034                stats->csum_unnecessary++;
1035                return;
1036        }
1037csum_none:
1038        skb->ip_summed = CHECKSUM_NONE;
1039        stats->csum_none++;
1040}
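
/* Descriptive summary of the helper above (derived from its branches, no
 * additional behaviour):
 *
 *   - RXCSUM disabled, or an IPsec RX flow ........ CHECKSUM_NONE
 *   - LRO-aggregated packet ....................... CHECKSUM_UNNECESSARY
 *   - csum_complete disabled, short or SCTP frame . CHECKSUM_UNNECESSARY
 *     when the CQE reports L3_OK and L4_OK (tunnelled packets also get
 *     csum_level = 1), otherwise CHECKSUM_NONE
 *   - plain IPv4/IPv6 as the last ethertype ....... CHECKSUM_COMPLETE,
 *     with skb->csum taken from the CQE and fixed up for VLAN headers and
 *     tail padding whenever the CQE csum does not cover the whole frame.
 */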
1041
1042#define MLX5E_CE_BIT_MASK 0x80
1043
1044static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1045                                      u32 cqe_bcnt,
1046                                      struct mlx5e_rq *rq,
1047                                      struct sk_buff *skb)
1048{
1049        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1050        struct mlx5e_rq_stats *stats = rq->stats;
1051        struct net_device *netdev = rq->netdev;
1052
1053        skb->mac_len = ETH_HLEN;
1054
1055        mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1056
1057        if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1058                mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
1059
1060        if (lro_num_seg > 1) {
1061                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1062                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
1063                /* Subtract one since we already counted this as one
1064                 * "regular" packet in mlx5e_complete_rx_cqe()
1065                 */
1066                stats->packets += lro_num_seg - 1;
1067                stats->lro_packets++;
1068                stats->lro_bytes += cqe_bcnt;
1069        }
1070
1071        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1072                skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1073                                                                  rq->clock, get_cqe_ts(cqe));
1074        skb_record_rx_queue(skb, rq->ix);
1075
1076        if (likely(netdev->features & NETIF_F_RXHASH))
1077                mlx5e_skb_set_hash(cqe, skb);
1078
1079        if (cqe_has_vlan(cqe)) {
1080                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1081                                       be16_to_cpu(cqe->vlan_info));
1082                stats->removed_vlan_packets++;
1083        }
1084
1085        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1086
1087        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1088        /* checking CE bit in cqe - MSB in ml_path field */
1089        if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1090                mlx5e_enable_ecn(rq, skb);
1091
1092        skb->protocol = eth_type_trans(skb, netdev);
1093
1094        if (unlikely(mlx5e_skb_is_multicast(skb)))
1095                stats->mcast_packets++;
1096}
1097
1098static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1099                                         struct mlx5_cqe64 *cqe,
1100                                         u32 cqe_bcnt,
1101                                         struct sk_buff *skb)
1102{
1103        struct mlx5e_rq_stats *stats = rq->stats;
1104
1105        stats->packets++;
1106        stats->bytes += cqe_bcnt;
1107        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1108}
1109
1110static inline
1111struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1112                                       u32 frag_size, u16 headroom,
1113                                       u32 cqe_bcnt)
1114{
1115        struct sk_buff *skb = build_skb(va, frag_size);
1116
1117        if (unlikely(!skb)) {
1118                rq->stats->buff_alloc_err++;
1119                return NULL;
1120        }
1121
1122        skb_reserve(skb, headroom);
1123        skb_put(skb, cqe_bcnt);
1124
1125        return skb;
1126}
1127
1128static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
1129                                u32 len, struct xdp_buff *xdp)
1130{
1131        xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
1132        xdp_prepare_buff(xdp, va, headroom, len, false);
1133}
1134
1135static struct sk_buff *
1136mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1137                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1138{
1139        struct mlx5e_dma_info *di = wi->di;
1140        u16 rx_headroom = rq->buff.headroom;
1141        struct xdp_buff xdp;
1142        struct sk_buff *skb;
1143        void *va, *data;
1144        u32 frag_size;
1145
1146        va             = page_address(di->page) + wi->offset;
1147        data           = va + rx_headroom;
1148        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1149
1150        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
1151                                      frag_size, DMA_FROM_DEVICE);
1152        net_prefetchw(va); /* xdp_frame data area */
1153        net_prefetch(data);
1154
1155        mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
1156        if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
1157                return NULL; /* page/packet was consumed by XDP */
1158
1159        rx_headroom = xdp.data - xdp.data_hard_start;
1160        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1161        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
1162        if (unlikely(!skb))
1163                return NULL;
1164
1165        /* queue up for recycling/reuse */
1166        page_ref_inc(di->page);
1167
1168        return skb;
1169}
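
/* Worked example with assumed numbers (not taken from the code above):
 * with rx_headroom == 256 and cqe_bcnt == 1500, frag_size is
 * SKB_DATA_ALIGN(256 + 1500) plus the aligned skb_shared_info trailer,
 * which is the buffer size build_skb() expects. If an XDP program moved
 * xdp.data (e.g. via bpf_xdp_adjust_head()), headroom and frag_size are
 * recomputed from xdp.data - xdp.data_hard_start, so the SKB's linear
 * data still starts at the (possibly adjusted) packet start.
 */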
1170
1171static struct sk_buff *
1172mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1173                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1174{
1175        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1176        struct mlx5e_wqe_frag_info *head_wi = wi;
1177        u16 headlen      = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1178        u16 frag_headlen = headlen;
1179        u16 byte_cnt     = cqe_bcnt - headlen;
1180        struct sk_buff *skb;
1181
1182        /* XDP is not supported in this configuration, as incoming packets
1183         * might spread among multiple pages.
1184         */
1185        skb = napi_alloc_skb(rq->cq.napi,
1186                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1187        if (unlikely(!skb)) {
1188                rq->stats->buff_alloc_err++;
1189                return NULL;
1190        }
1191
1192        net_prefetchw(skb->data);
1193
1194        while (byte_cnt) {
1195                u16 frag_consumed_bytes =
1196                        min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
1197
1198                mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
1199                                   frag_consumed_bytes, frag_info->frag_stride);
1200                byte_cnt -= frag_consumed_bytes;
1201                frag_headlen = 0;
1202                frag_info++;
1203                wi++;
1204        }
1205
1206        /* copy header */
1207        mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
1208        /* skb linear part was allocated with headlen and aligned to long */
1209        skb->tail += headlen;
1210        skb->len  += headlen;
1211
1212        return skb;
1213}
1214
1215static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1216{
1217        struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1218        struct mlx5e_priv *priv = rq->priv;
1219
1220        if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1221            !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1222                mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1223                queue_work(priv->wq, &rq->recover_work);
1224        }
1225}
1226
1227static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1228{
1229        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1230        struct mlx5e_wqe_frag_info *wi;
1231        struct sk_buff *skb;
1232        u32 cqe_bcnt;
1233        u16 ci;
1234
1235        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1236        wi       = get_frag(rq, ci);
1237        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1238
1239        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1240                trigger_report(rq, cqe);
1241                rq->stats->wqe_err++;
1242                goto free_wqe;
1243        }
1244
1245        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1246                              mlx5e_skb_from_cqe_linear,
1247                              mlx5e_skb_from_cqe_nonlinear,
1248                              rq, cqe, wi, cqe_bcnt);
1249        if (!skb) {
1250                /* probably for XDP */
1251                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1252                        /* do not return page to cache,
1253                         * it will be returned on XDP_TX completion.
1254                         */
1255                        goto wq_cyc_pop;
1256                }
1257                goto free_wqe;
1258        }
1259
1260        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1261
1262        if (mlx5e_cqe_regb_chain(cqe))
1263                if (!mlx5e_tc_update_skb(cqe, skb)) {
1264                        dev_kfree_skb_any(skb);
1265                        goto free_wqe;
1266                }
1267
1268        napi_gro_receive(rq->cq.napi, skb);
1269
1270free_wqe:
1271        mlx5e_free_rx_wqe(rq, wi, true);
1272wq_cyc_pop:
1273        mlx5_wq_cyc_pop(wq);
1274}
1275
1276#ifdef CONFIG_MLX5_ESWITCH
1277static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1278{
1279        struct net_device *netdev = rq->netdev;
1280        struct mlx5e_priv *priv = netdev_priv(netdev);
1281        struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1282        struct mlx5_eswitch_rep *rep = rpriv->rep;
1283        struct mlx5e_tc_update_priv tc_priv = {};
1284        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1285        struct mlx5e_wqe_frag_info *wi;
1286        struct sk_buff *skb;
1287        u32 cqe_bcnt;
1288        u16 ci;
1289
1290        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1291        wi       = get_frag(rq, ci);
1292        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1293
1294        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1295                rq->stats->wqe_err++;
1296                goto free_wqe;
1297        }
1298
1299        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1300                              mlx5e_skb_from_cqe_linear,
1301                              mlx5e_skb_from_cqe_nonlinear,
1302                              rq, cqe, wi, cqe_bcnt);
1303        if (!skb) {
1304                /* probably for XDP */
1305                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1306                        /* do not return page to cache,
1307                         * it will be returned on XDP_TX completion.
1308                         */
1309                        goto wq_cyc_pop;
1310                }
1311                goto free_wqe;
1312        }
1313
1314        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1315
1316        if (rep->vlan && skb_vlan_tag_present(skb))
1317                skb_vlan_pop(skb);
1318
1319        if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
1320                     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
1321                dev_kfree_skb_any(skb);
1322                goto free_wqe;
1323        }
1324
1325        napi_gro_receive(rq->cq.napi, skb);
1326
1327        mlx5_rep_tc_post_napi_receive(&tc_priv);
1328
1329free_wqe:
1330        mlx5e_free_rx_wqe(rq, wi, true);
1331wq_cyc_pop:
1332        mlx5_wq_cyc_pop(wq);
1333}
1334
1335static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1336{
1337        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1338        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1339        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1340        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1341        u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1342        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
1343        u32 page_idx       = wqe_offset >> PAGE_SHIFT;
1344        struct mlx5e_tc_update_priv tc_priv = {};
1345        struct mlx5e_rx_wqe_ll *wqe;
1346        struct mlx5_wq_ll *wq;
1347        struct sk_buff *skb;
1348        u16 cqe_bcnt;
1349
1350        wi->consumed_strides += cstrides;
1351
1352        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1353                trigger_report(rq, cqe);
1354                rq->stats->wqe_err++;
1355                goto mpwrq_cqe_out;
1356        }
1357
1358        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1359                struct mlx5e_rq_stats *stats = rq->stats;
1360
1361                stats->mpwqe_filler_cqes++;
1362                stats->mpwqe_filler_strides += cstrides;
1363                goto mpwrq_cqe_out;
1364        }
1365
1366        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1367
1368        skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1369                              mlx5e_skb_from_cqe_mpwrq_linear,
1370                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
1371                              rq, wi, cqe_bcnt, head_offset, page_idx);
1372        if (!skb)
1373                goto mpwrq_cqe_out;
1374
1375        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1376
1377        if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
1378                     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
1379                dev_kfree_skb_any(skb);
1380                goto mpwrq_cqe_out;
1381        }
1382
1383        napi_gro_receive(rq->cq.napi, skb);
1384
1385        mlx5_rep_tc_post_napi_receive(&tc_priv);
1386
1387mpwrq_cqe_out:
1388        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1389                return;
1390
1391        wq  = &rq->mpwqe.wq;
1392        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1393        mlx5e_free_rx_mpwqe(rq, wi, true);
1394        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1395}
1396
1397const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1398        .handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1399        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1400};
1401#endif
1402
1403static struct sk_buff *
1404mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1405                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1406{
1407        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1408        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1409        u32 frag_offset    = head_offset + headlen;
1410        u32 byte_cnt       = cqe_bcnt - headlen;
1411        struct mlx5e_dma_info *head_di = di;
1412        struct sk_buff *skb;
1413
1414        skb = napi_alloc_skb(rq->cq.napi,
1415                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1416        if (unlikely(!skb)) {
1417                rq->stats->buff_alloc_err++;
1418                return NULL;
1419        }
1420
1421        net_prefetchw(skb->data);
1422
1423        if (unlikely(frag_offset >= PAGE_SIZE)) {
1424                di++;
1425                frag_offset -= PAGE_SIZE;
1426        }
1427
1428        while (byte_cnt) {
1429                u32 pg_consumed_bytes =
1430                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1431                unsigned int truesize =
1432                        ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1433
1434                mlx5e_add_skb_frag(rq, skb, di, frag_offset,
1435                                   pg_consumed_bytes, truesize);
1436                byte_cnt -= pg_consumed_bytes;
1437                frag_offset = 0;
1438                di++;
1439        }
1440        /* copy header */
1441        mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
1442        /* skb linear part was allocated with headlen and aligned to long */
1443        skb->tail += headlen;
1444        skb->len  += headlen;
1445
1446        return skb;
1447}
1448
1449static struct sk_buff *
1450mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1451                                u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1452{
1453        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1454        u16 rx_headroom = rq->buff.headroom;
1455        u32 cqe_bcnt32 = cqe_bcnt;
1456        struct xdp_buff xdp;
1457        struct sk_buff *skb;
1458        void *va, *data;
1459        u32 frag_size;
1460
1461        /* Check packet size. Note LRO doesn't use linear SKB */
1462        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1463                rq->stats->oversize_pkts_sw_drop++;
1464                return NULL;
1465        }
1466
1467        va             = page_address(di->page) + head_offset;
1468        data           = va + rx_headroom;
1469        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1470
1471        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1472                                      frag_size, DMA_FROM_DEVICE);
1473        net_prefetchw(va); /* xdp_frame data area */
1474        net_prefetch(data);
1475
1476        mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
1477        if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
1478                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1479                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
1480                return NULL; /* page/packet was consumed by XDP */
1481        }
1482
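            /*
             * XDP may have moved the packet start (e.g. via
             * bpf_xdp_adjust_head()), so recompute the headroom and frag
             * size from the xdp_buff before building the SKB.
             */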
1483        rx_headroom = xdp.data - xdp.data_hard_start;
1484        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1485        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
1486        if (unlikely(!skb))
1487                return NULL;
1488
1489        /* queue up for recycling/reuse */
1490        page_ref_inc(di->page);
1491
1492        return skb;
1493}
1494
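    /*
     * Completion handler for striding RQ: a multi-packet WQE is carved
     * into power-of-two strides and each CQE reports how many strides its
     * packet consumed.  The WQE is freed and popped from the WQ only once
     * all of its strides have been consumed.
     *
     * Offset math, for example with 2KB strides (log_stride_sz = 11) and
     * 4KB pages: stride_ix = 5 gives wqe_offset = 0x2800, hence
     * page_idx = 2 and head_offset = 0x800 within that page.
     */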
1495static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1496{
1497        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1498        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1499        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1500        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1501        u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1502        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
1503        u32 page_idx       = wqe_offset >> PAGE_SHIFT;
1504        struct mlx5e_rx_wqe_ll *wqe;
1505        struct mlx5_wq_ll *wq;
1506        struct sk_buff *skb;
1507        u16 cqe_bcnt;
1508
1509        wi->consumed_strides += cstrides;
1510
1511        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1512                trigger_report(rq, cqe);
1513                rq->stats->wqe_err++;
1514                goto mpwrq_cqe_out;
1515        }
1516
1517        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1518                struct mlx5e_rq_stats *stats = rq->stats;
1519
1520                stats->mpwqe_filler_cqes++;
1521                stats->mpwqe_filler_strides += cstrides;
1522                goto mpwrq_cqe_out;
1523        }
1524
1525        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1526
1527        skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1528                              mlx5e_skb_from_cqe_mpwrq_linear,
1529                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
1530                              rq, wi, cqe_bcnt, head_offset, page_idx);
1531        if (!skb)
1532                goto mpwrq_cqe_out;
1533
1534        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1535
1536        if (mlx5e_cqe_regb_chain(cqe))
1537                if (!mlx5e_tc_update_skb(cqe, skb)) {
1538                        dev_kfree_skb_any(skb);
1539                        goto mpwrq_cqe_out;
1540                }
1541
1542        napi_gro_receive(rq->cq.napi, skb);
1543
1544mpwrq_cqe_out:
1545        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1546                return;
1547
1548        wq  = &rq->mpwqe.wq;
1549        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1550        mlx5e_free_rx_mpwqe(rq, wi, true);
1551        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1552}
1553
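    /*
     * NAPI RX poll: first drain CQEs left over from a previously started
     * compressed session, then pop and dispatch CQEs until the budget is
     * spent or the CQ is empty, expanding compressed CQE blocks on the
     * fly.  The CQ doorbell record is updated once, on the way out.
     */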
1554int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1555{
1556        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
1557        struct mlx5_cqwq *cqwq = &cq->wq;
1558        struct mlx5_cqe64 *cqe;
1559        int work_done = 0;
1560
1561        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1562                return 0;
1563
1564        if (rq->cqd.left) {
1565                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
1566                if (work_done >= budget)
1567                        goto out;
1568        }
1569
1570        cqe = mlx5_cqwq_get_cqe(cqwq);
1571        if (!cqe) {
1572                if (unlikely(work_done))
1573                        goto out;
1574                return 0;
1575        }
1576
1577        do {
1578                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1579                        work_done +=
1580                                mlx5e_decompress_cqes_start(rq, cqwq,
1581                                                            budget - work_done);
1582                        continue;
1583                }
1584
1585                mlx5_cqwq_pop(cqwq);
1586
1587                INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
1588                                mlx5e_handle_rx_cqe, rq, cqe);
1589        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
1590
1591out:
1592        if (rcu_access_pointer(rq->xdp_prog))
1593                mlx5e_xdp_rx_poll_complete(rq);
1594
1595        mlx5_cqwq_update_db_record(cqwq);
1596
1597        /* ensure cq space is freed before enabling more cqes */
1598        wmb();
1599
1600        return work_done;
1601}
1602
1603#ifdef CONFIG_MLX5_CORE_IPOIB
1604
1605#define MLX5_IB_GRH_SGID_OFFSET 8
1606#define MLX5_IB_GRH_DGID_OFFSET 24
1607#define MLX5_GID_SIZE           16
1608
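    /*
     * IPoIB RX completion: the packet still starts with its IB GRH.  The
     * GRH GIDs and the CQE flags classify the packet (host / broadcast /
     * multicast), multicasts that this interface sent and the HCA
     * replicated back are dropped, then the GRH is stripped and a zeroed
     * pseudo header is prepended before the SKB goes up the stack.
     */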
1609static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1610                                         struct mlx5_cqe64 *cqe,
1611                                         u32 cqe_bcnt,
1612                                         struct sk_buff *skb)
1613{
1614        struct hwtstamp_config *tstamp;
1615        struct mlx5e_rq_stats *stats;
1616        struct net_device *netdev;
1617        struct mlx5e_priv *priv;
1618        char *pseudo_header;
1619        u32 flags_rqpn;
1620        u32 qpn;
1621        u8 *dgid;
1622        u8 g;
1623
1624        qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1625        netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1626
1627        /* No mapping present, cannot process the SKB. This can happen if a child
1628         * interface is going down while unprocessed CQEs remain on the parent RQ.
1629         */
1630        if (unlikely(!netdev)) {
1631                /* TODO: add drop counters support */
1632                skb->dev = NULL;
1633                pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1634                return;
1635        }
1636
1637        priv = mlx5i_epriv(netdev);
1638        tstamp = &priv->tstamp;
1639        stats = &priv->channel_stats[rq->ix].rq;
1640
1641        flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
1642        g = (flags_rqpn >> 28) & 3;
1643        dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1644        if ((!g) || dgid[0] != 0xff)
1645                skb->pkt_type = PACKET_HOST;
1646        else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1647                skb->pkt_type = PACKET_BROADCAST;
1648        else
1649                skb->pkt_type = PACKET_MULTICAST;
1650
1651        /* Drop packets that this interface itself sent, i.e. multicast packets
1652         * that the HCA has replicated back.
1653         */
1654        if (g && (qpn == (flags_rqpn & 0xffffff)) &&
1655            (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
1656                    MLX5_GID_SIZE) == 0)) {
1657                skb->dev = NULL;
1658                return;
1659        }
1660
1661        skb_pull(skb, MLX5_IB_GRH_BYTES);
1662
1663        skb->protocol = *((__be16 *)(skb->data));
1664
1665        if (netdev->features & NETIF_F_RXCSUM) {
1666                skb->ip_summed = CHECKSUM_COMPLETE;
1667                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1668                stats->csum_complete++;
1669        } else {
1670                skb->ip_summed = CHECKSUM_NONE;
1671                stats->csum_none++;
1672        }
1673
1674        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
1675                skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1676                                                                  rq->clock, get_cqe_ts(cqe));
1677        skb_record_rx_queue(skb, rq->ix);
1678
1679        if (likely(netdev->features & NETIF_F_RXHASH))
1680                mlx5e_skb_set_hash(cqe, skb);
1681
1682        /* 20 bytes of IPoIB pseudo header; the 4-byte encap header already exists */
1683        pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1684        memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1685        skb_reset_mac_header(skb);
1686        skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1687
1688        skb->dev = netdev;
1689
1690        stats->packets++;
1691        stats->bytes += cqe_bcnt;
1692}
1693
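    /*
     * Per-CQE handler for IPoIB (cyclic RQ only, MPWQE is not supported
     * here).  mlx5i_complete_rx_cqe() clears skb->dev when the child
     * netdev cannot be resolved or the packet is a self-sent multicast;
     * such SKBs are dropped instead of being passed to GRO.
     */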
1694static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1695{
1696        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1697        struct mlx5e_wqe_frag_info *wi;
1698        struct sk_buff *skb;
1699        u32 cqe_bcnt;
1700        u16 ci;
1701
1702        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1703        wi       = get_frag(rq, ci);
1704        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1705
1706        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1707                rq->stats->wqe_err++;
1708                goto wq_free_wqe;
1709        }
1710
1711        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1712                              mlx5e_skb_from_cqe_linear,
1713                              mlx5e_skb_from_cqe_nonlinear,
1714                              rq, cqe, wi, cqe_bcnt);
1715        if (!skb)
1716                goto wq_free_wqe;
1717
1718        mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1719        if (unlikely(!skb->dev)) {
1720                dev_kfree_skb_any(skb);
1721                goto wq_free_wqe;
1722        }
1723        napi_gro_receive(rq->cq.napi, skb);
1724
1725wq_free_wqe:
1726        mlx5e_free_rx_wqe(rq, wi, true);
1727        mlx5_wq_cyc_pop(wq);
1728}
1729
1730const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
1731        .handle_rx_cqe       = mlx5i_handle_rx_cqe,
1732        .handle_rx_cqe_mpwqe = NULL, /* Not supported */
1733};
1734#endif /* CONFIG_MLX5_CORE_IPOIB */
1735
1736#ifdef CONFIG_MLX5_EN_IPSEC
1737
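    /*
     * RX handler installed (see mlx5e_rq_set_handlers() below) when FPGA
     * (Innova) IPsec capabilities are present: the SKB is run through
     * mlx5e_ipsec_handle_rx_skb(), which may consume it, before the usual
     * completion processing and GRO.
     */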
1738static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1739{
1740        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1741        struct mlx5e_wqe_frag_info *wi;
1742        struct sk_buff *skb;
1743        u32 cqe_bcnt;
1744        u16 ci;
1745
1746        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1747        wi       = get_frag(rq, ci);
1748        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1749
1750        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1751                rq->stats->wqe_err++;
1752                goto wq_free_wqe;
1753        }
1754
1755        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1756                              mlx5e_skb_from_cqe_linear,
1757                              mlx5e_skb_from_cqe_nonlinear,
1758                              rq, cqe, wi, cqe_bcnt);
1759        if (unlikely(!skb)) /* packet was dropped, skip the page-reuse checks */
1760                goto wq_free_wqe;
1761
1762        skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
1763        if (unlikely(!skb))
1764                goto wq_free_wqe;
1765
1766        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1767        napi_gro_receive(rq->cq.napi, skb);
1768
1769wq_free_wqe:
1770        mlx5e_free_rx_wqe(rq, wi, true);
1771        mlx5_wq_cyc_pop(wq);
1772}
1773
1774#endif /* CONFIG_MLX5_EN_IPSEC */
1775
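    /*
     * Select the RQ datapath callbacks (SKB builder, WQE post/dealloc and
     * CQE handler) according to the WQ type and whether the RQ is bound
     * to an XSK (AF_XDP) socket.  Striding RQ is rejected on Innova IPsec
     * devices, which are only handled on the cyclic WQ path here.
     */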
1776int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
1777{
1778        struct net_device *netdev = rq->netdev;
1779        struct mlx5_core_dev *mdev = rq->mdev;
1780        struct mlx5e_priv *priv = rq->priv;
1781
1782        switch (rq->wq_type) {
1783        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1784                rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
1785                        mlx5e_xsk_skb_from_cqe_mpwrq_linear :
1786                        mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
1787                                mlx5e_skb_from_cqe_mpwrq_linear :
1788                                mlx5e_skb_from_cqe_mpwrq_nonlinear;
1789                rq->post_wqes = mlx5e_post_rx_mpwqes;
1790                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
1791
1792                rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
1793                if (mlx5_fpga_is_ipsec_device(mdev)) {
1794                        netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
1795                        return -EINVAL;
1796                }
1797                if (!rq->handle_rx_cqe) {
1798                        netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
1799                        return -EINVAL;
1800                }
1801                break;
1802        default: /* MLX5_WQ_TYPE_CYCLIC */
1803                rq->wqe.skb_from_cqe = xsk ?
1804                        mlx5e_xsk_skb_from_cqe_linear :
1805                        mlx5e_rx_is_linear_skb(params, NULL) ?
1806                                mlx5e_skb_from_cqe_linear :
1807                                mlx5e_skb_from_cqe_nonlinear;
1808                rq->post_wqes = mlx5e_post_rx_wqes;
1809                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1810
1811#ifdef CONFIG_MLX5_EN_IPSEC
1812                if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
1813                    priv->ipsec)
1814                        rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
1815                else
1816#endif
1817                        rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
1818                if (!rq->handle_rx_cqe) {
1819                        netdev_err(netdev, "RX handler of RQ is not set\n");
1820                        return -EINVAL;
1821                }
1822        }
1823
1824        return 0;
1825}
1826
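    /*
     * Trap RQ handler: packets received on the trap RQ are not delivered
     * to the stack; they are reported to devlink with the trap id taken
     * from the CQE flow tag, then freed.
     */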
1827static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1828{
1829        struct mlx5e_priv *priv = netdev_priv(rq->netdev);
1830        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1831        struct mlx5e_wqe_frag_info *wi;
1832        struct devlink_port *dl_port;
1833        struct sk_buff *skb;
1834        u32 cqe_bcnt;
1835        u16 trap_id;
1836        u16 ci;
1837
1838        trap_id  = get_cqe_flow_tag(cqe);
1839        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1840        wi       = get_frag(rq, ci);
1841        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1842
1843        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1844                rq->stats->wqe_err++;
1845                goto free_wqe;
1846        }
1847
1848        skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
1849        if (!skb)
1850                goto free_wqe;
1851
1852        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1853        skb_push(skb, ETH_HLEN);
1854
1855        dl_port = mlx5e_devlink_get_dl_port(priv);
1856        mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
1857        dev_kfree_skb_any(skb);
1858
1859free_wqe:
1860        mlx5e_free_rx_wqe(rq, wi, false);
1861        mlx5_wq_cyc_pop(wq);
1862}
1863
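    /* Trap RQs use the cyclic WQ layout, so only the non-striding SKB
     * builders and WQE routines are wired up here.
     */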
1864void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
1865{
1866        rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
1867                               mlx5e_skb_from_cqe_linear :
1868                               mlx5e_skb_from_cqe_nonlinear;
1869        rq->post_wqes = mlx5e_post_rx_wqes;
1870        rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1871        rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
1872}
1873