linux/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)

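/*
 * An ICM pool owns a list of buddy allocators (buddy_mem_list), each
 * backed by a single max-size SW ICM allocation. Chunks freed by the
 * caller are parked on a per-buddy "hot" list and are only reclaimed
 * once hot_memory_size crosses DR_ICM_SYNC_THRESHOLD_POOL and the HW
 * has been synced.
 */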
struct mlx5dr_icm_pool {
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_domain *dmn;
	/* memory management */
	struct mutex mutex; /* protect the ICM pool and ICM buddy */
	struct list_head buddy_mem_list;
	u64 hot_memory_size;
};

struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};

struct mlx5dr_icm_mr {
	struct mlx5_core_mkey mkey;
	struct mlx5dr_icm_dm dm;
	struct mlx5dr_domain *dmn;
	size_t length;
	u64 icm_start_addr;
};

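/*
 * Create an mkey covering the SW ICM range so it can be written with
 * RDMA operations. Note how the 5-bit access mode is split across the
 * device interface fields: bits 1:0 go into access_mode_1_0 and
 * bits 4:2 into access_mode_4_2.
 */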
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 struct mlx5_core_mkey *mkey)
{
	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}

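/*
 * Allocate one SW ICM region of the pool's maximum chunk size and
 * register it as device memory. STE memory is aligned to its own size
 * (log_align_base = ilog2(length)); modify-header memory only needs
 * 64B alignment.
 */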
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->dmn = pool->dmn;

	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
	} else {
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
	}
	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}

static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
	struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
	struct mlx5dr_icm_dm *dm = &icm_mr->dm;

	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
			       dm->addr, dm->obj_id);
	kvfree(icm_mr);
}

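/*
 * STE chunks carry shadow state alongside the ICM itself: the SW STE
 * structures (ste_arr), reduced-size copies of the HW STEs
 * (hw_ste_arr, DR_STE_SIZE_REDUCED bytes per entry) and the per-STE
 * miss-list heads (miss_list).
 */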
static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
{
	chunk->ste_arr = kvzalloc(chunk->num_of_entries *
				  sizeof(chunk->ste_arr[0]), GFP_KERNEL);
	if (!chunk->ste_arr)
		return -ENOMEM;

	chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
				     DR_STE_SIZE_REDUCED, GFP_KERNEL);
	if (!chunk->hw_ste_arr)
		goto out_free_ste_arr;

	chunk->miss_list = kvmalloc(chunk->num_of_entries *
				    sizeof(chunk->miss_list[0]), GFP_KERNEL);
	if (!chunk->miss_list)
		goto out_free_hw_ste_arr;

	return 0;

out_free_hw_ste_arr:
	kvfree(chunk->hw_ste_arr);
out_free_ste_arr:
	kvfree(chunk->ste_arr);
	return -ENOMEM;
}

static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
	kvfree(chunk->miss_list);
	kvfree(chunk->hw_ste_arr);
	kvfree(chunk->ste_arr);
}

static enum mlx5dr_icm_type
get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->pool->icm_type;
}

static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
				 struct mlx5dr_icm_buddy_mem *buddy)
{
	enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);

	buddy->used_memory -= chunk->byte_size;
	list_del(&chunk->chunk_list);

	if (icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_cleanup(chunk);

	kvfree(chunk);
}

static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy;
	struct mlx5dr_icm_mr *icm_mr;

	icm_mr = dr_icm_pool_mr_create(pool);
	if (!icm_mr)
		return -ENOMEM;

	buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
	if (!buddy)
		goto free_mr;

	if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
		goto err_free_buddy;

	buddy->icm_mr = icm_mr;
	buddy->pool = pool;

	/* Add it to the start of the list so it is searched first */
	list_add(&buddy->list_node, &pool->buddy_mem_list);

	return 0;

err_free_buddy:
	kvfree(buddy);
free_mr:
	dr_icm_pool_mr_destroy(icm_mr);
	return -ENOMEM;
}

static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
	struct mlx5dr_icm_chunk *chunk, *next;

	list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	dr_icm_pool_mr_destroy(buddy->icm_mr);

	mlx5dr_buddy_cleanup(buddy);

	kvfree(buddy);
}

static struct mlx5dr_icm_chunk *
dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
		    enum mlx5dr_icm_chunk_size chunk_size,
		    struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
		    unsigned int seg)
{
	struct mlx5dr_icm_chunk *chunk;
	int offset;

	chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return NULL;

	offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;

	chunk->rkey = buddy_mem_pool->icm_mr->mkey.key;
	chunk->mr_addr = offset;
	chunk->icm_addr =
		(uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
	chunk->num_of_entries =
		mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
	chunk->byte_size =
		mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
	chunk->seg = seg;

	if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
		mlx5dr_err(pool->dmn,
			   "Failed to init STE arrays (order: %d)\n",
			   chunk_size);
		goto out_free_chunk;
	}

	buddy_mem_pool->used_memory += chunk->byte_size;
	chunk->buddy_mem = buddy_mem_pool;
	INIT_LIST_HEAD(&chunk->chunk_list);

	/* The chunk is now part of the used_list */
	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);

	return chunk;

out_free_chunk:
	kvfree(chunk);
	return NULL;
}

static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
	return pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL;
}
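
/*
 * A worked example of the threshold above, assuming the 64B STE size
 * used elsewhere in the driver: about one million 64B STE chunks (or
 * 64K chunks of 1KB) add up to the 64MB threshold, so the first free
 * beyond that point syncs all buddy pools.
 */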

static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
	int err;

	err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
		return err;
	}

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
		struct mlx5dr_icm_chunk *chunk, *tmp_chunk;

		list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
			mlx5dr_buddy_free_mem(buddy, chunk->seg,
					      ilog2(chunk->num_of_entries));
			pool->hot_memory_size -= chunk->byte_size;
			dr_icm_chunk_destroy(chunk, buddy);
		}

		if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
			dr_icm_buddy_destroy(buddy);
	}

	return 0;
}

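/*
 * Find a buddy with a free segment of the requested order. If every
 * buddy in the pool is full, create a new one (added at the head of
 * the list) and retry; failing again right after creating a fresh
 * buddy is unexpected, hence the WARN_ON below.
 */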
static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
					 enum mlx5dr_icm_chunk_size chunk_size,
					 struct mlx5dr_icm_buddy_mem **buddy,
					 unsigned int *seg)
{
	struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
	bool new_mem = false;
	int err;

alloc_buddy_mem:
	/* Find the next free segment in the buddy list */
	list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
		err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
					     chunk_size, seg);
		if (!err)
			goto found;

		if (WARN_ON(new_mem)) {
			/* A freshly created buddy (first in the list) failed */
			mlx5dr_err(pool->dmn,
				   "No memory for order: %d\n",
				   chunk_size);
			goto out;
		}
	}

	/* No buddy in the pool has room, create a new one */
	err = dr_icm_buddy_create(pool);
	if (err) {
		mlx5dr_err(pool->dmn,
			   "Failed creating buddy for order %d\n",
			   chunk_size);
		goto out;
	}

	/* Mark that new memory was added at the head of the list */
	new_mem = true;
	goto alloc_buddy_mem;

found:
	*buddy = buddy_mem_pool;
out:
	return err;
}

/* Allocate an ICM chunk: each chunk holds a piece of ICM memory and,
 * for STE memory, also the shadow structures used to manage the HW STEs.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL;
	struct mlx5dr_icm_buddy_mem *buddy;
	unsigned int seg;
	int ret;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	mutex_lock(&pool->mutex);
	/* Find memory: get back the relevant buddy pool and seg within it */
	ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
	if (ret)
		goto out;

	chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
	if (!chunk)
		goto out_err;

	goto out;

out_err:
	mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
	mutex_unlock(&pool->mutex);
	return chunk;
}
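
/*
 * Illustrative sketch, not part of the driver: how a caller might pair
 * chunk allocation with release. DR_CHUNK_SIZE_1 is assumed to be the
 * smallest order in enum mlx5dr_icm_chunk_size (see dr_types.h).
 */
static void __maybe_unused dr_icm_chunk_usage_example(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_chunk *chunk;

	chunk = mlx5dr_icm_alloc_chunk(pool, DR_CHUNK_SIZE_1);
	if (!chunk)
		return;

	/* ... write STEs to ICM via chunk->rkey / chunk->mr_addr ... */

	/* The chunk moves to the buddy's hot list until the next sync */
	mlx5dr_icm_free_chunk(chunk);
}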

void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	struct mlx5dr_icm_pool *pool = buddy->pool;

	/* Move the memory to the waiting ("hot") list */
	mutex_lock(&pool->mutex);
	list_move_tail(&chunk->chunk_list, &buddy->hot_list);
	pool->hot_memory_size += chunk->byte_size;

	/* Check if enough hot memory is waiting for a sync-ste */
	if (dr_icm_pool_is_sync_required(pool))
		dr_icm_pool_sync_all_buddy_pools(pool);

	mutex_unlock(&pool->mutex);
}

struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_icm_pool *pool;

	if (icm_type == DR_ICM_TYPE_STE)
		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
	else
		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->max_log_chunk_sz = max_log_chunk_sz;

	INIT_LIST_HEAD(&pool->buddy_mem_list);

	mutex_init(&pool->mutex);

	return pool;
}
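
/*
 * Illustrative sketch, not part of the driver: the expected pool
 * lifetime. The domain presumably creates one pool per ICM type during
 * init and destroys it on teardown.
 */
static void __maybe_unused dr_icm_pool_lifetime_example(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_icm_pool *pool;

	pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!pool)
		return;

	/* ... mlx5dr_icm_alloc_chunk() / mlx5dr_icm_free_chunk() ... */

	mlx5dr_icm_pool_destroy(pool);
}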

void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
		dr_icm_buddy_destroy(buddy);

	mutex_destroy(&pool->mutex);
	kvfree(pool);
}