linux/kernel/bpf/stackmap.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK                                  \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |        \
         BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
        struct pcpu_freelist_node fnode;
        u32 hash;
        u32 nr;
        u64 data[];
};

struct bpf_stack_map {
        struct bpf_map map;
        void *elems;
        struct pcpu_freelist freelist;
        u32 n_buckets;
        struct stack_map_bucket *buckets[];
};

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
        return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
        return stack_map_use_build_id(map) ?
                sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

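/*
 * Illustrative note (not part of the original source): the per-entry data
 * size above ties value_size to the deepest stack a map can hold.  For
 * example, storing up to 127 frames as plain instruction pointers needs
 * value_size = 127 * sizeof(u64) = 1016 bytes; with BPF_F_STACK_BUILD_ID
 * each frame instead occupies sizeof(struct bpf_stack_build_id)
 * (32 bytes on typical configurations), so the same depth needs
 * 127 * 32 = 4064 bytes.
 */
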
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
        u64 elem_size = sizeof(struct stack_map_bucket) +
                        (u64)smap->map.value_size;
        int err;

        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                                         smap->map.numa_node);
        if (!smap->elems)
                return -ENOMEM;

        err = pcpu_freelist_init(&smap->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                               smap->map.max_entries);
        return 0;

free_elems:
        bpf_map_area_free(smap->elems);
        return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_stack_map *smap;
        u64 cost, n_buckets;
        int err;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            value_size < 8 || value_size % 8)
                return ERR_PTR(-EINVAL);

        BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
        if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
                if (value_size % sizeof(struct bpf_stack_build_id) ||
                    value_size / sizeof(struct bpf_stack_build_id)
                    > sysctl_perf_event_max_stack)
                        return ERR_PTR(-EINVAL);
        } else if (value_size / 8 > sysctl_perf_event_max_stack)
                return ERR_PTR(-EINVAL);

        /* hash table size must be power of 2 */
        n_buckets = roundup_pow_of_two(attr->max_entries);
        if (!n_buckets)
                return ERR_PTR(-E2BIG);

        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
        if (!smap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&smap->map, attr);
        smap->n_buckets = n_buckets;

        err = get_callchain_buffers(sysctl_perf_event_max_stack);
        if (err)
                goto free_smap;

        err = prealloc_elems_and_freelist(smap);
        if (err)
                goto put_buffers;

        return &smap->map;

put_buffers:
        put_callchain_buffers();
free_smap:
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}

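/*
 * Illustrative user-space sketch (not part of the original source),
 * assuming libbpf's bpf_map_create() is available: creating a stack trace
 * map whose attributes satisfy the checks in stack_map_alloc() above.
 * handle_error() is a placeholder for the caller's error handling.
 *
 *	#include <bpf/bpf.h>
 *
 *	// key is a 4-byte stack id, value holds up to 127 instruction pointers
 *	int fd = bpf_map_create(BPF_MAP_TYPE_STACK_TRACE, "stacks",
 *				sizeof(__u32), 127 * sizeof(__u64),
 *				10000, NULL);
 *	if (fd < 0)
 *		// a map_flags bit outside STACK_CREATE_FLAG_MASK, a bad
 *		// value_size, etc. are rejected with -EINVAL here
 *		handle_error();
 */
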
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
                                          u64 *ips, u32 trace_nr, bool user)
{
        int i;
        struct mmap_unlock_irq_work *work = NULL;
        bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
        struct vm_area_struct *vma, *prev_vma = NULL;
        const char *prev_build_id;

        /* If the irq_work is in use, fall back to report ips. Same
         * fallback is used for kernel stack (!user) on a stackmap with
         * build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
            !mmap_read_trylock(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
                }
                return;
        }

        for (i = 0; i < trace_nr; i++) {
                if (range_in_vma(prev_vma, ips[i], ips[i])) {
                        vma = prev_vma;
                        memcpy(id_offs[i].build_id, prev_build_id,
                               BUILD_ID_SIZE_MAX);
                        goto build_id_valid;
                }
                vma = find_vma(current->mm, ips[i]);
                if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
                        /* per entry fall back to ips */
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
                        continue;
                }
build_id_valid:
                id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
                        - vma->vm_start;
                id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
                prev_vma = vma;
                prev_build_id = id_offs[i].build_id;
        }
        bpf_mmap_unlock_mm(work, current->mm);
}

static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
        struct perf_callchain_entry *entry;
        int rctx;

        entry = get_callchain_entry(&rctx);

        if (!entry)
                return NULL;

        entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
                                         max_depth, 0);

        /* stack_trace_save_tsk() works on unsigned long array, while
         * perf_callchain_entry uses u64 array. For 32-bit systems, it is
         * necessary to fix this mismatch.
         */
        if (__BITS_PER_LONG != 64) {
                unsigned long *from = (unsigned long *) entry->ip;
                u64 *to = entry->ip;
                int i;

                /* copy data from the end to avoid using extra buffer */
                for (i = entry->nr - 1; i >= 0; i--)
                        to[i] = (u64)(from[i]);
        }

        put_callchain_entry(rctx);

        return entry;
#else /* CONFIG_STACKTRACE */
        return NULL;
#endif
}

static long __bpf_get_stackid(struct bpf_map *map,
                              struct perf_callchain_entry *trace, u64 flags)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        u32 hash, id, trace_nr, trace_len;
        bool user = flags & BPF_F_USER_STACK;
        u64 *ips;
        bool hash_matches;

        if (trace->nr <= skip)
                /* skipping more than usable stack trace */
                return -EFAULT;

        trace_nr = trace->nr - skip;
        trace_len = trace_nr * sizeof(u64);
        ips = trace->ip + skip;
        hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
        id = hash & (smap->n_buckets - 1);
        bucket = READ_ONCE(smap->buckets[id]);

        hash_matches = bucket && bucket->hash == hash;
        /* fast cmp */
        if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
                return id;

        if (stack_map_use_build_id(map)) {
                /* for build_id+offset, pop a bucket before slow cmp */
                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                new_bucket->nr = trace_nr;
                stack_map_get_build_id_offset(
                        (struct bpf_stack_build_id *)new_bucket->data,
                        ips, trace_nr, user);
                trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return id;
                }
                if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return -EEXIST;
                }
        } else {
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, ips, trace_len) == 0)
                        return id;
                if (bucket && !(flags & BPF_F_REUSE_STACKID))
                        return -EEXIST;

                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                memcpy(new_bucket->data, ips, trace_len);
        }

        new_bucket->hash = hash;
        new_bucket->nr = trace_nr;

        old_bucket = xchg(&smap->buckets[id], new_bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return id;
}

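/*
 * Illustrative note (not part of the original source): on a bucket
 * collision __bpf_get_stackid() returns -EEXIST and keeps the existing
 * trace unless the caller passed BPF_F_REUSE_STACKID, in which case the
 * old trace is replaced via the xchg() above.  BPF_F_FAST_STACK_CMP skips
 * the full memcmp() and trusts the 32-bit jhash alone, trading accuracy
 * for speed.
 */
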
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
{
        u32 max_depth = map->value_size / stack_map_data_size(map);
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        max_depth += skip;
        if (max_depth > sysctl_perf_event_max_stack)
                max_depth = sysctl_perf_event_max_stack;

        trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                   false, false);

        if (unlikely(!trace))
                /* couldn't fetch the stack trace */
                return -EFAULT;

        return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
        .func           = bpf_get_stackid,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

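/*
 * Illustrative BPF-program sketch (not part of the original source),
 * assuming the usual <bpf/bpf_helpers.h> declarations: recording the
 * current kernel and user stacks of a perf sample into a stack trace map.
 * The map name "stacks" and the program below are examples only.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 10000);
 *		__type(key, __u32);
 *		__uint(value_size, 127 * sizeof(__u64));
 *	} stacks SEC(".maps");
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		long kern_id = bpf_get_stackid(ctx, &stacks, 0);
 *		long user_id = bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);
 *
 *		// negative return values are errors (-EFAULT, -EEXIST, ...)
 *		return 0;
 *	}
 */
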
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
        __u64 nr_kernel = 0;

        while (nr_kernel < trace->nr) {
                if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
                        break;
                nr_kernel++;
        }
        return nr_kernel;
}

BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
           struct bpf_map *, map, u64, flags)
{
        struct perf_event *event = ctx->event;
        struct perf_callchain_entry *trace;
        bool kernel, user;
        __u64 nr_kernel;
        int ret;

        /* perf_sample_data doesn't have callchain, use bpf_get_stackid */
        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
                return bpf_get_stackid((unsigned long)(ctx->regs),
                                       (unsigned long) map, flags, 0, 0);

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        user = flags & BPF_F_USER_STACK;
        kernel = !user;

        trace = ctx->data->callchain;
        if (unlikely(!trace))
                return -EFAULT;

        nr_kernel = count_kernel_ip(trace);

        if (kernel) {
                __u64 nr = trace->nr;

                trace->nr = nr_kernel;
                ret = __bpf_get_stackid(map, trace, flags);

                /* restore nr */
                trace->nr = nr;
        } else { /* user */
                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

                skip += nr_kernel;
                if (skip > BPF_F_SKIP_FIELD_MASK)
                        return -EFAULT;

                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
                ret = __bpf_get_stackid(map, trace, flags);
        }
        return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
        .func           = bpf_get_stackid_pe,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                            struct perf_callchain_entry *trace_in,
                            void *buf, u32 size, u64 flags)
{
        u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;
        int err = -EINVAL;
        u64 *ips;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;
        if (kernel && user_build_id)
                goto clear;

        elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
                                            : sizeof(u64);
        if (unlikely(size % elem_size))
                goto clear;

        /* cannot get valid user stack for task without user_mode regs */
        if (task && user && !user_mode(regs))
                goto err_fault;

        num_elem = size / elem_size;
        max_depth = num_elem + skip;
        if (sysctl_perf_event_max_stack < max_depth)
                max_depth = sysctl_perf_event_max_stack;

        if (trace_in)
                trace = trace_in;
        else if (kernel && task)
                trace = get_callchain_entry_for_task(task, max_depth);
        else
                trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
                                           false, false);
        if (unlikely(!trace))
                goto err_fault;

        if (trace->nr < skip)
                goto err_fault;

        trace_nr = trace->nr - skip;
        trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
        copy_len = trace_nr * elem_size;

        ips = trace->ip + skip;
        if (user && user_build_id)
                stack_map_get_build_id_offset(buf, ips, trace_nr, user);
        else
                memcpy(buf, ips, copy_len);

        if (size > copy_len)
                memset(buf + copy_len, 0, size - copy_len);
        return copy_len;

err_fault:
        err = -EFAULT;
clear:
        memset(buf, 0, size);
        return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
           u64, flags)
{
        return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
        .func           = bpf_get_stack,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

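/*
 * Illustrative BPF-program sketch (not part of the original source),
 * assuming <bpf/bpf_helpers.h>: copying the raw user stack into a buffer
 * instead of storing it in a map.  The attach point and names below are
 * examples only.
 *
 *	#define MAX_DEPTH 32
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int on_open(struct pt_regs *ctx)
 *	{
 *		__u64 ips[MAX_DEPTH];
 *		long len = bpf_get_stack(ctx, ips, sizeof(ips),
 *					 BPF_F_USER_STACK);
 *
 *		// on success, len is the number of bytes written; any
 *		// remaining space in ips[] has been zeroed by the helper
 *		return 0;
 *	}
 */
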
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
{
        struct pt_regs *regs;
        long res = -EINVAL;

        if (!try_get_task_stack(task))
                return -EFAULT;

        regs = task_pt_regs(task);
        if (regs)
                res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
        put_task_stack(task);

        return res;
}

const struct bpf_func_proto bpf_get_task_stack_proto = {
        .func           = bpf_get_task_stack,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

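/*
 * Illustrative BPF-program sketch (not part of the original source),
 * assuming BTF-generated types (vmlinux.h) and <bpf/bpf_helpers.h>: using
 * bpf_get_task_stack() from a task iterator to snapshot the kernel stack
 * of every task.  Names below are examples only.
 *
 *	SEC("iter/task")
 *	int dump_task_stack(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		__u64 ips[32];
 *
 *		if (!task)
 *			return 0;
 *		bpf_get_task_stack(task, ips, sizeof(ips), 0);
 *		return 0;
 *	}
 */
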
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
           void *, buf, u32, size, u64, flags)
{
        struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
        struct perf_event *event = ctx->event;
        struct perf_callchain_entry *trace;
        bool kernel, user;
        int err = -EINVAL;
        __u64 nr_kernel;

        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
                return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;

        user = flags & BPF_F_USER_STACK;
        kernel = !user;

        err = -EFAULT;
        trace = ctx->data->callchain;
        if (unlikely(!trace))
                goto clear;

        nr_kernel = count_kernel_ip(trace);

        if (kernel) {
                __u64 nr = trace->nr;

                trace->nr = nr_kernel;
                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);

                /* restore nr */
                trace->nr = nr;
        } else { /* user */
                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

                skip += nr_kernel;
                if (skip > BPF_F_SKIP_FIELD_MASK)
                        goto clear;

                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
        }
        return err;

clear:
        memset(buf, 0, size);
        return err;
}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
        .func           = bpf_get_stack_pe,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
        u32 id = *(u32 *)key, trace_len;

        if (unlikely(id >= smap->n_buckets))
                return -ENOENT;

        bucket = xchg(&smap->buckets[id], NULL);
        if (!bucket)
                return -ENOENT;

        trace_len = bucket->nr * stack_map_data_size(map);
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);

        old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}

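/*
 * Illustrative user-space sketch (not part of the original source),
 * assuming libbpf: reading back a trace by the stack id a program stored
 * elsewhere (e.g. as a value in a companion hash map).  The lookup goes
 * through bpf_stackmap_copy() above, which temporarily takes the bucket
 * out of the table so a concurrent update cannot recycle it mid-copy.
 * stack_map_fd and stack_id are placeholders supplied by the caller.
 *
 *	__u64 ips[127] = {};
 *
 *	if (bpf_map_lookup_elem(stack_map_fd, &stack_id, ips) == 0) {
 *		// ips[] now holds the trace, zero-padded to value_size
 *		for (int i = 0; i < 127 && ips[i]; i++)
 *			printf("%#llx\n", ips[i]);
 *	}
 */
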
static int stack_map_get_next_key(struct bpf_map *map, void *key,
                                  void *next_key)
{
        struct bpf_stack_map *smap = container_of(map,
                                                  struct bpf_stack_map, map);
        u32 id;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!key) {
                id = 0;
        } else {
                id = *(u32 *)key;
                if (id >= smap->n_buckets || !smap->buckets[id])
                        id = 0;
                else
                        id++;
        }

        while (id < smap->n_buckets && !smap->buckets[id])
                id++;

        if (id >= smap->n_buckets)
                return -ENOENT;

        *(u32 *)next_key = id;
        return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *old_bucket;
        u32 id = *(u32 *)key;

        if (unlikely(id >= smap->n_buckets))
                return -E2BIG;

        old_bucket = xchg(&smap->buckets[id], NULL);
        if (old_bucket) {
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
                return 0;
        } else {
                return -ENOENT;
        }
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

        bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);
        put_callchain_buffers();
}

BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
        .map_lookup_elem = stack_map_lookup_elem,
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_id = &stack_trace_map_btf_ids[0],
};