linux/net/ipv4/bpf_tcp_ca.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

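/* Callbacks in struct tcp_congestion_ops that a BPF congestion control may
 * leave unset.  Registration still enforces the usual requirements in
 * tcp_register_congestion_control() (ssthresh, undo_cwnd and one of
 * cong_avoid/cong_control must be provided).
 */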
static u32 optional_ops[] = {
        offsetof(struct tcp_congestion_ops, init),
        offsetof(struct tcp_congestion_ops, release),
        offsetof(struct tcp_congestion_ops, set_state),
        offsetof(struct tcp_congestion_ops, cwnd_event),
        offsetof(struct tcp_congestion_ops, in_ack_event),
        offsetof(struct tcp_congestion_ops, pkts_acked),
        offsetof(struct tcp_congestion_ops, min_tso_segs),
        offsetof(struct tcp_congestion_ops, sndbuf_expand),
        offsetof(struct tcp_congestion_ops, cong_control),
};

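/* Callbacks that cannot be implemented as a BPF program.  get_info() reports
 * diagnostic state to userspace and is rejected in bpf_tcp_ca_check_member().
 */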
static u32 unsupported_ops[] = {
        offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

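/* Resolve the BTF ids of struct sock and struct tcp_sock from vmlinux BTF
 * once, when this struct_ops is initialized.  They are used below to promote
 * "struct sock *" arguments and to gate writes to tcp_sock fields.
 */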
static int bpf_tcp_ca_init(struct btf *btf)
{
        s32 type_id;

        type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
        if (type_id < 0)
                return -EINVAL;
        sock_id = type_id;

        type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
        if (type_id < 0)
                return -EINVAL;
        tcp_sock_id = type_id;
        tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

        return 0;
}

static bool is_optional(u32 member_offset)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
                if (member_offset == optional_ops[i])
                        return true;
        }

        return false;
}

static bool is_unsupported(u32 member_offset)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
                if (member_offset == unsupported_ops[i])
                        return true;
        }

        return false;
}

extern struct btf *btf_vmlinux;

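/* The context of a struct_ops program is the u64 args[] array built by the
 * trampoline.  Reads are validated with btf_ctx_access(); a pointer argument
 * typed as "struct sock *" is promoted to "struct tcp_sock *" so the BPF
 * program can dereference tcp_sock fields directly.
 */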
static bool bpf_tcp_ca_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;

        if (!btf_ctx_access(off, size, type, prog, info))
                return false;

        if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
                /* promote it to tcp_sock */
                info->btf_id = tcp_sock_id;

        return true;
}

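/* Loads through a PTR_TO_BTF_ID pointer fall back to the generic
 * btf_struct_access().  Stores are only allowed to a small allowlist of
 * tcp_sock / inet_connection_sock members (the private CA area and the
 * fields a congestion control is expected to update), and must not spill
 * past the end of the member being written.
 */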
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
                                        const struct btf *btf,
                                        const struct btf_type *t, int off,
                                        int size, enum bpf_access_type atype,
                                        u32 *next_btf_id)
{
        size_t end;

        if (atype == BPF_READ)
                return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);

        if (t != tcp_sock_type) {
                bpf_log(log, "only read is supported\n");
                return -EACCES;
        }

        switch (off) {
        case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
                end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
                break;
        case offsetof(struct inet_connection_sock, icsk_ack.pending):
                end = offsetofend(struct inet_connection_sock,
                                  icsk_ack.pending);
                break;
        case offsetof(struct tcp_sock, snd_cwnd):
                end = offsetofend(struct tcp_sock, snd_cwnd);
                break;
        case offsetof(struct tcp_sock, snd_cwnd_cnt):
                end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
                break;
        case offsetof(struct tcp_sock, snd_ssthresh):
                end = offsetofend(struct tcp_sock, snd_ssthresh);
                break;
        case offsetof(struct tcp_sock, ecn_flags):
                end = offsetofend(struct tcp_sock, ecn_flags);
                break;
        default:
                bpf_log(log, "no write support to tcp_sock at off %d\n", off);
                return -EACCES;
        }

        if (off + size > end) {
                bpf_log(log,
                        "write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
                        off, size, end);
                return -EACCES;
        }

        return NOT_INIT;
}

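/* Helper exposed to BPF congestion controls: send an ACK carrying a caller
 * supplied rcv_nxt, e.g. so a DCTCP-style implementation can ACK with the
 * rcv_nxt captured before a delayed ACK was deferred.
 */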
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
        /* bpf_tcp_ca prog cannot have NULL tp */
        __tcp_send_ack((struct sock *)tp, rcv_nxt);
        return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
        .func           = bpf_tcp_send_ack,
        .gpl_only       = false,
        /* In case we want to report error later */
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &tcp_sock_id,
        .arg2_type      = ARG_ANYTHING,
};

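/* Helpers available to tcp_congestion_ops programs: bpf_tcp_send_ack(),
 * socket-local storage, plus the base set from bpf_base_func_proto().
 */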
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
                          const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_tcp_send_ack:
                return &bpf_tcp_send_ack_proto;
        case BPF_FUNC_sk_storage_get:
                return &bpf_sk_storage_get_proto;
        case BPF_FUNC_sk_storage_delete:
                return &bpf_sk_storage_delete_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
}

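/* Kernel functions that a BPF congestion control may call directly (kfunc
 * calls).  The reno/slow-start primitives are always allowed; the built-in
 * cubic, dctcp and bbr callbacks are additionally listed on x86 builds with
 * dynamic ftrace, so a BPF CA can wrap or reuse them.
 */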
BTF_SET_START(bpf_tcp_ca_kfunc_ids)
BTF_ID(func, tcp_reno_ssthresh)
BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif  /* CONFIG_DYNAMIC_FTRACE */
#endif  /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)

static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
{
        return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
        .get_func_proto         = bpf_tcp_ca_get_func_proto,
        .is_valid_access        = bpf_tcp_ca_is_valid_access,
        .btf_struct_access      = bpf_tcp_ca_btf_struct_access,
        .check_kfunc_call       = bpf_tcp_ca_check_kfunc_call,
};

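/* Copy one struct tcp_congestion_ops member from the userspace image (udata)
 * into the kernel copy (kdata).  Returning 1 means the member was fully
 * handled here; returning 0 lets the generic struct_ops code handle it
 * (e.g. wire a function-pointer member up to the provided BPF program).
 */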
static int bpf_tcp_ca_init_member(const struct btf_type *t,
                                  const struct btf_member *member,
                                  void *kdata, const void *udata)
{
        const struct tcp_congestion_ops *utcp_ca;
        struct tcp_congestion_ops *tcp_ca;
        int prog_fd;
        u32 moff;

        utcp_ca = (const struct tcp_congestion_ops *)udata;
        tcp_ca = (struct tcp_congestion_ops *)kdata;

        moff = btf_member_bit_offset(t, member) / 8;
        switch (moff) {
        case offsetof(struct tcp_congestion_ops, flags):
                if (utcp_ca->flags & ~TCP_CONG_MASK)
                        return -EINVAL;
                tcp_ca->flags = utcp_ca->flags;
                return 1;
        case offsetof(struct tcp_congestion_ops, name):
                if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
                                     sizeof(tcp_ca->name)) <= 0)
                        return -EINVAL;
                if (tcp_ca_find(utcp_ca->name))
                        return -EEXIST;
                return 1;
        }

        if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
                return 0;

        /* Ensure bpf_prog is provided for compulsory func ptr */
        prog_fd = (int)(*(unsigned long *)(udata + moff));
        if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
                return -EINVAL;

        return 0;
}

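/* Reject any attempt to implement a member listed in unsupported_ops[]
 * (currently only get_info).
 */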
static int bpf_tcp_ca_check_member(const struct btf_type *t,
                                   const struct btf_member *member)
{
        if (is_unsupported(btf_member_bit_offset(t, member) / 8))
                return -ENOTSUPP;
        return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
        return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
        tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
        .verifier_ops = &bpf_tcp_ca_verifier_ops,
        .reg = bpf_tcp_ca_reg,
        .unreg = bpf_tcp_ca_unreg,
        .check_member = bpf_tcp_ca_check_member,
        .init_member = bpf_tcp_ca_init_member,
        .init = bpf_tcp_ca_init,
        .name = "tcp_congestion_ops",
};
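
/* A minimal sketch (not part of this file) of how a congestion control is
 * written against this struct_ops from BPF C code.  It follows the libbpf
 * conventions used by the kernel selftests (e.g. bpf_dctcp.c): SEC() section
 * names and the BPF_PROG() macro from <bpf/bpf_tracing.h>.  The "sample"
 * algorithm and its names are purely illustrative.
 *
 *	// SPDX-License-Identifier: GPL-2.0
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	SEC("struct_ops/sample_init")
 *	void BPF_PROG(sample_init, struct sock *sk)
 *	{
 *		// sk is promoted to tcp_sock by bpf_tcp_ca_is_valid_access()
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		// write permitted by bpf_tcp_ca_btf_struct_access()
 *		tp->snd_ssthresh = 0x7fffffff;
 *	}
 *
 *	SEC("struct_ops/sample_ssthresh")
 *	__u32 BPF_PROG(sample_ssthresh, struct sock *sk)
 *	{
 *		const struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		// reno-style: halve cwnd on loss
 *		return tp->snd_cwnd >> 1;
 *	}
 *
 *	SEC("struct_ops/sample_undo_cwnd")
 *	__u32 BPF_PROG(sample_undo_cwnd, struct sock *sk)
 *	{
 *		return ((struct tcp_sock *)sk)->snd_cwnd;
 *	}
 *
 *	SEC("struct_ops/sample_cong_avoid")
 *	void BPF_PROG(sample_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		// grossly simplified growth, for illustration only
 *		tp->snd_cwnd += acked;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops sample = {
 *		.init		= (void *)sample_init,
 *		.ssthresh	= (void *)sample_ssthresh,
 *		.undo_cwnd	= (void *)sample_undo_cwnd,
 *		.cong_avoid	= (void *)sample_cong_avoid,
 *		.name		= "bpf_sample",
 *	};
 */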