/* linux/net/netfilter/nft_cmp.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
   4 *
   5 * Development of this code funded by Astaro AG (http://www.astaro.com/)
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/init.h>
  10#include <linux/module.h>
  11#include <linux/netlink.h>
  12#include <linux/netfilter.h>
  13#include <linux/if_arp.h>
  14#include <linux/netfilter/nf_tables.h>
  15#include <net/netfilter/nf_tables_core.h>
  16#include <net/netfilter/nf_tables_offload.h>
  17#include <net/netfilter/nf_tables.h>
  18
/* Private data of the generic cmp expression: compares @len bytes of the
 * source register against the immediate @data using operator @op.
 */
struct nft_cmp_expr {
	struct nft_data		data;	/* immediate operand */
	u8			sreg;	/* source register index */
	u8			len;	/* number of bytes compared */
	enum nft_cmp_ops	op:8;	/* NFT_CMP_EQ/NEQ/LT/LTE/GT/GTE */
};
  25
  26void nft_cmp_eval(const struct nft_expr *expr,
  27                  struct nft_regs *regs,
  28                  const struct nft_pktinfo *pkt)
  29{
  30        const struct nft_cmp_expr *priv = nft_expr_priv(expr);
  31        int d;
  32
  33        d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
  34        switch (priv->op) {
  35        case NFT_CMP_EQ:
  36                if (d != 0)
  37                        goto mismatch;
  38                break;
  39        case NFT_CMP_NEQ:
  40                if (d == 0)
  41                        goto mismatch;
  42                break;
  43        case NFT_CMP_LT:
  44                if (d == 0)
  45                        goto mismatch;
  46                fallthrough;
  47        case NFT_CMP_LTE:
  48                if (d > 0)
  49                        goto mismatch;
  50                break;
  51        case NFT_CMP_GT:
  52                if (d == 0)
  53                        goto mismatch;
  54                fallthrough;
  55        case NFT_CMP_GTE:
  56                if (d < 0)
  57                        goto mismatch;
  58                break;
  59        }
  60        return;
  61
  62mismatch:
  63        regs->verdict.code = NFT_BREAK;
  64}
  65
/* Netlink attribute policy shared by all cmp expression variants. */
static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};
  71
  72static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
  73                        const struct nlattr * const tb[])
  74{
  75        struct nft_cmp_expr *priv = nft_expr_priv(expr);
  76        struct nft_data_desc desc = {
  77                .type   = NFT_DATA_VALUE,
  78                .size   = sizeof(priv->data),
  79        };
  80        int err;
  81
  82        err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
  83        if (err < 0)
  84                return err;
  85
  86        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
  87        if (err < 0)
  88                return err;
  89
  90        priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
  91        priv->len = desc.len;
  92        return 0;
  93}
  94
  95static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
  96{
  97        const struct nft_cmp_expr *priv = nft_expr_priv(expr);
  98
  99        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
 100                goto nla_put_failure;
 101        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
 102                goto nla_put_failure;
 103
 104        if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
 105                          NFT_DATA_VALUE, priv->len) < 0)
 106                goto nla_put_failure;
 107        return 0;
 108
 109nla_put_failure:
 110        return -1;
 111}
 112
/* Scratch space for byte-order conversion of offloaded match values;
 * which member is valid depends on the field length (2, 4 or 8 bytes).
 */
union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};
 118
 119static void nft_payload_n2h(union nft_cmp_offload_data *data,
 120                            const u8 *val, u32 len)
 121{
 122        switch (len) {
 123        case 2:
 124                data->val16 = ntohs(*((__be16 *)val));
 125                break;
 126        case 4:
 127                data->val32 = ntohl(*((__be32 *)val));
 128                break;
 129        case 8:
 130                data->val64 = be64_to_cpu(*((__be64 *)val));
 131                break;
 132        default:
 133                WARN_ON_ONCE(1);
 134                break;
 135        }
 136}
 137
/* Translate an equality comparison into a flow dissector key/mask pair for
 * hardware offload.  Only NFT_CMP_EQ fitting within the tracked field can
 * be expressed; anything else returns -EOPNOTSUPP so the rule stays in
 * software.
 */
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	/* Hardware matching is equality-only and must fit the field width. */
	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		/* This dissector field expects host byte order; convert both
		 * the value and the mask before copying them in. */
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	/* Matching on the ingress interface type is only offloadable for
	 * Ethernet devices. */
	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}
 176
 177static int nft_cmp_offload(struct nft_offload_ctx *ctx,
 178                           struct nft_flow_rule *flow,
 179                           const struct nft_expr *expr)
 180{
 181        const struct nft_cmp_expr *priv = nft_expr_priv(expr);
 182
 183        return __nft_cmp_offload(ctx, flow, priv);
 184}
 185
/* Ops for the generic (memcmp-based) cmp variant; selected by
 * nft_cmp_select_ops() when neither fast variant applies.
 */
static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_offload,
};
 195
 196/* Calculate the mask for the nft_cmp_fast expression. On big endian the
 197 * mask needs to include the *upper* bytes when interpreting that data as
 198 * something smaller than the full u32, therefore a cpu_to_le32 is done.
 199 */
 200static u32 nft_cmp_fast_mask(unsigned int len)
 201{
 202        __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
 203                                          data) * BITS_PER_BYTE - len));
 204
 205        return (__force u32)mask;
 206}
 207
 208static int nft_cmp_fast_init(const struct nft_ctx *ctx,
 209                             const struct nft_expr *expr,
 210                             const struct nlattr * const tb[])
 211{
 212        struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
 213        struct nft_data data;
 214        struct nft_data_desc desc = {
 215                .type   = NFT_DATA_VALUE,
 216                .size   = sizeof(data),
 217        };
 218        int err;
 219
 220        err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
 221        if (err < 0)
 222                return err;
 223
 224        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
 225        if (err < 0)
 226                return err;
 227
 228        desc.len *= BITS_PER_BYTE;
 229
 230        priv->mask = nft_cmp_fast_mask(desc.len);
 231        priv->data = data.data[0] & priv->mask;
 232        priv->len  = desc.len;
 233        priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
 234        return 0;
 235}
 236
 237static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
 238                                struct nft_flow_rule *flow,
 239                                const struct nft_expr *expr)
 240{
 241        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
 242        struct nft_cmp_expr cmp = {
 243                .data   = {
 244                        .data   = {
 245                                [0] = priv->data,
 246                        },
 247                },
 248                .sreg   = priv->sreg,
 249                .len    = priv->len / BITS_PER_BYTE,
 250                .op     = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
 251        };
 252
 253        return __nft_cmp_offload(ctx, flow, &cmp);
 254}
 255
 256static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
 257{
 258        const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
 259        enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
 260        struct nft_data data;
 261
 262        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
 263                goto nla_put_failure;
 264        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
 265                goto nla_put_failure;
 266
 267        data.data[0] = priv->data;
 268        if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
 269                          NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
 270                goto nla_put_failure;
 271        return 0;
 272
 273nla_put_failure:
 274        return -1;
 275}
 276
/* Ops for the <= 4 byte EQ/NEQ fast path; eval is NULL because the core
 * inlines nft_cmp_fast evaluation.
 */
const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL, /* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_fast_offload,
};
 286
 287static u32 nft_cmp_mask(u32 bitlen)
 288{
 289        return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
 290}
 291
/* Fill @data with a comparison mask covering the first @bitlen bits:
 * whole 32-bit words become all-ones, a trailing partial word gets a
 * partial mask, and the remainder of the nft_data is zeroed.
 */
static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
	int len = bitlen / BITS_PER_BYTE;
	int i, words = len / sizeof(u32);

	/* Full words are matched entirely. */
	for (i = 0; i < words; i++) {
		data->data[i] = 0xffffffff;
		bitlen -= sizeof(u32) * BITS_PER_BYTE;
	}

	/* Partial trailing word, if any; bitlen now holds the leftover bits. */
	if (len % sizeof(u32))
		data->data[i++] = nft_cmp_mask(bitlen);

	/* Zero the unused tail so the masked compare ignores it. */
	for (; i < 4; i++)
		data->data[i] = 0;
}
 308
 309static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
 310                               const struct nft_expr *expr,
 311                               const struct nlattr * const tb[])
 312{
 313        struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
 314        struct nft_data_desc desc = {
 315                .type   = NFT_DATA_VALUE,
 316                .size   = sizeof(priv->data),
 317        };
 318        int err;
 319
 320        err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
 321        if (err < 0)
 322                return err;
 323
 324        err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
 325        if (err < 0)
 326                return err;
 327
 328        nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
 329        priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
 330        priv->len = desc.len;
 331
 332        return 0;
 333}
 334
 335static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
 336                                  struct nft_flow_rule *flow,
 337                                  const struct nft_expr *expr)
 338{
 339        const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
 340        struct nft_cmp_expr cmp = {
 341                .data   = priv->data,
 342                .sreg   = priv->sreg,
 343                .len    = priv->len,
 344                .op     = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
 345        };
 346
 347        return __nft_cmp_offload(ctx, flow, &cmp);
 348}
 349
 350static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
 351{
 352        const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
 353        enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
 354
 355        if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
 356                goto nla_put_failure;
 357        if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
 358                goto nla_put_failure;
 359
 360        if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
 361                          NFT_DATA_VALUE, priv->len) < 0)
 362                goto nla_put_failure;
 363        return 0;
 364
 365nla_put_failure:
 366        return -1;
 367}
 368
 369
/* Ops for the 5..16 byte EQ/NEQ fast path; eval is NULL because the core
 * inlines nft_cmp16_fast evaluation.
 */
const struct nft_expr_ops nft_cmp16_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
	.eval		= NULL, /* inlined */
	.init		= nft_cmp16_fast_init,
	.dump		= nft_cmp16_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp16_fast_offload,
};
 379
/* Pick the expression ops variant based on operator and operand length:
 * EQ/NEQ with <= 4 bytes -> nft_cmp_fast_ops, EQ/NEQ with <= 16 bytes on
 * a suitably aligned register -> nft_cmp16_fast_ops, otherwise the
 * generic nft_cmp_ops.
 */
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	enum nft_cmp_ops op;
	u8 sreg;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	/* Reject unknown operators before any ops variant is chosen. */
	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* Parse the operand only to learn its length (desc.len); the chosen
	 * variant's ->init() parses it again into its own private area. */
	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

	if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
		if (desc.len <= sizeof(u32))
			return &nft_cmp_fast_ops;
		/* The 16-byte fast path needs the operand to fit in nft_data
		 * and the register to start on an even 32-bit slot. */
		else if (desc.len <= sizeof(data) &&
			 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
			  (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
			return &nft_cmp16_fast_ops;
	}
	return &nft_cmp_ops;
}
 426
/* Expression type registration for "cmp"; select_ops dispatches between
 * the generic and fast implementations.
 */
struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};
 434