linux/arch/powerpc/net/bpf_jit_comp64.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp64.c: eBPF JIT compiler
   4 *
   5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
   6 *                IBM Corporation
   7 *
   8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
   9 */
  10#include <linux/moduleloader.h>
  11#include <asm/cacheflush.h>
  12#include <asm/asm-compat.h>
  13#include <linux/netdevice.h>
  14#include <linux/filter.h>
  15#include <linux/if_vlan.h>
  16#include <asm/kprobes.h>
  17#include <linux/bpf.h>
  18
  19#include "bpf_jit64.h"
  20
  21static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  22{
  23        /*
  24         * We only need a stack frame if:
  25         * - we call other functions (kernel helpers), or
  26         * - the bpf program uses its stack area
  27         * The latter condition is deduced from the usage of BPF_REG_FP
  28         */
  29        return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
  30}
  31
  32/*
  33 * When not setting up our own stackframe, the redzone usage is:
  34 *
  35 *              [       prev sp         ] <-------------
  36 *              [         ...           ]               |
  37 * sp (r1) ---> [    stack pointer      ] --------------
  38 *              [   nv gpr save area    ] 6*8
  39 *              [    tail_call_cnt      ] 8
  40 *              [    local_tmp_var      ] 8
  41 *              [   unused red zone     ] 208 bytes protected
  42 */
  43static int bpf_jit_stack_local(struct codegen_context *ctx)
  44{
  45        if (bpf_has_stack_frame(ctx))
  46                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
  47        else
  48                return -(BPF_PPC_STACK_SAVE + 16);
  49}
  50
  51static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
  52{
  53        return bpf_jit_stack_local(ctx) + 8;
  54}
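As a worked example of the red-zone offsets these two helpers return: a minimal standalone sketch, assuming BPF_PPC_STACK_SAVE is 6 * 8 as in the layout comment above (the constant lives in bpf_jit64.h and may differ between kernel versions; the _SKETCH names below are illustrative, not the kernel's):

        #define BPF_PPC_STACK_SAVE_SKETCH       (6 * 8) /* assumed value of BPF_PPC_STACK_SAVE */

        /* Offsets relative to r1 when no stack frame is set up (red-zone case). */
        static const int local_tmp_var_off = -(BPF_PPC_STACK_SAVE_SKETCH + 16);     /* -64(r1) */
        static const int tail_call_cnt_off = -(BPF_PPC_STACK_SAVE_SKETCH + 16) + 8; /* -56(r1) */

The second offset is the same -(BPF_PPC_STACK_SAVE + 8)(r1) slot that the prologue below initializes for the tail call count.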
  55
  56static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
  57{
  58        if (reg >= BPF_PPC_NVR_MIN && reg < 32)
  59                return (bpf_has_stack_frame(ctx) ?
  60                        (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
  61                                - (8 * (32 - reg));
  62
  63        pr_err("BPF JIT is asking about unknown registers");
  64        BUG();
  65}
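Similarly, a small sketch of the save-slot arithmetic in bpf_jit_stack_offsetof(); the helper name below is illustrative, and BPF_PPC_NVR_MIN is assumed to be 27 so that r27-r31 are the candidates:

        /* frame_top is BPF_PPC_STACKFRAME + ctx->stack_size with a frame, 0 in the red-zone case */
        static int nvr_offset_sketch(int frame_top, int reg)
        {
                return frame_top - 8 * (32 - reg);      /* r31 -> top - 8, ..., r27 -> top - 40 */
        }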
  66
  67void bpf_jit_realloc_regs(struct codegen_context *ctx)
  68{
  69}
  70
  71void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
  72{
  73        int i;
  74
  75        /*
  76         * Initialize tail_call_cnt if we do tail calls.
  77         * Otherwise, put in NOPs so that it can be skipped when we are
  78         * invoked through a tail call.
  79         */
  80        if (ctx->seen & SEEN_TAILCALL) {
  81                EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
  82                /* this goes in the redzone */
  83                PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
  84        } else {
  85                EMIT(PPC_RAW_NOP());
  86                EMIT(PPC_RAW_NOP());
  87        }
  88
  89#define BPF_TAILCALL_PROLOGUE_SIZE      8
  90
  91        if (bpf_has_stack_frame(ctx)) {
  92                /*
   93                 * We need a stack frame, but we only need to save/restore
   94                 * LR if we call other functions
  95                 */
  96                if (ctx->seen & SEEN_FUNC) {
  97                        EMIT(PPC_RAW_MFLR(_R0));
  98                        PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
  99                }
 100
 101                PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
 102        }
 103
 104        /*
 105         * Back up non-volatile regs -- BPF registers 6-10
 106         * If we haven't created our own stack frame, we save these
 107         * in the protected zone below the previous stack frame
 108         */
 109        for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 110                if (bpf_is_seen_register(ctx, b2p[i]))
 111                        PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
 112
 113        /* Setup frame pointer to point to the bpf stack area */
 114        if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
 115                EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
 116                                STACK_FRAME_MIN_SIZE + ctx->stack_size));
 117}
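A quick sketch of why BPF_TAILCALL_PROLOGUE_SIZE is 8: the tail-call prologue is exactly the two instructions emitted at the top of this function (li + std, or two nops), and PowerPC instructions are a fixed 4 bytes each, so tail calls can branch just past them. PPC_INSN_BYTES below is an illustrative name, not a kernel constant:

        #define PPC_INSN_BYTES  4       /* fixed-width ISA */
        _Static_assert(2 * PPC_INSN_BYTES == BPF_TAILCALL_PROLOGUE_SIZE,
                       "tail-call prologue is two instructions");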
 118
 119static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 120{
 121        int i;
 122
 123        /* Restore NVRs */
 124        for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 125                if (bpf_is_seen_register(ctx, b2p[i]))
 126                        PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
 127
 128        /* Tear down our stack frame */
 129        if (bpf_has_stack_frame(ctx)) {
 130                EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
 131                if (ctx->seen & SEEN_FUNC) {
 132                        PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 133                        EMIT(PPC_RAW_MTLR(0));
 134                }
 135        }
 136}
 137
 138void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 139{
 140        bpf_jit_emit_common_epilogue(image, ctx);
 141
 142        /* Move result to r3 */
 143        EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
 144
 145        EMIT(PPC_RAW_BLR());
 146}
 147
 148static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
 149                                       u64 func)
 150{
 151#ifdef PPC64_ELF_ABI_v1
 152        /* func points to the function descriptor */
 153        PPC_LI64(b2p[TMP_REG_2], func);
 154        /* Load actual entry point from function descriptor */
 155        PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
 156        /* ... and move it to CTR */
 157        EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
 158        /*
 159         * Load TOC from function descriptor at offset 8.
 160         * We can clobber r2 since we get called through a
 161         * function pointer (so caller will save/restore r2)
  162         * and since we don't use a TOC ourselves.
 163         */
 164        PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
 165#else
 166        /* We can clobber r12 */
 167        PPC_FUNC_ADDR(12, func);
 168        EMIT(PPC_RAW_MTCTR(12));
 169#endif
 170        EMIT(PPC_RAW_BCTRL());
 171}
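For readers unfamiliar with the ELFv1 ABI, a rough sketch of the function descriptor dereferenced above; the struct and field names are illustrative, not the kernel's definitions:

        struct fdesc_sketch {
                u64 entry;      /* code address: loaded from offset 0 and moved to CTR */
                u64 toc;        /* TOC pointer:  loaded into r2 from offset 8          */
                u64 env;        /* environment pointer: unused by the JIT              */
        };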
 172
 173void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
 174{
 175        unsigned int i, ctx_idx = ctx->idx;
 176
 177        /* Load function address into r12 */
 178        PPC_LI64(12, func);
 179
 180        /* For bpf-to-bpf function calls, the callee's address is unknown
 181         * until the last extra pass. As seen above, we use PPC_LI64() to
  182         * load the callee's address, but the number of instructions
  183         * PPC_LI64() emits varies with the value being loaded.
 184         *
 185         * Since we don't want the number of instructions emitted to change,
 186         * we pad the optimized PPC_LI64() call with NOPs to guarantee that
 187         * we always have a five-instruction sequence, which is the maximum
 188         * that PPC_LI64() can emit.
 189         */
 190        for (i = ctx->idx - ctx_idx; i < 5; i++)
 191                EMIT(PPC_RAW_NOP());
 192
 193#ifdef PPC64_ELF_ABI_v1
 194        /*
 195         * Load TOC from function descriptor at offset 8.
 196         * We can clobber r2 since we get called through a
 197         * function pointer (so caller will save/restore r2)
  198         * and since we don't use a TOC ourselves.
 199         */
 200        PPC_BPF_LL(2, 12, 8);
 201        /* Load actual entry point from function descriptor */
 202        PPC_BPF_LL(12, 12, 0);
 203#endif
 204
 205        EMIT(PPC_RAW_MTCTR(12));
 206        EMIT(PPC_RAW_BCTRL());
 207}
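For reference, the longest sequence PPC_LI64() can emit, and hence the five-instruction budget padded to above, builds the immediate one 16-bit chunk at a time, roughly as follows (a sketch; shorter sequences are produced when halfwords are zero or the value fits in 32 bits):

        /*
         *      lis     rD, imm[63:48]          # high halfword, sign-extended
         *      ori     rD, rD, imm[47:32]
         *      sldi    rD, rD, 32              # move into the upper word
         *      oris    rD, rD, imm[31:16]
         *      ori     rD, rD, imm[15:0]
         */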
 208
 209static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 210{
 211        /*
  212         * By now, the eBPF program has already set up parameters in r3, r4 and r5
 213         * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
 214         * r4/BPF_REG_2 - pointer to bpf_array
 215         * r5/BPF_REG_3 - index in bpf_array
 216         */
 217        int b2p_bpf_array = b2p[BPF_REG_2];
 218        int b2p_index = b2p[BPF_REG_3];
 219
 220        /*
 221         * if (index >= array->map.max_entries)
 222         *   goto out;
 223         */
 224        EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
 225        EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
 226        EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
 227        PPC_BCC(COND_GE, out);
 228
 229        /*
 230         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 231         *   goto out;
 232         */
 233        PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 234        EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
 235        PPC_BCC(COND_GT, out);
 236
 237        /*
 238         * tail_call_cnt++;
 239         */
 240        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
 241        PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 242
 243        /* prog = array->ptrs[index]; */
 244        EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
 245        EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
 246        PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 247
 248        /*
 249         * if (prog == NULL)
 250         *   goto out;
 251         */
 252        EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
 253        PPC_BCC(COND_EQ, out);
 254
 255        /* goto *(prog->bpf_func + prologue_size); */
 256        PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 257#ifdef PPC64_ELF_ABI_v1
 258        /* skip past the function descriptor */
 259        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
 260                        FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
 261#else
 262        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
 263#endif
 264        EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
 265
 266        /* tear down stack, restore NVRs, ... */
 267        bpf_jit_emit_common_epilogue(image, ctx);
 268
 269        EMIT(PPC_RAW_BCTR());
 270        /* out: */
 271}
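Putting the pieces together, the emitted sequence behaves roughly like the following C-flavoured sketch (semantics only, not compilable as-is; tail_call_cnt lives in the stack slot returned by bpf_jit_stack_tailcallcnt()):

        /* inputs: ctx in r3, array in r4, index in r5 */
        if ((u32)index >= array->map.max_entries)
                goto out;
        if (tail_call_cnt > MAX_TAIL_CALL_CNT)
                goto out;
        tail_call_cnt++;
        prog = array->ptrs[index];
        if (prog == NULL)
                goto out;
        /* restore NVRs, tear down our stack frame, then: */
        goto *(prog->bpf_func + BPF_TAILCALL_PROLOGUE_SIZE);
out:
        /* execution continues after the tail call */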
 272
 273/* Assemble the body code between the prologue & epilogue */
 274int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
 275                       u32 *addrs, bool extra_pass)
 276{
 277        const struct bpf_insn *insn = fp->insnsi;
 278        int flen = fp->len;
 279        int i, ret;
 280
 281        /* Start of epilogue code - will only be valid 2nd pass onwards */
 282        u32 exit_addr = addrs[flen];
 283
 284        for (i = 0; i < flen; i++) {
 285                u32 code = insn[i].code;
 286                u32 dst_reg = b2p[insn[i].dst_reg];
 287                u32 src_reg = b2p[insn[i].src_reg];
 288                s16 off = insn[i].off;
 289                s32 imm = insn[i].imm;
 290                bool func_addr_fixed;
 291                u64 func_addr;
 292                u64 imm64;
 293                u32 true_cond;
 294                u32 tmp_idx;
 295
 296                /*
 297                 * addrs[] maps a BPF bytecode address into a real offset from
 298                 * the start of the body code.
 299                 */
 300                addrs[i] = ctx->idx * 4;
 301
 302                /*
 303                 * As an optimization, we note down which non-volatile registers
 304                 * are used so that we can only save/restore those in our
 305                 * prologue and epilogue. We do this here regardless of whether
 306                 * the actual BPF instruction uses src/dst registers or not
 307                 * (for instance, BPF_CALL does not use them). The expectation
 308                 * is that those instructions will have src_reg/dst_reg set to
 309                 * 0. Even otherwise, we just lose some prologue/epilogue
 310                 * optimization but everything else should work without
 311                 * any issues.
 312                 */
 313                if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
 314                        bpf_set_seen_register(ctx, dst_reg);
 315                if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
 316                        bpf_set_seen_register(ctx, src_reg);
 317
 318                switch (code) {
 319                /*
 320                 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
 321                 */
 322                case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
 323                case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
 324                        EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
 325                        goto bpf_alu32_trunc;
 326                case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
 327                case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
 328                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
 329                        goto bpf_alu32_trunc;
 330                case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
 331                case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 332                case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 333                case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
 334                        if (BPF_OP(code) == BPF_SUB)
 335                                imm = -imm;
 336                        if (imm) {
 337                                if (imm >= -32768 && imm < 32768)
 338                                        EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
 339                                else {
 340                                        PPC_LI32(b2p[TMP_REG_1], imm);
 341                                        EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
 342                                }
 343                        }
 344                        goto bpf_alu32_trunc;
 345                case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
 346                case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
 347                        if (BPF_CLASS(code) == BPF_ALU)
 348                                EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
 349                        else
 350                                EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
 351                        goto bpf_alu32_trunc;
 352                case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
 353                case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
 354                        if (imm >= -32768 && imm < 32768)
 355                                EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
 356                        else {
 357                                PPC_LI32(b2p[TMP_REG_1], imm);
 358                                if (BPF_CLASS(code) == BPF_ALU)
 359                                        EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
 360                                                        b2p[TMP_REG_1]));
 361                                else
 362                                        EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
 363                                                        b2p[TMP_REG_1]));
 364                        }
 365                        goto bpf_alu32_trunc;
 366                case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 367                case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
 368                        if (BPF_OP(code) == BPF_MOD) {
 369                                EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
 370                                EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
 371                                                b2p[TMP_REG_1]));
 372                                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
 373                        } else
 374                                EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
 375                        goto bpf_alu32_trunc;
 376                case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 377                case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 378                        if (BPF_OP(code) == BPF_MOD) {
 379                                EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
 380                                EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
 381                                                b2p[TMP_REG_1]));
 382                                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
 383                        } else
 384                                EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
 385                        break;
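                /*
                 * Sketch (not emitted code): both BPF_MOD lowerings above
                 * rely on the identity
                 *
                 *      rem = dst - (dst / src) * src
                 *
                 * divwu/divdu produces the quotient, then mullw/mulld and
                 * sub reconstruct the remainder in dst_reg.
                 */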
 386                case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 387                case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
 388                case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
 389                case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 390                        if (imm == 0)
 391                                return -EINVAL;
 392                        else if (imm == 1)
 393                                goto bpf_alu32_trunc;
 394
 395                        PPC_LI32(b2p[TMP_REG_1], imm);
 396                        switch (BPF_CLASS(code)) {
 397                        case BPF_ALU:
 398                                if (BPF_OP(code) == BPF_MOD) {
 399                                        EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
 400                                                        dst_reg,
 401                                                        b2p[TMP_REG_1]));
 402                                        EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
 403                                                        b2p[TMP_REG_1],
 404                                                        b2p[TMP_REG_2]));
 405                                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
 406                                                        b2p[TMP_REG_1]));
 407                                } else
 408                                        EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
 409                                                        b2p[TMP_REG_1]));
 410                                break;
 411                        case BPF_ALU64:
 412                                if (BPF_OP(code) == BPF_MOD) {
 413                                        EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
 414                                                        dst_reg,
 415                                                        b2p[TMP_REG_1]));
 416                                        EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
 417                                                        b2p[TMP_REG_1],
 418                                                        b2p[TMP_REG_2]));
 419                                        EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
 420                                                        b2p[TMP_REG_1]));
 421                                } else
 422                                        EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
 423                                                        b2p[TMP_REG_1]));
 424                                break;
 425                        }
 426                        goto bpf_alu32_trunc;
 427                case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
 428                case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 429                        EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
 430                        goto bpf_alu32_trunc;
 431
 432                /*
  433                 * Logical operations: AND/OR/XOR/LSH/[A]RSH
 434                 */
 435                case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
 436                case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 437                        EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
 438                        goto bpf_alu32_trunc;
 439                case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
 440                case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 441                        if (!IMM_H(imm))
 442                                EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
 443                        else {
 444                                /* Sign-extended */
 445                                PPC_LI32(b2p[TMP_REG_1], imm);
 446                                EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
 447                        }
 448                        goto bpf_alu32_trunc;
 449                case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 450                case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 451                        EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
 452                        goto bpf_alu32_trunc;
 453                case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
 454                case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
 455                        if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 456                                /* Sign-extended */
 457                                PPC_LI32(b2p[TMP_REG_1], imm);
 458                                EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
 459                        } else {
 460                                if (IMM_L(imm))
 461                                        EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
 462                                if (IMM_H(imm))
 463                                        EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
 464                        }
 465                        goto bpf_alu32_trunc;
 466                case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
 467                case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
 468                        EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
 469                        goto bpf_alu32_trunc;
 470                case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
 471                case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
 472                        if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 473                                /* Sign-extended */
 474                                PPC_LI32(b2p[TMP_REG_1], imm);
 475                                EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
 476                        } else {
 477                                if (IMM_L(imm))
 478                                        EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
 479                                if (IMM_H(imm))
 480                                        EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
 481                        }
 482                        goto bpf_alu32_trunc;
 483                case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
 484                        /* slw clears top 32 bits */
 485                        EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
 486                        /* skip zero extension move, but set address map. */
 487                        if (insn_is_zext(&insn[i + 1]))
 488                                addrs[++i] = ctx->idx * 4;
 489                        break;
 490                case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
 491                        EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
 492                        break;
  493                case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
 494                        /* with imm 0, we still need to clear top 32 bits */
 495                        EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
 496                        if (insn_is_zext(&insn[i + 1]))
 497                                addrs[++i] = ctx->idx * 4;
 498                        break;
  499                case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
 500                        if (imm != 0)
 501                                EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
 502                        break;
 503                case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
 504                        EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
 505                        if (insn_is_zext(&insn[i + 1]))
 506                                addrs[++i] = ctx->idx * 4;
 507                        break;
 508                case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
 509                        EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
 510                        break;
 511                case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
 512                        EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
 513                        if (insn_is_zext(&insn[i + 1]))
 514                                addrs[++i] = ctx->idx * 4;
 515                        break;
 516                case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
 517                        if (imm != 0)
 518                                EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
 519                        break;
 520                case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
 521                        EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
 522                        goto bpf_alu32_trunc;
 523                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 524                        EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
 525                        break;
 526                case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
 527                        EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
 528                        goto bpf_alu32_trunc;
 529                case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
 530                        if (imm != 0)
 531                                EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
 532                        break;
 533
 534                /*
 535                 * MOV
 536                 */
 537                case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
 538                case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 539                        if (imm == 1) {
 540                                /* special mov32 for zext */
 541                                EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 542                                break;
 543                        }
 544                        EMIT(PPC_RAW_MR(dst_reg, src_reg));
 545                        goto bpf_alu32_trunc;
 546                case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
 547                case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
 548                        PPC_LI32(dst_reg, imm);
 549                        if (imm < 0)
 550                                goto bpf_alu32_trunc;
 551                        else if (insn_is_zext(&insn[i + 1]))
 552                                addrs[++i] = ctx->idx * 4;
 553                        break;
 554
 555bpf_alu32_trunc:
 556                /* Truncate to 32-bits */
 557                if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
 558                        EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 559                break;
 560
 561                /*
 562                 * BPF_FROM_BE/LE
 563                 */
 564                case BPF_ALU | BPF_END | BPF_FROM_LE:
 565                case BPF_ALU | BPF_END | BPF_FROM_BE:
 566#ifdef __BIG_ENDIAN__
 567                        if (BPF_SRC(code) == BPF_FROM_BE)
 568                                goto emit_clear;
 569#else /* !__BIG_ENDIAN__ */
 570                        if (BPF_SRC(code) == BPF_FROM_LE)
 571                                goto emit_clear;
 572#endif
 573                        switch (imm) {
 574                        case 16:
 575                                /* Rotate 8 bits left & mask with 0x0000ff00 */
 576                                EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
  577                                /* Rotate 8 bits right & insert the LSB into reg */
 578                                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
 579                                /* Move result back to dst_reg */
 580                                EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
 581                                break;
 582                        case 32:
 583                                /*
 584                                 * Rotate word left by 8 bits:
 585                                 * 2 bytes are already in their final position
 586                                 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
 587                                 */
 588                                EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
 589                                /* Rotate 24 bits and insert byte 1 */
 590                                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
 591                                /* Rotate 24 bits and insert byte 3 */
 592                                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
 593                                EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
 594                                break;
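                                /*
                                 * Sketch: the three instructions above are the
                                 * classic rotate-and-insert byte swap. Using the
                                 * kernel's rol32() helper, this is roughly:
                                 *
                                 *      tmp = rol32(x, 8);                                        // bytes 2 and 4 already in place
                                 *      tmp = (tmp & ~0xff000000) | (rol32(x, 24) & 0xff000000);  // insert byte 1
                                 *      tmp = (tmp & ~0x0000ff00) | (rol32(x, 24) & 0x0000ff00);  // insert byte 3
                                 *      x = tmp;                                                  // == swab32(x)
                                 */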
 595                        case 64:
 596                                /*
 597                                 * Way easier and faster(?) to store the value
  598                                 * onto the stack and then use ldbrx
 599                                 *
 600                                 * ctx->seen will be reliable in pass2, but
 601                                 * the instructions generated will remain the
 602                                 * same across all passes
 603                                 */
 604                                PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
 605                                EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
 606                                EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
 607                                break;
 608                        }
 609                        break;
 610
 611emit_clear:
 612                        switch (imm) {
 613                        case 16:
 614                                /* zero-extend 16 bits into 64 bits */
 615                                EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
 616                                if (insn_is_zext(&insn[i + 1]))
 617                                        addrs[++i] = ctx->idx * 4;
 618                                break;
 619                        case 32:
 620                                if (!fp->aux->verifier_zext)
 621                                        /* zero-extend 32 bits into 64 bits */
 622                                        EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
 623                                break;
 624                        case 64:
 625                                /* nop */
 626                                break;
 627                        }
 628                        break;
 629
 630                /*
 631                 * BPF_ST NOSPEC (speculation barrier)
 632                 */
 633                case BPF_ST | BPF_NOSPEC:
 634                        break;
 635
 636                /*
 637                 * BPF_ST(X)
 638                 */
 639                case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
 640                case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 641                        if (BPF_CLASS(code) == BPF_ST) {
 642                                EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
 643                                src_reg = b2p[TMP_REG_1];
 644                        }
 645                        EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
 646                        break;
  647                case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
  648                case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
 649                        if (BPF_CLASS(code) == BPF_ST) {
 650                                EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
 651                                src_reg = b2p[TMP_REG_1];
 652                        }
 653                        EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
 654                        break;
 655                case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 656                case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 657                        if (BPF_CLASS(code) == BPF_ST) {
 658                                PPC_LI32(b2p[TMP_REG_1], imm);
 659                                src_reg = b2p[TMP_REG_1];
 660                        }
 661                        EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
 662                        break;
  663                case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
 664                case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 665                        if (BPF_CLASS(code) == BPF_ST) {
 666                                PPC_LI32(b2p[TMP_REG_1], imm);
 667                                src_reg = b2p[TMP_REG_1];
 668                        }
 669                        PPC_BPF_STL(src_reg, dst_reg, off);
 670                        break;
 671
 672                /*
 673                 * BPF_STX ATOMIC (atomic ops)
 674                 */
 675                case BPF_STX | BPF_ATOMIC | BPF_W:
 676                        if (imm != BPF_ADD) {
 677                                pr_err_ratelimited(
 678                                        "eBPF filter atomic op code %02x (@%d) unsupported\n",
 679                                        code, i);
 680                                return -ENOTSUPP;
 681                        }
 682
 683                        /* *(u32 *)(dst + off) += src */
 684
 685                        /* Get EA into TMP_REG_1 */
 686                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
 687                        tmp_idx = ctx->idx * 4;
 688                        /* load value from memory into TMP_REG_2 */
 689                        EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
 690                        /* add value from src_reg into this */
 691                        EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
 692                        /* store result back */
 693                        EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
 694                        /* we're done if this succeeded */
 695                        PPC_BCC_SHORT(COND_NE, tmp_idx);
 696                        break;
 697                case BPF_STX | BPF_ATOMIC | BPF_DW:
 698                        if (imm != BPF_ADD) {
 699                                pr_err_ratelimited(
 700                                        "eBPF filter atomic op code %02x (@%d) unsupported\n",
 701                                        code, i);
 702                                return -ENOTSUPP;
 703                        }
 704                        /* *(u64 *)(dst + off) += src */
 705
 706                        EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
 707                        tmp_idx = ctx->idx * 4;
 708                        EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
 709                        EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
 710                        EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
 711                        PPC_BCC_SHORT(COND_NE, tmp_idx);
 712                        break;
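                /*
                 * Sketch: both atomic cases above emit the usual larx/stcx.
                 * retry loop, i.e. roughly
                 *
                 *      do {
                 *              old = *ea;              // lwarx/ldarx: load and take a reservation
                 *              new = old + src;
                 *      } while (!store_conditional(ea, new));  // stwcx./stdcx.; bne- retries on failure
                 *
                 * where store_conditional() is shorthand for the
                 * reservation-checked store, not a real helper.
                 */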
 713
 714                /*
 715                 * BPF_LDX
 716                 */
 717                /* dst = *(u8 *)(ul) (src + off) */
 718                case BPF_LDX | BPF_MEM | BPF_B:
 719                        EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
 720                        if (insn_is_zext(&insn[i + 1]))
 721                                addrs[++i] = ctx->idx * 4;
 722                        break;
 723                /* dst = *(u16 *)(ul) (src + off) */
 724                case BPF_LDX | BPF_MEM | BPF_H:
 725                        EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
 726                        if (insn_is_zext(&insn[i + 1]))
 727                                addrs[++i] = ctx->idx * 4;
 728                        break;
 729                /* dst = *(u32 *)(ul) (src + off) */
 730                case BPF_LDX | BPF_MEM | BPF_W:
 731                        EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
 732                        if (insn_is_zext(&insn[i + 1]))
 733                                addrs[++i] = ctx->idx * 4;
 734                        break;
 735                /* dst = *(u64 *)(ul) (src + off) */
 736                case BPF_LDX | BPF_MEM | BPF_DW:
 737                        PPC_BPF_LL(dst_reg, src_reg, off);
 738                        break;
 739
 740                /*
 741                 * Doubleword load
  742                 * A 16-byte instruction that uses two 'struct bpf_insn'
 743                 */
 744                case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 745                        imm64 = ((u64)(u32) insn[i].imm) |
 746                                    (((u64)(u32) insn[i+1].imm) << 32);
 747                        /* Adjust for two bpf instructions */
 748                        addrs[++i] = ctx->idx * 4;
 749                        PPC_LI64(dst_reg, imm64);
 750                        break;
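                        /*
                         * Sketch: the 64-bit constant arrives split across two
                         * struct bpf_insn, low word first; for example,
                         * 0x1122334455667788 is encoded as
                         * insn[i].imm = 0x55667788, insn[i + 1].imm = 0x11223344.
                         */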
 751
 752                /*
 753                 * Return/Exit
 754                 */
 755                case BPF_JMP | BPF_EXIT:
 756                        /*
 757                         * If this isn't the very last instruction, branch to
 758                         * the epilogue. If we _are_ the last instruction,
 759                         * we'll just fall through to the epilogue.
 760                         */
 761                        if (i != flen - 1)
 762                                PPC_JMP(exit_addr);
 763                        /* else fall through to the epilogue */
 764                        break;
 765
 766                /*
 767                 * Call kernel helper or bpf function
 768                 */
 769                case BPF_JMP | BPF_CALL:
 770                        ctx->seen |= SEEN_FUNC;
 771
 772                        ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
 773                                                    &func_addr, &func_addr_fixed);
 774                        if (ret < 0)
 775                                return ret;
 776
 777                        if (func_addr_fixed)
 778                                bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
 779                        else
 780                                bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 781                        /* move return value from r3 to BPF_REG_0 */
 782                        EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
 783                        break;
 784
 785                /*
 786                 * Jumps and branches
 787                 */
 788                case BPF_JMP | BPF_JA:
 789                        PPC_JMP(addrs[i + 1 + off]);
 790                        break;
 791
 792                case BPF_JMP | BPF_JGT | BPF_K:
 793                case BPF_JMP | BPF_JGT | BPF_X:
 794                case BPF_JMP | BPF_JSGT | BPF_K:
 795                case BPF_JMP | BPF_JSGT | BPF_X:
 796                case BPF_JMP32 | BPF_JGT | BPF_K:
 797                case BPF_JMP32 | BPF_JGT | BPF_X:
 798                case BPF_JMP32 | BPF_JSGT | BPF_K:
 799                case BPF_JMP32 | BPF_JSGT | BPF_X:
 800                        true_cond = COND_GT;
 801                        goto cond_branch;
 802                case BPF_JMP | BPF_JLT | BPF_K:
 803                case BPF_JMP | BPF_JLT | BPF_X:
 804                case BPF_JMP | BPF_JSLT | BPF_K:
 805                case BPF_JMP | BPF_JSLT | BPF_X:
 806                case BPF_JMP32 | BPF_JLT | BPF_K:
 807                case BPF_JMP32 | BPF_JLT | BPF_X:
 808                case BPF_JMP32 | BPF_JSLT | BPF_K:
 809                case BPF_JMP32 | BPF_JSLT | BPF_X:
 810                        true_cond = COND_LT;
 811                        goto cond_branch;
 812                case BPF_JMP | BPF_JGE | BPF_K:
 813                case BPF_JMP | BPF_JGE | BPF_X:
 814                case BPF_JMP | BPF_JSGE | BPF_K:
 815                case BPF_JMP | BPF_JSGE | BPF_X:
 816                case BPF_JMP32 | BPF_JGE | BPF_K:
 817                case BPF_JMP32 | BPF_JGE | BPF_X:
 818                case BPF_JMP32 | BPF_JSGE | BPF_K:
 819                case BPF_JMP32 | BPF_JSGE | BPF_X:
 820                        true_cond = COND_GE;
 821                        goto cond_branch;
 822                case BPF_JMP | BPF_JLE | BPF_K:
 823                case BPF_JMP | BPF_JLE | BPF_X:
 824                case BPF_JMP | BPF_JSLE | BPF_K:
 825                case BPF_JMP | BPF_JSLE | BPF_X:
 826                case BPF_JMP32 | BPF_JLE | BPF_K:
 827                case BPF_JMP32 | BPF_JLE | BPF_X:
 828                case BPF_JMP32 | BPF_JSLE | BPF_K:
 829                case BPF_JMP32 | BPF_JSLE | BPF_X:
 830                        true_cond = COND_LE;
 831                        goto cond_branch;
 832                case BPF_JMP | BPF_JEQ | BPF_K:
 833                case BPF_JMP | BPF_JEQ | BPF_X:
 834                case BPF_JMP32 | BPF_JEQ | BPF_K:
 835                case BPF_JMP32 | BPF_JEQ | BPF_X:
 836                        true_cond = COND_EQ;
 837                        goto cond_branch;
 838                case BPF_JMP | BPF_JNE | BPF_K:
 839                case BPF_JMP | BPF_JNE | BPF_X:
 840                case BPF_JMP32 | BPF_JNE | BPF_K:
 841                case BPF_JMP32 | BPF_JNE | BPF_X:
 842                        true_cond = COND_NE;
 843                        goto cond_branch;
 844                case BPF_JMP | BPF_JSET | BPF_K:
 845                case BPF_JMP | BPF_JSET | BPF_X:
 846                case BPF_JMP32 | BPF_JSET | BPF_K:
 847                case BPF_JMP32 | BPF_JSET | BPF_X:
 848                        true_cond = COND_NE;
 849                        /* Fall through */
 850
 851cond_branch:
 852                        switch (code) {
 853                        case BPF_JMP | BPF_JGT | BPF_X:
 854                        case BPF_JMP | BPF_JLT | BPF_X:
 855                        case BPF_JMP | BPF_JGE | BPF_X:
 856                        case BPF_JMP | BPF_JLE | BPF_X:
 857                        case BPF_JMP | BPF_JEQ | BPF_X:
 858                        case BPF_JMP | BPF_JNE | BPF_X:
 859                        case BPF_JMP32 | BPF_JGT | BPF_X:
 860                        case BPF_JMP32 | BPF_JLT | BPF_X:
 861                        case BPF_JMP32 | BPF_JGE | BPF_X:
 862                        case BPF_JMP32 | BPF_JLE | BPF_X:
 863                        case BPF_JMP32 | BPF_JEQ | BPF_X:
 864                        case BPF_JMP32 | BPF_JNE | BPF_X:
 865                                /* unsigned comparison */
 866                                if (BPF_CLASS(code) == BPF_JMP32)
 867                                        EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
 868                                else
 869                                        EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
 870                                break;
 871                        case BPF_JMP | BPF_JSGT | BPF_X:
 872                        case BPF_JMP | BPF_JSLT | BPF_X:
 873                        case BPF_JMP | BPF_JSGE | BPF_X:
 874                        case BPF_JMP | BPF_JSLE | BPF_X:
 875                        case BPF_JMP32 | BPF_JSGT | BPF_X:
 876                        case BPF_JMP32 | BPF_JSLT | BPF_X:
 877                        case BPF_JMP32 | BPF_JSGE | BPF_X:
 878                        case BPF_JMP32 | BPF_JSLE | BPF_X:
 879                                /* signed comparison */
 880                                if (BPF_CLASS(code) == BPF_JMP32)
 881                                        EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
 882                                else
 883                                        EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
 884                                break;
 885                        case BPF_JMP | BPF_JSET | BPF_X:
 886                        case BPF_JMP32 | BPF_JSET | BPF_X:
 887                                if (BPF_CLASS(code) == BPF_JMP) {
 888                                        EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
 889                                                    src_reg));
 890                                } else {
 891                                        int tmp_reg = b2p[TMP_REG_1];
 892
 893                                        EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
 894                                        EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
 895                                                       31));
 896                                }
 897                                break;
 898                        case BPF_JMP | BPF_JNE | BPF_K:
 899                        case BPF_JMP | BPF_JEQ | BPF_K:
 900                        case BPF_JMP | BPF_JGT | BPF_K:
 901                        case BPF_JMP | BPF_JLT | BPF_K:
 902                        case BPF_JMP | BPF_JGE | BPF_K:
 903                        case BPF_JMP | BPF_JLE | BPF_K:
 904                        case BPF_JMP32 | BPF_JNE | BPF_K:
 905                        case BPF_JMP32 | BPF_JEQ | BPF_K:
 906                        case BPF_JMP32 | BPF_JGT | BPF_K:
 907                        case BPF_JMP32 | BPF_JLT | BPF_K:
 908                        case BPF_JMP32 | BPF_JGE | BPF_K:
 909                        case BPF_JMP32 | BPF_JLE | BPF_K:
 910                        {
 911                                bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
 912
 913                                /*
 914                                 * Need sign-extended load, so only positive
 915                                 * values can be used as imm in cmpldi
 916                                 */
 917                                if (imm >= 0 && imm < 32768) {
 918                                        if (is_jmp32)
 919                                                EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
 920                                        else
 921                                                EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
 922                                } else {
 923                                        /* sign-extending load */
 924                                        PPC_LI32(b2p[TMP_REG_1], imm);
 925                                        /* ... but unsigned comparison */
 926                                        if (is_jmp32)
 927                                                EMIT(PPC_RAW_CMPLW(dst_reg,
 928                                                          b2p[TMP_REG_1]));
 929                                        else
 930                                                EMIT(PPC_RAW_CMPLD(dst_reg,
 931                                                          b2p[TMP_REG_1]));
 932                                }
 933                                break;
 934                        }
 935                        case BPF_JMP | BPF_JSGT | BPF_K:
 936                        case BPF_JMP | BPF_JSLT | BPF_K:
 937                        case BPF_JMP | BPF_JSGE | BPF_K:
 938                        case BPF_JMP | BPF_JSLE | BPF_K:
 939                        case BPF_JMP32 | BPF_JSGT | BPF_K:
 940                        case BPF_JMP32 | BPF_JSLT | BPF_K:
 941                        case BPF_JMP32 | BPF_JSGE | BPF_K:
 942                        case BPF_JMP32 | BPF_JSLE | BPF_K:
 943                        {
 944                                bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
 945
 946                                /*
 947                                 * signed comparison, so any 16-bit value
 948                                 * can be used in cmpdi
 949                                 */
 950                                if (imm >= -32768 && imm < 32768) {
 951                                        if (is_jmp32)
 952                                                EMIT(PPC_RAW_CMPWI(dst_reg, imm));
 953                                        else
 954                                                EMIT(PPC_RAW_CMPDI(dst_reg, imm));
 955                                } else {
 956                                        PPC_LI32(b2p[TMP_REG_1], imm);
 957                                        if (is_jmp32)
 958                                                EMIT(PPC_RAW_CMPW(dst_reg,
 959                                                         b2p[TMP_REG_1]));
 960                                        else
 961                                                EMIT(PPC_RAW_CMPD(dst_reg,
 962                                                         b2p[TMP_REG_1]));
 963                                }
 964                                break;
 965                        }
 966                        case BPF_JMP | BPF_JSET | BPF_K:
 967                        case BPF_JMP32 | BPF_JSET | BPF_K:
 968                                /* andi does not sign-extend the immediate */
 969                                if (imm >= 0 && imm < 32768)
 970                                        /* PPC_ANDI is _only/always_ dot-form */
 971                                        EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
 972                                else {
 973                                        int tmp_reg = b2p[TMP_REG_1];
 974
 975                                        PPC_LI32(tmp_reg, imm);
 976                                        if (BPF_CLASS(code) == BPF_JMP) {
 977                                                EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
 978                                                            tmp_reg));
 979                                        } else {
 980                                                EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
 981                                                        tmp_reg));
 982                                                EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
 983                                                               0, 0, 31));
 984                                        }
 985                                }
 986                                break;
 987                        }
 988                        PPC_BCC(true_cond, addrs[i + 1 + off]);
 989                        break;
 990
 991                /*
 992                 * Tail call
 993                 */
 994                case BPF_JMP | BPF_TAIL_CALL:
 995                        ctx->seen |= SEEN_TAILCALL;
 996                        bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
 997                        break;
 998
 999                default:
1000                        /*
1001                         * The filter contains something cruel & unusual.
1002                         * We don't handle it, but also there shouldn't be
1003                         * anything missing from our list.
1004                         */
1005                        pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1006                                        code, i);
1007                        return -ENOTSUPP;
1008                }
1009        }
1010
1011        /* Set end-of-body-code address for exit. */
1012        addrs[i] = ctx->idx * 4;
1013
1014        return 0;
1015}
1016