Skip to content

Commit 5e581da

Browse files
borkmann authored and Alexei Starovoitov committed
bpf: make unknown opcode handling more robust
Recent findings by syzkaller fixed in 7891a87 ("bpf: arsh is not supported in 32 bit alu thus reject it") triggered a warning in the interpreter due to unknown opcode not being rejected by the verifier. The 'return 0' for an unknown opcode is really not optimal, since with BPF to BPF calls, this would go untracked by the verifier. Do two things here to improve the situation: i) perform basic insn sanity check early on in the verification phase and reject every non-uapi insn right there. The bpf_opcode_in_insntable() table reuses the same mapping as the jumptable in ___bpf_prog_run() sans the non-public mappings. And ii) in ___bpf_prog_run() we do need to BUG in the case where the verifier would ever create an unknown opcode due to some rewrites. Note that JITs do not have such issues since they would punt to interpreter in these situations. Moreover, the BPF_JIT_ALWAYS_ON would also help to avoid such unknown opcodes in the first place. Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 2a5418a commit 5e581da

File tree

3 files changed

+154
-105
lines changed

3 files changed

+154
-105
lines changed

include/linux/filter.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -688,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
688688
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
689689
void bpf_prog_free(struct bpf_prog *fp);
690690

691+
bool bpf_opcode_in_insntable(u8 code);
692+
691693
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
692694
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
693695
gfp_t gfp_extra_flags);

kernel/bpf/core.c

Lines changed: 145 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -782,6 +782,137 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
782782
}
783783
EXPORT_SYMBOL_GPL(__bpf_call_base);
784784

785+
/* All UAPI available opcodes. */
786+
#define BPF_INSN_MAP(INSN_2, INSN_3) \
787+
/* 32 bit ALU operations. */ \
788+
/* Register based. */ \
789+
INSN_3(ALU, ADD, X), \
790+
INSN_3(ALU, SUB, X), \
791+
INSN_3(ALU, AND, X), \
792+
INSN_3(ALU, OR, X), \
793+
INSN_3(ALU, LSH, X), \
794+
INSN_3(ALU, RSH, X), \
795+
INSN_3(ALU, XOR, X), \
796+
INSN_3(ALU, MUL, X), \
797+
INSN_3(ALU, MOV, X), \
798+
INSN_3(ALU, DIV, X), \
799+
INSN_3(ALU, MOD, X), \
800+
INSN_2(ALU, NEG), \
801+
INSN_3(ALU, END, TO_BE), \
802+
INSN_3(ALU, END, TO_LE), \
803+
/* Immediate based. */ \
804+
INSN_3(ALU, ADD, K), \
805+
INSN_3(ALU, SUB, K), \
806+
INSN_3(ALU, AND, K), \
807+
INSN_3(ALU, OR, K), \
808+
INSN_3(ALU, LSH, K), \
809+
INSN_3(ALU, RSH, K), \
810+
INSN_3(ALU, XOR, K), \
811+
INSN_3(ALU, MUL, K), \
812+
INSN_3(ALU, MOV, K), \
813+
INSN_3(ALU, DIV, K), \
814+
INSN_3(ALU, MOD, K), \
815+
/* 64 bit ALU operations. */ \
816+
/* Register based. */ \
817+
INSN_3(ALU64, ADD, X), \
818+
INSN_3(ALU64, SUB, X), \
819+
INSN_3(ALU64, AND, X), \
820+
INSN_3(ALU64, OR, X), \
821+
INSN_3(ALU64, LSH, X), \
822+
INSN_3(ALU64, RSH, X), \
823+
INSN_3(ALU64, XOR, X), \
824+
INSN_3(ALU64, MUL, X), \
825+
INSN_3(ALU64, MOV, X), \
826+
INSN_3(ALU64, ARSH, X), \
827+
INSN_3(ALU64, DIV, X), \
828+
INSN_3(ALU64, MOD, X), \
829+
INSN_2(ALU64, NEG), \
830+
/* Immediate based. */ \
831+
INSN_3(ALU64, ADD, K), \
832+
INSN_3(ALU64, SUB, K), \
833+
INSN_3(ALU64, AND, K), \
834+
INSN_3(ALU64, OR, K), \
835+
INSN_3(ALU64, LSH, K), \
836+
INSN_3(ALU64, RSH, K), \
837+
INSN_3(ALU64, XOR, K), \
838+
INSN_3(ALU64, MUL, K), \
839+
INSN_3(ALU64, MOV, K), \
840+
INSN_3(ALU64, ARSH, K), \
841+
INSN_3(ALU64, DIV, K), \
842+
INSN_3(ALU64, MOD, K), \
843+
/* Call instruction. */ \
844+
INSN_2(JMP, CALL), \
845+
/* Exit instruction. */ \
846+
INSN_2(JMP, EXIT), \
847+
/* Jump instructions. */ \
848+
/* Register based. */ \
849+
INSN_3(JMP, JEQ, X), \
850+
INSN_3(JMP, JNE, X), \
851+
INSN_3(JMP, JGT, X), \
852+
INSN_3(JMP, JLT, X), \
853+
INSN_3(JMP, JGE, X), \
854+
INSN_3(JMP, JLE, X), \
855+
INSN_3(JMP, JSGT, X), \
856+
INSN_3(JMP, JSLT, X), \
857+
INSN_3(JMP, JSGE, X), \
858+
INSN_3(JMP, JSLE, X), \
859+
INSN_3(JMP, JSET, X), \
860+
/* Immediate based. */ \
861+
INSN_3(JMP, JEQ, K), \
862+
INSN_3(JMP, JNE, K), \
863+
INSN_3(JMP, JGT, K), \
864+
INSN_3(JMP, JLT, K), \
865+
INSN_3(JMP, JGE, K), \
866+
INSN_3(JMP, JLE, K), \
867+
INSN_3(JMP, JSGT, K), \
868+
INSN_3(JMP, JSLT, K), \
869+
INSN_3(JMP, JSGE, K), \
870+
INSN_3(JMP, JSLE, K), \
871+
INSN_3(JMP, JSET, K), \
872+
INSN_2(JMP, JA), \
873+
/* Store instructions. */ \
874+
/* Register based. */ \
875+
INSN_3(STX, MEM, B), \
876+
INSN_3(STX, MEM, H), \
877+
INSN_3(STX, MEM, W), \
878+
INSN_3(STX, MEM, DW), \
879+
INSN_3(STX, XADD, W), \
880+
INSN_3(STX, XADD, DW), \
881+
/* Immediate based. */ \
882+
INSN_3(ST, MEM, B), \
883+
INSN_3(ST, MEM, H), \
884+
INSN_3(ST, MEM, W), \
885+
INSN_3(ST, MEM, DW), \
886+
/* Load instructions. */ \
887+
/* Register based. */ \
888+
INSN_3(LDX, MEM, B), \
889+
INSN_3(LDX, MEM, H), \
890+
INSN_3(LDX, MEM, W), \
891+
INSN_3(LDX, MEM, DW), \
892+
/* Immediate based. */ \
893+
INSN_3(LD, IMM, DW), \
894+
/* Misc (old cBPF carry-over). */ \
895+
INSN_3(LD, ABS, B), \
896+
INSN_3(LD, ABS, H), \
897+
INSN_3(LD, ABS, W), \
898+
INSN_3(LD, IND, B), \
899+
INSN_3(LD, IND, H), \
900+
INSN_3(LD, IND, W)
901+
902+
bool bpf_opcode_in_insntable(u8 code)
903+
{
904+
#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
905+
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
906+
static const bool public_insntable[256] = {
907+
[0 ... 255] = false,
908+
/* Now overwrite non-defaults ... */
909+
BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
910+
};
911+
#undef BPF_INSN_3_TBL
912+
#undef BPF_INSN_2_TBL
913+
return public_insntable[code];
914+
}
915+
785916
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
786917
/**
787918
* __bpf_prog_run - run eBPF program on a given context
@@ -793,115 +924,18 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
793924
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
794925
{
795926
u64 tmp;
927+
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
928+
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
796929
static const void *jumptable[256] = {
797930
[0 ... 255] = &&default_label,
798931
/* Now overwrite non-defaults ... */
799-
/* 32 bit ALU operations */
800-
[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
801-
[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
802-
[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
803-
[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
804-
[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
805-
[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
806-
[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
807-
[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
808-
[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
809-
[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
810-
[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
811-
[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
812-
[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
813-
[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
814-
[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
815-
[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
816-
[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
817-
[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
818-
[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
819-
[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
820-
[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
821-
[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
822-
[BPF_ALU | BPF_NEG] = &&ALU_NEG,
823-
[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
824-
[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
825-
/* 64 bit ALU operations */
826-
[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
827-
[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
828-
[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
829-
[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
830-
[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
831-
[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
832-
[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
833-
[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
834-
[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
835-
[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
836-
[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
837-
[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
838-
[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
839-
[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
840-
[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
841-
[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
842-
[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
843-
[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
844-
[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
845-
[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
846-
[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
847-
[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
848-
[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
849-
[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
850-
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
851-
/* Call instruction */
852-
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
932+
BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
933+
/* Non-UAPI available opcodes. */
853934
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
854935
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
855-
/* Jumps */
856-
[BPF_JMP | BPF_JA] = &&JMP_JA,
857-
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
858-
[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
859-
[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
860-
[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
861-
[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
862-
[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
863-
[BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
864-
[BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
865-
[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
866-
[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
867-
[BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
868-
[BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
869-
[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
870-
[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
871-
[BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
872-
[BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
873-
[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
874-
[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
875-
[BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
876-
[BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
877-
[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
878-
[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
879-
/* Program return */
880-
[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
881-
/* Store instructions */
882-
[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
883-
[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
884-
[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
885-
[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
886-
[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
887-
[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
888-
[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
889-
[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
890-
[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
891-
[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
892-
/* Load instructions */
893-
[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
894-
[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
895-
[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
896-
[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
897-
[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
898-
[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
899-
[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
900-
[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
901-
[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
902-
[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
903-
[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
904936
};
937+
#undef BPF_INSN_3_LBL
938+
#undef BPF_INSN_2_LBL
905939
u32 tail_call_cnt = 0;
906940
void *ptr;
907941
int off;
@@ -1302,8 +1336,14 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
13021336
goto load_byte;
13031337

13041338
default_label:
1305-
/* If we ever reach this, we have a bug somewhere. */
1306-
WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
1339+
/* If we ever reach this, we have a bug somewhere. Die hard here
1340+
* instead of just returning 0; we could be somewhere in a subprog,
1341+
* so execution could continue otherwise which we do /not/ want.
1342+
*
1343+
* Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1344+
*/
1345+
pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1346+
BUG_ON(1);
13071347
return 0;
13081348
}
13091349
STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */

kernel/bpf/verifier.c

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4981,6 +4981,13 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
49814981
next_insn:
49824982
insn++;
49834983
i++;
4984+
continue;
4985+
}
4986+
4987+
/* Basic sanity check before we invest more work here. */
4988+
if (!bpf_opcode_in_insntable(insn->code)) {
4989+
verbose(env, "unknown opcode %02x\n", insn->code);
4990+
return -EINVAL;
49844991
}
49854992
}
49864993

0 commit comments

Comments (0)