mirror of https://github.com/torvalds/linux.git
bpf: Add arm64 JIT support for PROBE_MEM32 pseudo instructions.
Add support for [LDX | STX | ST], PROBE_MEM32, [B | H | W | DW]
instructions. They are similar to PROBE_MEM instructions, with the
following differences:

- PROBE_MEM32 supports store.
- PROBE_MEM32 relies on the verifier to clear the upper 32 bits of the
  src/dst register.
- PROBE_MEM32 adds the 64-bit kern_vm_start address (which is stored in
  R28 in the prologue). Due to the way bpf_arena is constructed, such an
  R28 + reg + off16 access is guaranteed to be within the arena virtual
  range, so no address check is needed at run-time.
- PROBE_MEM32 allows STX and ST. If they fault, the store is a nop. When
  LDX faults, the destination register is zeroed.

To support these on arm64, we do tmp2 = R28 + src/dst reg and then use
tmp2 as the new src/dst register. This allows us to reuse most of the
code for the normal [LDX | STX | ST] cases.

Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
Link: https://lore.kernel.org/r/20240325150716.4387-2-puranjay12@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent c07b4bcd51
commit 339af577ec
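The hunks below are against the arm64 BPF JIT (arch/arm64/net/bpf_jit_comp.c). Before reading them, a minimal sketch of the run-time address math may help. This is editorial illustration, not code from the commit, and arena_kaddr() is a hypothetical name:

	#include <stdint.h>

	/* Model of a PROBE_MEM32 access: the verifier guarantees the
	 * src/dst register carries only a 32-bit arena offset, and the
	 * JITed code adds the 64-bit arena base (kept in R28 on arm64).
	 * Per the bpf_arena design, base + offset + off16 always lands
	 * inside the arena's virtual range, so no run-time bounds check
	 * is emitted.
	 */
	static inline void *arena_kaddr(uint64_t kern_vm_start, uint64_t reg)
	{
		return (void *)(kern_vm_start + (uint32_t)reg);
	}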
@@ -29,6 +29,7 @@
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 #define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
+#define ARENA_VM_START (MAX_BPF_JIT_REG + 5)
 
 #define check_imm(bits, imm) do { \
 	if ((((imm) > 0) && ((imm) >> (bits))) || \
@@ -67,6 +68,8 @@ static const int bpf2a64[] = {
 	/* temporary register for blinding constants */
 	[BPF_REG_AX] = A64_R(9),
 	[FP_BOTTOM] = A64_R(27),
+	/* callee saved register for kern_vm_start address */
+	[ARENA_VM_START] = A64_R(28),
 };
 
 struct jit_ctx {
@@ -295,7 +298,7 @@ static bool is_lsi_offset(int offset, int scale)
 #define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
 
 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
-			  bool is_exception_cb)
+			  bool is_exception_cb, u64 arena_vm_start)
 {
 	const struct bpf_prog *prog = ctx->prog;
 	const bool is_main_prog = !bpf_is_subprog(prog);
@@ -306,6 +309,7 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 tcc = bpf2a64[TCALL_CNT];
 	const u8 fpb = bpf2a64[FP_BOTTOM];
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const int idx0 = ctx->idx;
 	int cur_offset;
 
@@ -411,6 +415,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
 
 	/* Set up function call stack */
 	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+
+	if (arena_vm_start)
+		emit_a64_mov_i64(arena_vm_base, arena_vm_start, ctx);
+
 	return 0;
 }
 
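For context: emit_a64_mov_i64() materializes a full 64-bit immediate as a MOVZ followed by MOVK instructions for each non-zero higher halfword, which is how kern_vm_start ends up pinned in R28 by the hunk above. A simplified editorial sketch of that helper's shape (the real helper already exists earlier in this file):

	/* Emit MOVZ for the low halfword (zeroing the rest), then MOVK
	 * to patch in each non-zero higher halfword of val. */
	static void mov_i64_sketch(const int reg, u64 val, struct jit_ctx *ctx)
	{
		int shift = 0;

		emit(A64_MOVZ(1, reg, val & 0xffff, shift), ctx);
		for (val >>= 16, shift = 16; val; val >>= 16, shift += 16)
			if (val & 0xffff)
				emit(A64_MOVK(1, reg, val & 0xffff, shift), ctx);
	}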
@@ -738,6 +746,7 @@ static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
 
 #define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
 #define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
+#define DONT_CLEAR 5 /* Unused ARM64 register from BPF's POV */
 
 bool ex_handler_bpf(const struct exception_table_entry *ex,
 		    struct pt_regs *regs)
@@ -745,6 +754,7 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
 	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
 	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
 
-	regs->regs[dst_reg] = 0;
+	if (dst_reg != DONT_CLEAR)
+		regs->regs[dst_reg] = 0;
 	regs->pc = (unsigned long)&ex->fixup - offset;
 	return true;
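A user-space model (editorial sketch, not kernel code) of how ex->fixup packs the two fields consumed by ex_handler_bpf() above; FIELD_PREP/FIELD_GET reduce to the shifts and masks written out here:

	#include <stdint.h>

	#define FIXUP_OFFSET_MASK	0x07ffffffu	/* GENMASK(26, 0) */
	#define FIXUP_REG_MASK		0xf8000000u	/* GENMASK(31, 27) */
	#define DONT_CLEAR		5	/* "clear nothing" sentinel */

	static uint32_t pack_fixup(uint32_t fixup_offset, uint32_t dst_reg)
	{
		/* stores pass DONT_CLEAR; loads pass the arm64 dst register */
		return (fixup_offset & FIXUP_OFFSET_MASK) |
		       ((dst_reg << 27) & FIXUP_REG_MASK);
	}

	static int fixup_dst_reg(uint32_t fixup)
	{
		return (fixup & FIXUP_REG_MASK) >> 27;
	}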
@@ -765,7 +775,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
 		return 0;
 
 	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
-	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+	    BPF_MODE(insn->code) != BPF_PROBE_MEM32)
 		return 0;
 
 	if (!ctx->prog->aux->extable ||
@@ -810,6 +821,9 @@ static int add_exception_handler(const struct bpf_insn *insn,
 
 	ex->insn = ins_offset;
 
+	if (BPF_CLASS(insn->code) != BPF_LDX)
+		dst_reg = DONT_CLEAR;
+
 	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
 		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
 
@@ -829,12 +843,13 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			  bool extra_pass)
 {
 	const u8 code = insn->code;
-	const u8 dst = bpf2a64[insn->dst_reg];
-	const u8 src = bpf2a64[insn->src_reg];
+	u8 dst = bpf2a64[insn->dst_reg];
+	u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 fpb = bpf2a64[FP_BOTTOM];
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
@@ -1237,7 +1252,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
 	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
-		if (ctx->fpb_offset > 0 && src == fp) {
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
+		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+			emit(A64_ADD(1, tmp2, src, arena_vm_base), ctx);
+			src = tmp2;
+		}
+		if (ctx->fpb_offset > 0 && src == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
 			src_adj = fpb;
 			off_adj = off + ctx->fpb_offset;
 		} else {
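The same two-step rewrite recurs in the ST and STX hunks below: a single A64_ADD folds the arena base into tmp2, and the pre-existing load/store emission then runs unchanged with tmp2 substituted for src/dst. For a 64-bit PROBE_MEM32 load, the emitted arm64 is roughly the following (an illustrative sketch; tmp2 maps to x11 per bpf2a64[], and the concrete registers and offsets depend on the program):

	/* dst = *(u64 *)(src + off), BPF_LDX | BPF_PROBE_MEM32 | BPF_DW
	 *
	 *	add	x11, x<src>, x28	// tmp2 = src + kern_vm_start
	 *	ldr	x<dst>, [x11, #off]	// covered by an extable entry:
	 *					// on fault, dst is zeroed and
	 *					// execution resumes after it
	 */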
@@ -1322,7 +1345,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_ST | BPF_MEM | BPF_H:
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_DW:
-		if (ctx->fpb_offset > 0 && dst == fp) {
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+			dst = tmp2;
+		}
+		if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
 			dst_adj = fpb;
 			off_adj = off + ctx->fpb_offset;
 		} else {
@@ -1365,6 +1396,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			}
 			break;
 		}
+
+		ret = add_exception_handler(insn, ctx, dst);
+		if (ret)
+			return ret;
 		break;
 
 	/* STX: *(size *)(dst + off) = src */
@@ -1372,7 +1407,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_STX | BPF_MEM | BPF_H:
 	case BPF_STX | BPF_MEM | BPF_B:
 	case BPF_STX | BPF_MEM | BPF_DW:
-		if (ctx->fpb_offset > 0 && dst == fp) {
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+	case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
+			dst = tmp2;
+		}
+		if (ctx->fpb_offset > 0 && dst == fp && BPF_MODE(insn->code) != BPF_PROBE_MEM32) {
 			dst_adj = fpb;
 			off_adj = off + ctx->fpb_offset;
 		} else {
@@ -1413,6 +1456,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			}
 			break;
 		}
+
+		ret = add_exception_handler(insn, ctx, dst);
+		if (ret)
+			return ret;
 		break;
 
 	case BPF_STX | BPF_ATOMIC | BPF_W:
@@ -1594,6 +1641,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bool tmp_blinded = false;
 	bool extra_pass = false;
 	struct jit_ctx ctx;
+	u64 arena_vm_start;
 	u8 *image_ptr;
 	u8 *ro_image_ptr;
 
@@ -1611,6 +1659,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
 	jit_data = prog->aux->jit_data;
 	if (!jit_data) {
 		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
@@ -1648,7 +1697,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	 * BPF line info needs ctx->offset[i] to be the offset of
 	 * instruction[i] in jited image, so build prologue first.
 	 */
-	if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
+	if (build_prologue(&ctx, was_classic, prog->aux->exception_cb,
+			   arena_vm_start)) {
 		prog = orig_prog;
 		goto out_off;
 	}
@@ -1696,7 +1746,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	ctx.idx = 0;
 	ctx.exentry_idx = 0;
 
-	build_prologue(&ctx, was_classic, prog->aux->exception_cb);
+	build_prologue(&ctx, was_classic, prog->aux->exception_cb, arena_vm_start);
 
 	if (build_body(&ctx, extra_pass)) {
 		prog = orig_prog;