mirror of https://github.com/torvalds/linux.git
bpf: verifier: refactor kfunc specialization
Move kfunc specialization (function address substitution) to a later stage
of verification to support a new use case, where we need to take into
consideration whether the kfunc is called in a sleepable context. Minor
refactoring in add_kfunc_call(): make sure that if the function fails, the
kfunc desc is not added to tab->descs (previously it could be added or
not, depending on what failed).

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251026203853.135105-9-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent e3e36edb1b
commit d869d56ca8
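For orientation before the diff: specialize_kfunc() no longer runs while
add_kfunc_call() is collecting kfunc descriptors; fixup_kfunc_call() now
invokes it during the later fixup pass, where per-call-site information is
available (the prerequisite for the sleepable-context use case named
above). The sketch below condenses how the fixup pass drives the new
interface; fixup_one_kfunc_call is a hypothetical name and the body is
distilled from the hunks that follow, not verbatim kernel code.

	/* Hypothetical condensed driver, distilled from the diff below. */
	static int fixup_one_kfunc_call(struct bpf_verifier_env *env,
					struct bpf_insn *insn)
	{
		struct bpf_kfunc_desc *desc;
		int err;

		/* find_kfunc_desc() now returns a mutable descriptor ... */
		desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
		if (!desc)
			return -EFAULT;

		/* ... so specialize_kfunc() can pick the specialized address
		 * (netdev-bound kfuncs, read-only skb dynptrs, locked xattr
		 * variants) and compute desc->imm in place, returning -EINVAL
		 * if the address overflows the 32-bit immediate.
		 */
		err = specialize_kfunc(env, desc);
		if (err)
			return err;

		if (!bpf_jit_supports_far_kfunc_call())
			insn->imm = BPF_CALL_IMM(desc->addr);
		return 0;
	}

Because the descriptor is now specialized at fixup time, a follow-up
change can make the chosen address depend on properties of the call site,
such as whether it is sleepable.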
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -209,8 +209,6 @@ static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
 static int ref_set_non_owning(struct bpf_verifier_env *env,
 			      struct bpf_reg_state *reg);
-static void specialize_kfunc(struct bpf_verifier_env *env,
-			     u32 func_id, u16 offset, unsigned long *addr);
 static bool is_trusted_reg(const struct bpf_reg_state *reg);
 
 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
@@ -3126,6 +3124,8 @@ struct bpf_kfunc_btf_tab {
 	u32 nr_descs;
 };
 
+static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc);
+
 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
 {
 	const struct bpf_kfunc_desc *d0 = a;
@@ -3143,7 +3143,7 @@ static int kfunc_btf_cmp_by_off(const void *a, const void *b)
 	return d0->offset - d1->offset;
 }
 
-static const struct bpf_kfunc_desc *
+static struct bpf_kfunc_desc *
 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
 {
 	struct bpf_kfunc_desc desc = {
@@ -3266,12 +3266,12 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
 {
 	const struct btf_type *func, *func_proto;
 	struct bpf_kfunc_btf_tab *btf_tab;
+	struct btf_func_model func_model;
 	struct bpf_kfunc_desc_tab *tab;
 	struct bpf_prog_aux *prog_aux;
 	struct bpf_kfunc_desc *desc;
 	const char *func_name;
 	struct btf *desc_btf;
-	unsigned long call_imm;
 	unsigned long addr;
 	int err;
 
@@ -3355,19 +3355,6 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
 			func_name);
 		return -EINVAL;
 	}
-	specialize_kfunc(env, func_id, offset, &addr);
-
-	if (bpf_jit_supports_far_kfunc_call()) {
-		call_imm = func_id;
-	} else {
-		call_imm = BPF_CALL_IMM(addr);
-		/* Check whether the relative offset overflows desc->imm */
-		if ((unsigned long)(s32)call_imm != call_imm) {
-			verbose(env, "address of kernel function %s is out of range\n",
-				func_name);
-			return -EINVAL;
-		}
-	}
 
 	if (bpf_dev_bound_kfunc_id(func_id)) {
 		err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
@@ -3375,18 +3362,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
 			return err;
 	}
 
-	desc = &tab->descs[tab->nr_descs++];
-	desc->func_id = func_id;
-	desc->imm = call_imm;
-	desc->offset = offset;
-	desc->addr = addr;
 	err = btf_distill_func_proto(&env->log, desc_btf,
 				     func_proto, func_name,
-				     &desc->func_model);
-	if (!err)
-		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
-		     kfunc_desc_cmp_by_id_off, NULL);
-	return err;
+				     &func_model);
+	if (err)
+		return err;
+
+	desc = &tab->descs[tab->nr_descs++];
+	desc->func_id = func_id;
+	desc->offset = offset;
+	desc->addr = addr;
+	desc->func_model = func_model;
+	sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
+	     kfunc_desc_cmp_by_id_off, NULL);
+	return 0;
 }
 
 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
@@ -21880,46 +21869,57 @@ static int fixup_call_args(struct bpf_verifier_env *env)
 }
 
 /* replace a generic kfunc with a specialized version if necessary */
-static void specialize_kfunc(struct bpf_verifier_env *env,
-			     u32 func_id, u16 offset, unsigned long *addr)
+static int specialize_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_desc *desc)
 {
 	struct bpf_prog *prog = env->prog;
 	bool seen_direct_write;
 	void *xdp_kfunc;
 	bool is_rdonly;
+	u32 func_id = desc->func_id;
+	u16 offset = desc->offset;
+	unsigned long addr = desc->addr, call_imm;
+
+	if (offset) /* return if module BTF is used */
+		goto set_imm;
 
 	if (bpf_dev_bound_kfunc_id(func_id)) {
 		xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
-		if (xdp_kfunc) {
-			*addr = (unsigned long)xdp_kfunc;
-			return;
-		}
+		if (xdp_kfunc)
+			addr = (unsigned long)xdp_kfunc;
 		/* fallback to default kfunc when not supported by netdev */
-	}
-
-	if (offset)
-		return;
-
-	if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
+	} else if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
 		seen_direct_write = env->seen_direct_write;
 		is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
 
 		if (is_rdonly)
-			*addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
+			addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
 
 		/* restore env->seen_direct_write to its original value, since
 		 * may_access_direct_pkt_data mutates it
 		 */
 		env->seen_direct_write = seen_direct_write;
+	} else if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr]) {
+		if (bpf_lsm_has_d_inode_locked(prog))
+			addr = (unsigned long)bpf_set_dentry_xattr_locked;
+	} else if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr]) {
+		if (bpf_lsm_has_d_inode_locked(prog))
+			addr = (unsigned long)bpf_remove_dentry_xattr_locked;
 	}
-
-	if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr] &&
-	    bpf_lsm_has_d_inode_locked(prog))
-		*addr = (unsigned long)bpf_set_dentry_xattr_locked;
-
-	if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr] &&
-	    bpf_lsm_has_d_inode_locked(prog))
-		*addr = (unsigned long)bpf_remove_dentry_xattr_locked;
+set_imm:
+	if (bpf_jit_supports_far_kfunc_call()) {
+		call_imm = func_id;
+	} else {
+		call_imm = BPF_CALL_IMM(addr);
+		/* Check whether the relative offset overflows desc->imm */
+		if ((unsigned long)(s32)call_imm != call_imm) {
+			verbose(env, "address of kernel func_id %u is out of range\n", func_id);
+			return -EINVAL;
+		}
+	}
+	desc->imm = call_imm;
+	desc->addr = addr;
+	return 0;
 }
 
 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
@@ -21942,7 +21942,8 @@ static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
 {
-	const struct bpf_kfunc_desc *desc;
+	struct bpf_kfunc_desc *desc;
+	int err;
 
 	if (!insn->imm) {
 		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
@@ -21962,6 +21963,10 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EFAULT;
 	}
 
+	err = specialize_kfunc(env, desc);
+	if (err)
+		return err;
+
 	if (!bpf_jit_supports_far_kfunc_call())
 		insn->imm = BPF_CALL_IMM(desc->addr);
 	if (insn->off)
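A note on the overflow check that moved into specialize_kfunc(): it works
because BPF_CALL_IMM() encodes the target as a displacement from
__bpf_call_base, and that displacement must round-trip through the s32
insn->imm field. A sketch of the invariant, assuming the kernel's
definitions of BPF_CALL_IMM and __bpf_call_base:

	/* BPF_CALL_IMM(x) is (void *)(x) - (void *)__bpf_call_base; the
	 * interpreter/JIT later recovers the address as
	 * __bpf_call_base + insn->imm, so the displacement must survive
	 * truncation to s32.
	 */
	call_imm = BPF_CALL_IMM(addr);
	if ((unsigned long)(s32)call_imm != call_imm)
		return -EINVAL;	/* kfunc too far from __bpf_call_base */

On JITs with bpf_jit_supports_far_kfunc_call() this limit does not apply,
which is why that branch stores the func_id instead of a displacement.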