mirror of https://github.com/torvalds/linux.git
bpf-fixes
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmkFVBIACgkQ6rmadz2v
bTrWVg//chctHGZZcP2ZbLxDDwLOwfjjsUY2COaD9P3ZN8/vWX6GEbvElLulkLgD
Hwv3pe2C6NzHN9QH37M+WtJmLE1vI5aRuMXzpBKhOtOFAE5BfHzXeON0M4pswZd1
jh7f4w7mBdW3MMoR2Dg/l+lbGxDKFfb9jfD1blm+uOuBodHdbIpa66Mscakannrx
tNWoauPDcu7fu7b+KCItnICC+VewaoDmhr20Q8X/kwvqbNPZ98D/tzUw7YlngO1d
p+K/oKVAfXbWbW79agNoqD+zVDKAos7dQgqCDY/cuZhJNzt4xBZfTkM62SXdHU7g
aCXHg+qxoWMrYTWGGueAhwf4gB3YIe0atKxP9w5gbjtxbWa5Y6oTyIpgGKvO5SMj
7qsmg/m338kS4aKQVjr9D042W+qqxRjrn2eF/x4Sth1GXMJd1ny14NpoNGEk/xsU
TZfBdFgNOYUa1jeK3N3oEDdlxx8ITA9gsNPzSy9O8Ke6WRHp5u9Ob/7UIJsiVYWw
6SPdIhagv719m93GvAC4Xe3BrRi1dmf5UX39oOqpnGKkg4lT/xNu4aYP89UbFVGW
XgTPX+Cm7kRKb32Fv9GiLC0sTQEWVAiB0jVTGB9E8v15P7ybJ/9IrcRNcwcrKGNS
ny+cn1SR+CmX6c8TdliSzLdtgGuPk3QrXkwWs4442IphtbPnhE4=
=t7MS
-----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Mark migrate_disable/enable() as always_inline to avoid issues with
   partial inlining (Yonghong Song)

 - Fix powerpc stack register definition in libbpf bpf_tracing.h
   (Andrii Nakryiko)

 - Reject negative head_room in __bpf_skb_change_head (Daniel Borkmann)

 - Conditionally include dynptr copy kfuncs (Malin Jonsson)

 - Sync pending IRQ work before freeing BPF ring buffer (Noorain Eqbal)

 - Do not audit capability check in x86 do_jit() (Ondrej Mosnacek)

 - Fix arm64 JIT of BPF_ST insn when it writes into arena memory
   (Puranjay Mohan)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf/arm64: Fix BPF_ST into arena memory
  bpf: Make migrate_disable always inline to avoid partial inlining
  bpf: Reject negative head_room in __bpf_skb_change_head
  bpf: Conditionally include dynptr copy kfuncs
  libbpf: Fix powerpc's stack register definition in bpf_tracing.h
  bpf: Do not audit capability check in do_jit()
  bpf: Sync pending IRQ work before freeing ring buffer
commit ba36dd5ee6
arch/arm64/net/bpf_jit_comp.c:

@@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const u8 priv_sp = bpf2a64[PRIVATE_SP];

@@ -1757,8 +1758,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
 		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
-			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
-			dst = tmp2;
+			emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
+			dst = tmp3;
 		}
 		if (dst == fp) {
 			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
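The fix holds the arena-adjusted destination address in a dedicated scratch register, TMP_REG_3. A plausible reading of the bug (the actual clobber site is outside this hunk): the BPF_ST store path reuses tmp2 internally while materializing the immediate or offset, so parking the address in tmp2 let a later scratch use overwrite it. An abstract, runnable C illustration of that bug class, with hypothetical variable names standing in for registers:

#include <stdio.h>

int main(void)
{
	unsigned long dst = 0x100, arena_base = 0x4000, off = 8;
	unsigned long tmp2, tmp3;

	/* buggy pattern: one scratch holds two overlapping live values */
	tmp2 = dst + arena_base;   /* holds the adjusted store address */
	tmp2 = off;                /* reused as scratch: address is gone */
	printf("buggy: address clobbered, tmp2 = %#lx\n", tmp2);

	/* fixed pattern: a dedicated scratch keeps the address live */
	tmp3 = dst + arena_base;
	tmp2 = off;
	printf("fixed: store to %#lx + offset %#lx\n", tmp3, tmp2);
	return 0;
}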
arch/x86/net/bpf_jit_comp.c:

@@ -2701,7 +2701,7 @@ st: if (is_imm8(insn->off))
 		/* Update cleanup_addr */
 		ctx->cleanup_addr = proglen;
 		if (bpf_prog_was_classic(bpf_prog) &&
-		    !capable(CAP_SYS_ADMIN)) {
+		    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
 			u8 *ip = image + addrs[i - 1];

 			if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
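Both forms make the same decision here; the difference is the side effect. The check does not gate an operation, it only decides whether to emit the Spectre BHB barrier for a classic BPF program JITed by an unprivileged task, so the audited capable() call was generating spurious CAP_SYS_ADMIN denial records. A rough userspace sketch of the idiom, with stub functions that are not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for capable() and ns_capable_noaudit() */
static bool capable_audited(void)
{
	puts("audit: CAP_SYS_ADMIN denial logged");  /* unwanted noise */
	return false;
}

static bool capable_noaudit(void)
{
	return false;  /* same answer, no audit record */
}

int main(void)
{
	/* before: every unprivileged JIT of a classic program logged a denial */
	if (!capable_audited())
		puts("emit Spectre BHB barrier");

	/* after: same mitigation decision, no audit side effect */
	if (!capable_noaudit())
		puts("emit Spectre BHB barrier");
	return 0;
}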
include/linux/sched.h:

@@ -2407,12 +2407,12 @@ static inline void __migrate_enable(void) { }
  * be defined in kernel/sched/core.c.
  */
 #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
-static inline void migrate_disable(void)
+static __always_inline void migrate_disable(void)
 {
 	__migrate_disable();
 }

-static inline void migrate_enable(void)
+static __always_inline void migrate_enable(void)
 {
 	__migrate_enable();
 }
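Plain static inline is only a hint: the compiler may emit an out-of-line copy or a partially inlined .part.N fragment of the wrapper, which is what this fix guards against. __always_inline forces the wrapper to disappear into its callers. A compilable sketch of the two attributes, with illustrative function names:

#include <stdio.h>

/* GCC/Clang spelling of the kernel's __always_inline */
#define my_always_inline inline __attribute__((always_inline))

static void do_work(void) { puts("work"); }

static inline void hinted(void)           /* may be partially inlined */
{
	do_work();
}

static my_always_inline void forced(void) /* guaranteed fully inline */
{
	do_work();
}

int main(void)
{
	hinted();
	forced();
	return 0;
}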
kernel/bpf/helpers.c:

@@ -4345,6 +4345,7 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE
 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_local_irq_save)
 BTF_ID_FLAGS(func, bpf_local_irq_restore)
+#ifdef CONFIG_BPF_EVENTS
 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)

@@ -4353,6 +4354,7 @@ BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+#endif
 #ifdef CONFIG_DMA_SHARED_BUFFER
 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
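The dynptr copy kfuncs are implemented in tracing code that is only built when CONFIG_BPF_EVENTS is set, so registering their BTF IDs unconditionally references functions that may not exist in the image; the new guard mirrors the existing CONFIG_DMA_SHARED_BUFFER block just below it. A minimal, standalone demo of the register-only-what-is-built pattern, with illustrative names:

#include <stddef.h>
#include <stdio.h>

#define CONFIG_BPF_EVENTS 1  /* set to 0 and the table entry disappears */

static void bpf_probe_read_user_dynptr_stub(void) { puts("kfunc called"); }

/* registration table: entries must refer to compiled-in functions */
static void (*const kfunc_tab[])(void) = {
#if CONFIG_BPF_EVENTS
	bpf_probe_read_user_dynptr_stub,
#endif
};

int main(void)
{
	for (size_t i = 0; i < sizeof(kfunc_tab) / sizeof(kfunc_tab[0]); i++)
		kfunc_tab[i]();
	return 0;
}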
kernel/bpf/ringbuf.c:

@@ -216,6 +216,8 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)

 static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
 {
+	irq_work_sync(&rb->work);
+
 	/* copy pages pointer and nr_pages to local variable, as we are going
 	 * to unmap rb itself with vunmap() below
 	 */
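The ring buffer's wakeup is delivered through a deferred irq_work item embedded in the buffer itself, so work still in flight at teardown would dereference freed memory; irq_work_sync() waits for any pending callback to finish first. A runnable userspace analogue of the sync-before-free pattern, using a thread in place of irq_work (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ringbuf {
	pthread_t work;  /* stand-in for the embedded irq_work */
	int consumer_pos;
};

static void *wakeup_work(void *arg)
{
	struct ringbuf *rb = arg;
	rb->consumer_pos = 0;  /* touches rb: must finish before free */
	return NULL;
}

int main(void)
{
	struct ringbuf *rb = calloc(1, sizeof(*rb));

	pthread_create(&rb->work, NULL, wakeup_work, rb);

	/* the fix: wait for outstanding work before freeing its target */
	pthread_join(rb->work, NULL);
	free(rb);  /* safe: no pending work can reach rb anymore */
	puts("freed safely");
	return 0;
}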
net/core/filter.c:

@@ -3877,7 +3877,8 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
 	u32 new_len = skb->len + head_room;
 	int ret;

-	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
+	if (unlikely(flags || (int)head_room < 0 ||
+		     (!skb_is_gso(skb) && new_len > max_len) ||
 		     new_len < skb->len))
 		return -EINVAL;

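head_room is a caller-controlled u32, so a huge value (equivalently, a negative int) makes skb->len + head_room wrap. The old new_len < skb->len test catches some wraps, but a sign-bit head_room can land above skb->len, and for GSO skbs the max_len bound is skipped entirely; rejecting (int)head_room < 0 closes the whole class. A small standalone demo of the arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t skb_len = 1500;

	/* sign-bit head_room: wraps past 2^31 but stays above skb_len */
	uint32_t head_room = 0x80000000u;
	uint32_t new_len = skb_len + head_room;

	printf("new_len = %u\n", new_len);
	printf("old wrap check (new_len < skb_len): %d\n", new_len < skb_len);
	printf("new check ((int)head_room < 0):     %d\n",
	       (int32_t)head_room < 0);
	return 0;
}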
tools/lib/bpf/bpf_tracing.h:

@@ -311,7 +311,7 @@ struct pt_regs___arm64 {
 #define __PT_RET_REG regs[31]
 #define __PT_FP_REG __unsupported__
 #define __PT_RC_REG gpr[3]
-#define __PT_SP_REG sp
+#define __PT_SP_REG gpr[1]
 #define __PT_IP_REG nip

 #elif defined(bpf_target_sparc)
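On powerpc the stack pointer is general-purpose register r1, which struct pt_regs exposes as gpr[1]; there is no field named sp, so the old define broke any BPF program that used the stack-pointer accessor on that target. A simplified, compilable sketch of how the per-arch define feeds the generic accessor (abridged struct; the real macros live in bpf_tracing.h):

#include <stdio.h>

/* abridged powerpc register layout: the stack pointer lives in r1 */
struct pt_regs { unsigned long gpr[32]; unsigned long nip; };

#define __PT_SP_REG gpr[1]                 /* the fixed definition */
#define PT_REGS_SP(x) ((x)->__PT_SP_REG)   /* simplified accessor */

int main(void)
{
	struct pt_regs regs = { .gpr = { [1] = 0x7fffdeadbeefUL } };

	printf("stack pointer = %#lx\n", PT_REGS_SP(&regs));
	return 0;
}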