x86/futex: Convert to scoped user access

Replace the open-coded implementation with the scoped user access guards.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251027083745.799714344@linutronix.de
Authored by Thomas Gleixner on 2025-10-27 09:44:02 +01:00, committed by Ingo Molnar
parent e4e28fd698
commit e02718c986
1 changed file with 33 additions and 42 deletions

@@ -46,38 +46,31 @@ do { \
 } while(0)
 static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
                                                        u32 __user *uaddr)
 {
-        if (can_do_masked_user_access())
-                uaddr = masked_user_access_begin(uaddr);
-        else if (!user_access_begin(uaddr, sizeof(u32)))
-                return -EFAULT;
-        switch (op) {
-        case FUTEX_OP_SET:
-                unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_ADD:
-                unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
-                                  uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_OR:
-                unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
-                break;
-        case FUTEX_OP_ANDN:
-                unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
-                break;
-        case FUTEX_OP_XOR:
-                unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
-                break;
-        default:
-                user_access_end();
-                return -ENOSYS;
+        scoped_user_rw_access(uaddr, Efault) {
+                switch (op) {
+                case FUTEX_OP_SET:
+                        unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_ADD:
+                        unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_OR:
+                        unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
+                        break;
+                case FUTEX_OP_ANDN:
+                        unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
+                        break;
+                case FUTEX_OP_XOR:
+                        unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
+                        break;
+                default:
+                        return -ENOSYS;
+                }
         }
-        user_access_end();
         return 0;
 Efault:
-        user_access_end();
         return -EFAULT;
 }
@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
         int ret = 0;
-        if (can_do_masked_user_access())
-                uaddr = masked_user_access_begin(uaddr);
-        else if (!user_access_begin(uaddr, sizeof(u32)))
-                return -EFAULT;
-        asm volatile("\n"
-                "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
-                "2:\n"
-                _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
-                : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-                : "r" (newval), "1" (oldval)
-                : "memory"
-                );
-        user_access_end();
-        *uval = oldval;
+        scoped_user_rw_access(uaddr, Efault) {
+                asm_inline volatile("\n"
+                        "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+                        "2:\n"
+                        _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
+                        : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+                        : "r" (newval), "1" (oldval)
+                        : "memory");
+                *uval = oldval;
+        }
         return ret;
+Efault:
+        return -EFAULT;
 }
 #endif
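
For reference, a minimal sketch of the guard pattern this patch converts to, assuming the scoped_user_rw_access() interface shown in the diff above together with the kernel's unsafe_get_user()/unsafe_put_user() accessors; the helper below is hypothetical and not part of this patch. The guard opens the user access window, a fault inside the block jumps to the supplied Efault label, and the window is closed automatically when the scope is left, including on an early return.

/* Hypothetical illustration only, not part of this patch */
static inline int example_inc_user(u32 __user *uaddr)
{
        u32 val;

        scoped_user_rw_access(uaddr, Efault) {
                /* Faults in the unsafe accessors jump to Efault */
                unsafe_get_user(val, uaddr, Efault);
                unsafe_put_user(val + 1, uaddr, Efault);
        }
        /* User access was ended when the scope was left */
        return 0;
Efault:
        return -EFAULT;
}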