Scoped user mode access and related changes:
- Implement the missing u64 user access function on ARM when
CONFIG_CPU_SPECTRE=n. This makes it possible to access a 64-bit value in
generic code with [unsafe_]get_user(). All other architectures and ARM
variants provide the relevant accessors already.
- Ensure that ASM GOTO jump label usage in the user mode access helpers
always goes through a local C scope label indirection inside the
helpers. This is required because compilers do not support an ASM GOTO
target leaving an auto cleanup scope. GCC silently fails to emit the
the cleanup invocation and CLANG fails the build.
This provides generic wrapper macros and the conversion of affected
architecture code to use them.
- Scoped user mode access with auto cleanup
Access to user mode memory can be required in hot code paths, but if it
has to be done with user-controlled pointers, the access is shielded
with a speculation barrier so that the CPU cannot speculate around the
address range check. Those speculation barriers impact performance quite
significantly. This can be avoided by "masking" the provided pointer so
it is guaranteed to be in the valid user memory access range and
otherwise to point to a guaranteed unpopulated address space. This has
to be done without branches so it creates an address dependency for the
access, which the CPU cannot speculate ahead of.
This results in repetitive and error-prone programming patterns:
        if (can_do_masked_user_access())
                from = masked_user_read_access_begin((from));
        else if (!user_read_access_begin(from, sizeof(*from)))
                return -EFAULT;
        unsafe_get_user(val, from, Efault);
        user_read_access_end();
        return 0;
Efault:
        user_read_access_end();
        return -EFAULT;
which can be replaced with scopes and automatic cleanup:
        scoped_user_read_access(from, Efault)
                unsafe_get_user(val, from, Efault);
        return 0;
Efault:
        return -EFAULT;
- Convert code which implements the above pattern over to
scoped_user_*_access(). This also corrects a couple of imbalanced
masked_*_begin() instances which are harmless on most architectures, but
prevent PowerPC from implementing the masking optimization.
- Add a missing speculation barrier in copy_from_user_iter()
-----BEGIN PGP SIGNATURE-----
iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAmksRfITHHRnbHhAbGlu
dXRyb25peC5kZQAKCRCmGPVMDXSYoVhBEACEySjWcyCrD1e0ZFMFAOJZFI2BShav
reotzCzmHYQdpVukDRxc64BgM2vN4yB04xnyMhi2o4hSTiIJhz1NzbKggsQJhVoA
psYz+xEI161HuLZnUBUBuF9RRko/HVsbGqO2JFCuOKor4GCycvjVgupR3EIN9h5T
HZEWGIgaTmN7MBj0QRrJgJkaaSTnPKOwWaNMV/F9pfk27zuB7vuV8WM9P3FaJYG+
JGa9td7VGaBpWavxgMJqfdvXWBCVDDfZ1dunWx8tPTnLxKZZZD6HlfQXhZTr2n1e
rtJpGgfVBx5Uqxn4RrhS0I7QeK1b9rrt3IU7EkFoaa3Z8LU5B7cHlm7KyicyoHhy
SzFFUszssznT/0OhA5fmgPRlqI295HynW2p1L4Xy9hC0EZ2vXJPG5rO6X3x6QwSR
asjRB7x/6JzWQUzE7/nhXd9KcB66wvQxhnjp7GqulF74aPBCtIdXXDD68YEDYkbi
dPC3NRBr0ePbsGVGWbYvYIPWcvo1u814C2io1zKwmVbiN6lCYURgQK861vfAZUP8
oP5D2a6ENgezDKoJo6eJ82inuDu64qZy7OOkU/aO3cbOuWGVyY9CjYD11x85Nr0k
UNabSOfvcmhmobtYUiAgLLrjX1grQUG3F74ZQTw513mwgMObuDAAoS11GPjY6HL6
b99WUJRv8jP66A==
=6no0
-----END PGP SIGNATURE-----
Merge tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scoped user access updates from Thomas Gleixner:
"Scoped user mode access and related changes:
- Implement the missing u64 user access function on ARM when
CONFIG_CPU_SPECTRE=n.
This makes it possible to access a 64-bit value in generic code with
[unsafe_]get_user(). All other architectures and ARM variants
provide the relevant accessors already.
- Ensure that ASM GOTO jump label usage in the user mode access
helpers always goes through a local C scope label indirection
inside the helpers.
This is required because compilers do not support an ASM GOTO
target leaving an auto cleanup scope. GCC silently fails to emit
the cleanup invocation and CLANG fails the build.
[ Editor's note: gcc-16 will have fixed the code generation issue
in commit f68fe3ddda4 ("eh: Invoke cleanups/destructors in asm
goto jumps [PR122835]"). But we obviously have to deal with clang
and older versions of gcc, so.. - Linus ]
This provides generic wrapper macros and the conversion of affected
architecture code to use them.
- Scoped user mode access with auto cleanup
Access to user mode memory can be required in hot code paths, but
if it has to be done with user-controlled pointers, the access is
shielded with a speculation barrier so that the CPU cannot
speculate around the address range check. Those speculation
barriers impact performance quite significantly.
This cost can be avoided by "masking" the provided pointer so it is
guaranteed to be in the valid user memory access range and
otherwise to point to a guaranteed unpopulated address space. This
has to be done without branches so it creates an address dependency
for the access, which the CPU cannot speculate ahead of.
This results in repetitive and error-prone programming patterns:
        if (can_do_masked_user_access())
                from = masked_user_read_access_begin((from));
        else if (!user_read_access_begin(from, sizeof(*from)))
                return -EFAULT;
        unsafe_get_user(val, from, Efault);
        user_read_access_end();
        return 0;
Efault:
        user_read_access_end();
        return -EFAULT;
which can be replaced with scopes and automatic cleanup:
        scoped_user_read_access(from, Efault)
                unsafe_get_user(val, from, Efault);
        return 0;
Efault:
        return -EFAULT;
- Convert code which implements the above pattern over to
scoped_user_*_access(). This also corrects a couple of imbalanced
masked_*_begin() instances which are harmless on most
architectures, but prevent PowerPC from implementing the masking
optimization.
- Add a missing speculation barrier in copy_from_user_iter()"
* tag 'core-uaccess-2025-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lib/strn*,uaccess: Use masked_user_{read/write}_access_begin when required
scm: Convert put_cmsg() to scoped user access
iov_iter: Add missing speculation barrier to copy_from_user_iter()
iov_iter: Convert copy_from_user_iter() to masked user access
select: Convert to scoped user access
x86/futex: Convert to scoped user access
futex: Convert to get/put_user_inline()
uaccess: Provide put/get_user_inline()
uaccess: Provide scoped user access regions
arm64: uaccess: Use unsafe wrappers for ASM GOTO
s390/uaccess: Use unsafe wrappers for ASM GOTO
riscv/uaccess: Use unsafe wrappers for ASM GOTO
powerpc/uaccess: Use unsafe wrappers for ASM GOTO
x86/uaccess: Use unsafe wrappers for ASM GOTO
uaccess: Provide ASM GOTO safe wrappers for unsafe_*_user()
ARM: uaccess: Implement missing __get_user_asm_dword()
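
As a quick orientation before the diff: a minimal usage sketch of the new
scoped_user_read_access() and get_user_inline() helpers added to
include/linux/uaccess.h by this series. The struct, fields and functions
below are invented for illustration only; the pattern mirrors the
fs/select.c and futex conversions further down.

        #include <linux/types.h>
        #include <linux/uaccess.h>

        /*
         * Illustration only (hypothetical names): fetch two fields from a
         * user supplied struct in a hot path. The scope performs the
         * (possibly masked) access begin and ends the access automatically
         * when the scope is left, including via the Efault goto.
         */
        struct uargs {
                u32 flags;
                u64 token;
        };

        static int read_uargs(struct uargs __user *uargs, struct uargs *kargs)
        {
                scoped_user_read_access(uargs, Efault) {
                        unsafe_get_user(kargs->flags, &uargs->flags, Efault);
                        unsafe_get_user(kargs->token, &uargs->token, Efault);
                }
                return 0;
        Efault:
                return -EFAULT;
        }

        /* For a single value, get_user_inline() wraps the same scope: */
        static int read_flags(u32 __user *uflags, u32 *flags)
        {
                return get_user_inline(*flags, uflags);
        }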
commit 1dce50698a
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -283,10 +283,17 @@ extern int __put_user_8(void *, unsigned long long);
 	__gu_err; \
 })
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
 #define __get_user_err(x, ptr, err, __t) \
 do { \
 	unsigned long __gu_addr = (unsigned long)(ptr); \
-	unsigned long __gu_val; \
+	__long_type(x) __gu_val; \
 	unsigned int __ua_flags; \
 	__chk_user_ptr(ptr); \
 	might_fault(); \
@@ -295,6 +302,7 @@ do { \
 	case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
 	case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
 	case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
+	case 8: __get_user_asm_dword(__gu_val, __gu_addr, err, __t); break; \
 	default: (__gu_val) = __get_user_bad(); \
 	} \
 	uaccess_restore(__ua_flags); \
@@ -353,6 +361,22 @@ do { \
 #define __get_user_asm_word(x, addr, err, __t) \
 	__get_user_asm(x, addr, err, "ldr" __t)
 
+#ifdef __ARMEB__
+#define __WORD0_OFFS	4
+#define __WORD1_OFFS	0
+#else
+#define __WORD0_OFFS	0
+#define __WORD1_OFFS	4
+#endif
+
+#define __get_user_asm_dword(x, addr, err, __t) \
+({ \
+	unsigned long __w0, __w1; \
+	__get_user_asm(__w0, addr + __WORD0_OFFS, err, "ldr" __t); \
+	__get_user_asm(__w1, addr + __WORD1_OFFS, err, "ldr" __t); \
+	(x) = ((u64)__w1 << 32) | (u64) __w0; \
+})
+
 #define __put_user_switch(x, ptr, __err, __fn) \
 do { \
 	const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
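
For orientation, a sketch (not taken from the tree; names invented) of the
kind of generic code the new case 8 path above serves: per the tag message,
with __get_user_asm_dword() in place a 64-bit value can be read through the
regular [unsafe_]get_user() machinery on ARM with CONFIG_CPU_SPECTRE=n, as
it already could on the other architectures.

        #include <linux/types.h>
        #include <linux/uaccess.h>

        /* Illustration only: generic code reading a __u64 from user space. */
        static int fetch_timestamp(const u64 __user *uts, u64 *ts)
        {
                /*
                 * sizeof(*uts) == 8 now selects __get_user_asm_dword()
                 * instead of falling through to __get_user_bad().
                 */
                return get_user(*ts, uts);
        }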
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -422,9 +422,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 }
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	uaccess_ttbr0_disable()
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
 	__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
-#define unsafe_get_user(x, ptr, label) \
+#define arch_unsafe_get_user(x, ptr, label) \
 	__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
 
 /*
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -451,7 +451,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
 #define user_write_access_begin	user_write_access_begin
 #define user_write_access_end		prevent_current_write_to_user
 
-#define unsafe_get_user(x, p, e) do { \
+#define arch_unsafe_get_user(x, p, e) do { \
 	__long_type(*(p)) __gu_val; \
 	__typeof__(*(p)) __user *__gu_addr = (p); \
 	\
@@ -459,7 +459,7 @@ user_write_access_begin(const void __user *ptr, size_t len)
 	(x) = (__typeof__(*(p)))__gu_val; \
 } while (0)
 
-#define unsafe_put_user(x, p, e) \
+#define arch_unsafe_put_user(x, p, e) \
 	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
 
 #define unsafe_copy_from_user(d, s, l, e) \
@@ -504,11 +504,11 @@ do { \
 		unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
 } while (0)
 
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
 	__get_user_size_goto(*((type *)(dst)), \
 		(__force type __user *)(src), sizeof(type), err_label)
 
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
 	__put_user_size_goto(*((type *)(src)), \
 		(__force type __user *)(dst), sizeof(type), err_label)
 
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -437,10 +437,10 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 		__clear_user(untagged_addr(to), n) : n;
 }
 
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
 	__get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)
 
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
 	__put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
 
 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
@@ -460,10 +460,10 @@ static inline void user_access_restore(unsigned long enabled) { }
  * We want the unsafe accessors to always be inlined and use
  * the error labels - thus the macro games.
  */
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
 	__put_user_nocheck(x, (ptr), label)
 
-#define unsafe_get_user(x, ptr, label)	do { \
+#define arch_unsafe_get_user(x, ptr, label)	do { \
 	__inttype(*(ptr)) __gu_val; \
 	__get_user_nocheck(__gu_val, (ptr), label); \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -468,8 +468,8 @@ do { \
 
 #endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT && CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
 
-#define __get_kernel_nofault __mvc_kernel_nofault
-#define __put_kernel_nofault __mvc_kernel_nofault
+#define arch_get_kernel_nofault __mvc_kernel_nofault
+#define arch_put_kernel_nofault __mvc_kernel_nofault
 
 void __cmpxchg_user_key_called_with_bad_pointer(void);
 
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -46,38 +46,31 @@ do { \
 } while(0)
 
 static __always_inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 		u32 __user *uaddr)
 {
-	if (can_do_masked_user_access())
-		uaddr = masked_user_access_begin(uaddr);
-	else if (!user_access_begin(uaddr, sizeof(u32)))
-		return -EFAULT;
-
-	switch (op) {
-	case FUTEX_OP_SET:
-		unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
-		break;
-	case FUTEX_OP_ADD:
-		unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval,
-				   uaddr, oparg, Efault);
-		break;
-	case FUTEX_OP_OR:
-		unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
-		break;
-	case FUTEX_OP_ANDN:
-		unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
-		break;
-	case FUTEX_OP_XOR:
-		unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
-		break;
-	default:
-		user_access_end();
-		return -ENOSYS;
+	scoped_user_rw_access(uaddr, Efault) {
+		switch (op) {
+		case FUTEX_OP_SET:
+			unsafe_atomic_op1("xchgl %0, %2", oval, uaddr, oparg, Efault);
+			break;
+		case FUTEX_OP_ADD:
+			unsafe_atomic_op1(LOCK_PREFIX "xaddl %0, %2", oval, uaddr, oparg, Efault);
+			break;
+		case FUTEX_OP_OR:
+			unsafe_atomic_op2("orl %4, %3", oval, uaddr, oparg, Efault);
+			break;
+		case FUTEX_OP_ANDN:
+			unsafe_atomic_op2("andl %4, %3", oval, uaddr, ~oparg, Efault);
+			break;
+		case FUTEX_OP_XOR:
+			unsafe_atomic_op2("xorl %4, %3", oval, uaddr, oparg, Efault);
+			break;
+		default:
+			return -ENOSYS;
+		}
 	}
-	user_access_end();
 	return 0;
 Efault:
-	user_access_end();
 	return -EFAULT;
 }
 
@@ -86,21 +79,19 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret = 0;
 
-	if (can_do_masked_user_access())
-		uaddr = masked_user_access_begin(uaddr);
-	else if (!user_access_begin(uaddr, sizeof(u32)))
-		return -EFAULT;
-	asm volatile("\n"
-		"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
-		"2:\n"
-		_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
-		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
-		: "r" (newval), "1" (oldval)
-		: "memory"
-	);
-	user_access_end();
-	*uval = oldval;
+	scoped_user_rw_access(uaddr, Efault) {
+		asm_inline volatile("\n"
+			"1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
+			"2:\n"
+			_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0)
+			: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+			: "r" (newval), "1" (oldval)
+			: "memory");
+		*uval = oldval;
+	}
 	return ret;
+Efault:
+	return -EFAULT;
 }
 
 #endif
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -528,18 +528,18 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 #define user_access_save()	smap_save()
 #define user_access_restore(x)	smap_restore(x)
 
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
 	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
 do { \
 	__inttype(*(ptr)) __gu_val; \
 	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
 } while (0)
 #else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define unsafe_get_user(x, ptr, err_label) \
+#define arch_unsafe_get_user(x, ptr, err_label) \
 do { \
 	int __gu_err; \
 	__inttype(*(ptr)) __gu_val; \
@@ -618,11 +618,11 @@ do { \
 } while (0)
 
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
 	__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
 			sizeof(type), err_label)
 #else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-#define __get_kernel_nofault(dst, src, type, err_label) \
+#define arch_get_kernel_nofault(dst, src, type, err_label) \
 do { \
 	int __kr_err; \
 	\
@@ -633,7 +633,7 @@ do { \
 } while (0)
 #endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 
-#define __put_kernel_nofault(dst, src, type, err_label) \
+#define arch_put_kernel_nofault(dst, src, type, err_label) \
 	__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
 			sizeof(type), err_label)
 
--- a/fs/select.c
+++ b/fs/select.c
@@ -776,17 +776,13 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
 {
 	// the path is hot enough for overhead of copy_from_user() to matter
 	if (from) {
-		if (can_do_masked_user_access())
-			from = masked_user_access_begin(from);
-		else if (!user_read_access_begin(from, sizeof(*from)))
-			return -EFAULT;
-		unsafe_get_user(to->p, &from->p, Efault);
-		unsafe_get_user(to->size, &from->size, Efault);
-		user_read_access_end();
+		scoped_user_read_access(from, Efault) {
+			unsafe_get_user(to->p, &from->p, Efault);
+			unsafe_get_user(to->size, &from->size, Efault);
+		}
 	}
 	return 0;
 Efault:
-	user_read_access_end();
 	return -EFAULT;
 }
 
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/cleanup.h>
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/minmax.h>
@@ -35,9 +36,17 @@
 
 #ifdef masked_user_access_begin
 #define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+#  define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+#  define masked_user_read_access_begin masked_user_access_begin
+# endif
 #else
 #define can_do_masked_user_access() 0
 #define masked_user_access_begin(src) NULL
+#define masked_user_read_access_begin(src) NULL
+#define masked_user_write_access_begin(src) NULL
 #define mask_user_address(src) (src)
 #endif
 
@@ -518,7 +527,34 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
 		long count);
 long strnlen_user_nofault(const void __user *unsafe_addr, long count);
 
-#ifndef __get_kernel_nofault
+#ifdef arch_get_kernel_nofault
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ */
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+	__label__ local_label; \
+	arch_get_kernel_nofault(dst, src, type, local_label); \
+	if (0) { \
+local_label: \
+		goto label; \
+	} \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+	__label__ local_label; \
+	arch_put_kernel_nofault(dst, src, type, local_label); \
+	if (0) { \
+local_label: \
+		goto label; \
+	} \
+} while (0)
+
+#elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */
+
 #define __get_kernel_nofault(dst, src, type, label) \
 do { \
 	type __user *p = (type __force __user *)(src); \
@@ -535,7 +571,8 @@ do { \
 	if (__put_user(data, p)) \
 		goto label; \
 } while (0)
-#endif
+
+#endif /* !__get_kernel_nofault */
 
 /**
  * get_kernel_nofault(): safely attempt to read from a location
@@ -549,7 +586,42 @@ do { \
 	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
 })
 
-#ifndef user_access_begin
+#ifdef user_access_begin
+
+#ifdef arch_unsafe_get_user
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ *
+ * Some architectures use internal local labels already, but this extra
+ * indirection here is harmless because the compiler optimizes it out
+ * completely in any case. This construct just ensures that the ASM GOTO
+ * target is always in the local scope. The C goto 'label' works correctly
+ * when leaving a cleanup() scope.
+ */
+#define unsafe_get_user(x, ptr, label) \
+do { \
+	__label__ local_label; \
+	arch_unsafe_get_user(x, ptr, local_label); \
+	if (0) { \
+local_label: \
+		goto label; \
+	} \
+} while (0)
+
+#define unsafe_put_user(x, ptr, label) \
+do { \
+	__label__ local_label; \
+	arch_unsafe_put_user(x, ptr, local_label); \
+	if (0) { \
+local_label: \
+		goto label; \
+	} \
+} while (0)
+#endif /* arch_unsafe_get_user */
+
+#else /* user_access_begin */
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
@@ -559,7 +631,8 @@ do { \
 #define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
-#endif
+#endif /* !user_access_begin */
+
 #ifndef user_write_access_begin
 #define user_write_access_begin user_access_begin
 #define user_write_access_end user_access_end
@@ -569,6 +642,239 @@ static inline void user_access_restore(unsigned long flags) { }
 #define user_read_access_end user_access_end
 #endif
 
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u)	masked_user_access_begin(u)
+#define user_rw_access_begin(u, s)	user_access_begin(u, s)
+#define user_rw_access_end()		user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode) \
+static __always_inline void __user * \
+class_user_##_mode##_begin(void __user *ptr) \
+{ \
+	return ptr; \
+} \
+ \
+static __always_inline void \
+class_user_##_mode##_end(void __user *ptr) \
+{ \
+	user_##_mode##_access_end(); \
+} \
+ \
+DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
+	     class_user_##_mode##_end(_T), \
+	     class_user_##_mode##_begin(ptr), void __user *ptr) \
+ \
+static __always_inline class_user_##_mode##_access_t \
+class_user_##_mode##_access_ptr(void __user *scope) \
+{ \
+	return scope; \
+}
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_user_access_begin - Start a scoped user access
+ * @mode:	The mode of the access class (read, write, rw)
+ * @uptr:	The pointer to access user space memory
+ * @size:	Size of the access
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * Internal helper for __scoped_user_access(). Don't use directly.
+ */
+#define __scoped_user_access_begin(mode, uptr, size, elbl) \
+({ \
+	typeof(uptr) __retptr; \
+ \
+	if (can_do_masked_user_access()) { \
+		__retptr = masked_user_##mode##_access_begin(uptr); \
+	} else { \
+		__retptr = uptr; \
+		if (!user_##mode##_access_begin(uptr, size)) \
+			goto elbl; \
+	} \
+	__retptr; \
+})
+
+/**
+ * __scoped_user_access - Open a scope for user access
+ * @mode:	The mode of the access class (read, write, rw)
+ * @uptr:	The pointer to access user space memory
+ * @size:	Size of the access
+ * @elbl:	Error label to goto when the access region is rejected. It
+ *		must be placed outside the scope
+ *
+ * If the user access function inside the scope requires a fault label, it
+ * can use @elbl or a different label outside the scope, which requires
+ * that user access which is implemented with ASM GOTO has been properly
+ * wrapped. See unsafe_get_user() for reference.
+ *
+ *	scoped_user_rw_access(ptr, efault) {
+ *		unsafe_get_user(rval, &ptr->rval, efault);
+ *		unsafe_put_user(wval, &ptr->wval, efault);
+ *	}
+ *	return 0;
+ *  efault:
+ *	return -EFAULT;
+ *
+ * The scope is internally implemented as a autoterminating nested for()
+ * loop, which can be left with 'return', 'break' and 'goto' at any
+ * point.
+ *
+ * When the scope is left user_##@_mode##_access_end() is automatically
+ * invoked.
+ *
+ * When the architecture supports masked user access and the access region
+ * which is determined by @uptr and @size is not a valid user space
+ * address, i.e. < TASK_SIZE, the scope sets the pointer to a faulting user
+ * space address and does not terminate early. This optimizes for the good
+ * case and lets the performance uncritical bad case go through the fault.
+ *
+ * The eventual modification of the pointer is limited to the scope.
+ * Outside of the scope the original pointer value is unmodified, so that
+ * the original pointer value is available for diagnostic purposes in an
+ * out of scope fault path.
+ *
+ * Nesting scoped user access into a user access scope is invalid and fails
+ * the build. Nesting into other guards, e.g. pagefault is safe.
+ *
+ * The masked variant does not check the size of the access and relies on a
+ * mapping hole (e.g. guard page) to catch an out of range pointer, the
+ * first access to user memory inside the scope has to be within
+ * @uptr ... @uptr + PAGE_SIZE - 1
+ *
+ * Don't use directly. Use scoped_masked_user_$MODE_access() instead.
+ */
+#define __scoped_user_access(mode, uptr, size, elbl) \
+for (bool done = false; !done; done = true) \
+	for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+	     !done; done = true) \
+		for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
+			/* Force modified pointer usage within the scope */ \
+			for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
+
+/**
+ * scoped_user_read_access_size - Start a scoped user read access with given size
+ * @usrc:	Pointer to the user space address to read from
+ * @size:	Size of the access starting from @usrc
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access_size(usrc, size, elbl) \
+	__scoped_user_access(read, usrc, size, elbl)
+
+/**
+ * scoped_user_read_access - Start a scoped user read access
+ * @usrc:	Pointer to the user space address to read from
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @usrc is determined via sizeof(*@usrc)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access(usrc, elbl) \
+	scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl)
+
+/**
+ * scoped_user_write_access_size - Start a scoped user write access with given size
+ * @udst:	Pointer to the user space address to write to
+ * @size:	Size of the access starting from @udst
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access_size(udst, size, elbl) \
+	__scoped_user_access(write, udst, size, elbl)
+
+/**
+ * scoped_user_write_access - Start a scoped user write access
+ * @udst:	Pointer to the user space address to write to
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @udst is determined via sizeof(*@udst)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access(udst, elbl) \
+	scoped_user_write_access_size(udst, sizeof(*(udst)), elbl)
+
+/**
+ * scoped_user_rw_access_size - Start a scoped user read/write access with given size
+ * @uptr	Pointer to the user space address to read from and write to
+ * @size:	Size of the access starting from @uptr
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access_size(uptr, size, elbl) \
+	__scoped_user_access(rw, uptr, size, elbl)
+
+/**
+ * scoped_user_rw_access - Start a scoped user read/write access
+ * @uptr	Pointer to the user space address to read from and write to
+ * @elbl:	Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @uptr is determined via sizeof(*@uptr)).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access(uptr, elbl) \
+	scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl)
+
+/**
+ * get_user_inline - Read user data inlined
+ * @val:	The variable to store the value read from user memory
+ * @usrc:	Pointer to the user space memory to read from
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of get_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define get_user_inline(val, usrc) \
+({ \
+	__label__ efault; \
+	typeof(usrc) _tmpsrc = usrc; \
+	int _ret = 0; \
+ \
+	scoped_user_read_access(_tmpsrc, efault) \
+		unsafe_get_user(val, _tmpsrc, efault); \
+	if (0) { \
+efault: \
+		_ret = -EFAULT; \
+	} \
+	_ret; \
+})
+
+/**
+ * put_user_inline - Write to user memory inlined
+ * @val:	The value to write
+ * @udst:	Pointer to the user space memory to write to
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of put_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define put_user_inline(val, udst) \
+({ \
+	__label__ efault; \
+	typeof(udst) _tmpdst = udst; \
+	int _ret = 0; \
+ \
+	scoped_user_write_access(_tmpdst, efault) \
+		unsafe_put_user(val, _tmpdst, efault); \
+	if (0) { \
+efault: \
+		_ret = -EFAULT; \
+	} \
+	_ret; \
+})
+
 #ifdef CONFIG_HARDENED_USERCOPY
 void __noreturn usercopy_abort(const char *name, const char *detail,
 			       bool to_user, unsigned long offset,
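
A minimal, standalone sketch of the local label trampoline the wrappers
above rely on. This is not kernel code; it only assumes the GCC/Clang
extensions involved (asm goto, __attribute__((cleanup)), local labels), and
the asm statement is an empty stand-in for an ASM GOTO based user access.
The ASM GOTO target stays inside the current scope; leaving the cleanup
scope happens via a plain C goto, which both compilers handle correctly.

        /* Cleanup handler: receives a pointer to the guarded variable. */
        static void drop_lock(int **lockp)
        {
                **lockp = 0;
        }

        static int example(int *lock, long uval)
        {
                {
                        __label__ local_label;
                        __attribute__((cleanup(drop_lock))) int *guard = lock;

                        *lock = 1;
                        /* Stand-in for an ASM GOTO based user access. */
                        asm goto("" : : "r" (uval) : : local_label);
                        if (0) {
        local_label:
                                /* Plain C goto: drop_lock() is still invoked. */
                                goto fail;
                        }
                }
                return 0;
        fail:
                return -1;      /* -EFAULT in the kernel wrappers */
        }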
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -581,7 +581,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 	if (flags & FLAGS_NUMA) {
 		u32 __user *naddr = (void *)uaddr + size / 2;
 
-		if (futex_get_value(&node, naddr))
+		if (get_user_inline(node, naddr))
 			return -EFAULT;
 
 		if ((node != FUTEX_NO_NODE) &&
@@ -601,7 +601,7 @@ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
 			node = numa_node_id();
 			node_updated = true;
 		}
-		if (node_updated && futex_put_value(node, naddr))
+		if (node_updated && put_user_inline(node, naddr))
 			return -EFAULT;
 	}
 
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -281,63 +281,11 @@ static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32
 	return ret;
 }
 
-/*
- * This does a plain atomic user space read, and the user pointer has
- * already been verified earlier by get_futex_key() to be both aligned
- * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
- *
- * We still want to avoid any speculation, and while __get_user() is
- * the traditional model for this, it's actually slower than doing
- * this manually these days.
- *
- * We could just have a per-architecture special function for it,
- * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
- * than force everybody to do that, write it out long-hand using
- * the low-level user-access infrastructure.
- *
- * This looks a bit overkill, but generally just results in a couple
- * of instructions.
- */
-static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
-{
-	u32 val;
-
-	if (can_do_masked_user_access())
-		from = masked_user_access_begin(from);
-	else if (!user_read_access_begin(from, sizeof(*from)))
-		return -EFAULT;
-	unsafe_get_user(val, from, Efault);
-	user_read_access_end();
-	*dest = val;
-	return 0;
-Efault:
-	user_read_access_end();
-	return -EFAULT;
-}
-
-static __always_inline int futex_put_value(u32 val, u32 __user *to)
-{
-	if (can_do_masked_user_access())
-		to = masked_user_access_begin(to);
-	else if (!user_write_access_begin(to, sizeof(*to)))
-		return -EFAULT;
-	unsafe_put_user(val, to, Efault);
-	user_write_access_end();
-	return 0;
-Efault:
-	user_write_access_end();
-	return -EFAULT;
-}
-
+/* Read from user memory with pagefaults disabled */
 static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
 {
-	int ret;
-
-	pagefault_disable();
-	ret = futex_get_value(dest, from);
-	pagefault_enable();
-
-	return ret;
+	guard(pagefault)();
+	return get_user_inline(*dest, from);
 }
 
 extern void __futex_unqueue(struct futex_q *q);
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -49,12 +49,24 @@ size_t copy_from_user_iter(void __user *iter_from, size_t progress,
 
 	if (should_fail_usercopy())
 		return len;
-	if (access_ok(iter_from, len)) {
-		to += progress;
-		instrument_copy_from_user_before(to, iter_from, len);
-		res = raw_copy_from_user(to, iter_from, len);
-		instrument_copy_from_user_after(to, iter_from, len, res);
+	if (can_do_masked_user_access()) {
+		iter_from = mask_user_address(iter_from);
+	} else {
+		if (!access_ok(iter_from, len))
+			return res;
+
+		/*
+		 * Ensure that bad access_ok() speculation will not
+		 * lead to nasty side effects *after* the copy is
+		 * finished:
+		 */
+		barrier_nospec();
 	}
+	to += progress;
+	instrument_copy_from_user_before(to, iter_from, len);
+	res = raw_copy_from_user(to, iter_from, len);
+	instrument_copy_from_user_after(to, iter_from, len, res);
+
 	return res;
 }
 
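
The hunk above is the "missing speculation barrier" fix from the tag
message folded together with the masked access conversion. Reduced to its
skeleton (a sketch, not the actual lib/iov_iter.c code; barrier_nospec()
comes from the architecture barrier headers), the rule it implements is:
either mask the pointer, which creates a branchless address dependency, or
range-check it and then stop bad access_ok() speculation before the copy
runs.

        #include <linux/uaccess.h>

        static size_t copy_in(void *dst, const void __user *src, size_t len)
        {
                if (can_do_masked_user_access()) {
                        /* No branch: out of range pointers become faulting ones. */
                        src = mask_user_address(src);
                } else {
                        if (!access_ok(src, len))
                                return len;
                        /* Keep mispredicted access_ok() from leaking data. */
                        barrier_nospec();
                }
                return raw_copy_from_user(dst, src, len);
        }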
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -126,7 +126,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	if (can_do_masked_user_access()) {
 		long retval;
 
-		src = masked_user_access_begin(src);
+		src = masked_user_read_access_begin(src);
 		retval = do_strncpy_from_user(dst, src, count, count);
 		user_read_access_end();
 		return retval;
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -99,7 +99,7 @@ long strnlen_user(const char __user *str, long count)
 	if (can_do_masked_user_access()) {
 		long retval;
 
-		str = masked_user_access_begin(str);
+		str = masked_user_read_access_begin(str);
 		retval = do_strnlen_user(str, count, count);
 		user_read_access_end();
 		return retval;
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -273,17 +273,13 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
 
 		check_object_size(data, cmlen - sizeof(*cm), true);
 
-		if (can_do_masked_user_access())
-			cm = masked_user_access_begin(cm);
-		else if (!user_write_access_begin(cm, cmlen))
-			goto efault;
-
-		unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
-		unsafe_put_user(level, &cm->cmsg_level, efault_end);
-		unsafe_put_user(type, &cm->cmsg_type, efault_end);
-		unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
-				    cmlen - sizeof(*cm), efault_end);
-		user_write_access_end();
+		scoped_user_write_access_size(cm, cmlen, efault) {
+			unsafe_put_user(cmlen, &cm->cmsg_len, efault);
+			unsafe_put_user(level, &cm->cmsg_level, efault);
+			unsafe_put_user(type, &cm->cmsg_type, efault);
+			unsafe_copy_to_user(CMSG_USER_DATA(cm), data,
+					    cmlen - sizeof(*cm), efault);
+		}
 	} else {
 		struct cmsghdr *cm = msg->msg_control;
 
@@ -301,8 +297,6 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
 	msg->msg_controllen -= cmlen;
 	return 0;
 
-efault_end:
-	user_write_access_end();
 efault:
 	return -EFAULT;
 }