mirror of https://github.com/torvalds/linux.git
rseq: Switch to TIF_RSEQ if supported
TIF_NOTIFY_RESUME is a multiplexing TIF bit, which is suboptimal especially for the RSEQ fast path, which depends on it without really handling it.

Define a separate TIF_RSEQ in the generic TIF space and enable the full separation of fast and slow path for architectures which utilize that.

This avoids the hassle with invocations of resume_user_mode_work() from hypervisors, which clear TIF_NOTIFY_RESUME, and makes the therefore required re-evaluation at the end of vcpu_run() a NOOP on architectures which utilize the generic TIF space and have a separate TIF_RSEQ.

The hypervisor TIF handling does not include the separate TIF_RSEQ as there is no point in doing so. The guest neither knows nor cares about the VMM host application's RSEQ state. That state only becomes relevant when the ioctl() returns to user space.

The fast path implementation still utilizes TIF_NOTIFY_RESUME for failure handling, but that only happens within exit_to_user_mode_loop(), so arguably the hypervisor ioctl() code is long done by then.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084307.903622031@linutronix.de
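To illustrate the distinction the patch draws, here is a minimal user space model (not kernel code; flag values and the work-set contents are illustrative) of why a dedicated bit survives the hypervisor's guest-entry work handling while a multiplexed one does not:

#include <stdio.h>

/* Illustrative bit values, not the kernel's real assignments */
#define TIF_NOTIFY_RESUME	(1u << 3)
#define TIF_RSEQ		(1u << 11)

static unsigned int ti_flags;

/* Models resume_user_mode_work() invoked before guest entry: it
 * consumes TIF_NOTIFY_RESUME, but a dedicated TIF_RSEQ is not part
 * of the hypervisor work set and stays untouched. */
static void resume_user_mode_work(void)
{
	ti_flags &= ~TIF_NOTIFY_RESUME;
}

int main(void)
{
	/* A context switch raised the RSEQ work bit */
	ti_flags |= TIF_RSEQ;

	resume_user_mode_work();		/* guest entry */

	/* With a separate bit the pending work survives until the
	 * actual exit to user space; nothing must be re-raised at
	 * the end of vcpu_run(). */
	printf("RSEQ work pending after guest entry: %s\n",
	       ti_flags & TIF_RSEQ ? "yes" : "no");
	return 0;
}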
parent 7a5201ea19
commit 32034df66b
@@ -45,4 +45,7 @@
 # define _TIF_RESTORE_SIGMASK	BIT(TIF_RESTORE_SIGMASK)
 #endif
 
+#define TIF_RSEQ		11	// Run RSEQ fast path
+#define _TIF_RSEQ		BIT(TIF_RSEQ)
+
 #endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
@@ -30,7 +30,7 @@
 #define EXIT_TO_USER_MODE_WORK						\
 	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
 	 _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY |			\
-	 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |			\
+	 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ |		\
 	 ARCH_EXIT_TO_USER_MODE_WORK)
 
 /**
@@ -42,7 +42,7 @@ static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *reg
 
 static inline void rseq_raise_notify_resume(struct task_struct *t)
 {
-	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+	set_tsk_thread_flag(t, TIF_RSEQ);
 }
 
 /* Invoked from context switch to force evaluation on exit to user */
@@ -114,17 +114,25 @@ static inline void rseq_force_update(void)
 
 /*
  * KVM/HYPERV invoke resume_user_mode_work() before entering guest mode,
- * which clears TIF_NOTIFY_RESUME. To avoid updating user space RSEQ in
- * that case just to do it eventually again before returning to user space,
- * the entry resume_user_mode_work() invocation is ignored as the register
- * argument is NULL.
+ * which clears TIF_NOTIFY_RESUME on architectures that don't use the
+ * generic TIF bits and therefore can't provide a separate TIF_RSEQ flag.
  *
- * After returning from guest mode, they have to invoke this function to
- * re-raise TIF_NOTIFY_RESUME if necessary.
+ * To avoid updating user space RSEQ in that case just to do it eventually
+ * again before returning to user space, because __rseq_handle_slowpath()
+ * does nothing when invoked with NULL register state.
+ *
+ * After returning from guest mode, before exiting to userspace, hypervisors
+ * must invoke this function to re-raise TIF_NOTIFY_RESUME if necessary.
  */
 static inline void rseq_virt_userspace_exit(void)
 {
-	if (current->rseq.event.sched_switch)
+	/*
+	 * The generic optimization for deferring RSEQ updates until the next
+	 * exit relies on having a dedicated TIF_RSEQ.
+	 */
+	if (!IS_ENABLED(CONFIG_HAVE_GENERIC_TIF_BITS) &&
+	    current->rseq.event.sched_switch)
 		rseq_raise_notify_resume(current);
 }
 
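A compressed user space sketch of the conditional above; the kernel config and task state are modeled as plain booleans, and the names are stand-ins rather than kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel state, illustrative only */
static bool have_generic_tif_bits = true;	/* CONFIG_HAVE_GENERIC_TIF_BITS */
static bool sched_switch_pending = true;	/* current->rseq.event.sched_switch */
static bool notify_resume_raised;

/* Mirrors rseq_virt_userspace_exit(): only architectures without a
 * dedicated TIF_RSEQ need to re-raise TIF_NOTIFY_RESUME after the
 * hypervisor returns from guest mode. */
static void virt_userspace_exit(void)
{
	if (!have_generic_tif_bits && sched_switch_pending)
		notify_resume_raised = true;
}

int main(void)
{
	virt_userspace_exit();
	/* With generic TIF bits this prints "no": the call is a NOOP */
	printf("TIF_NOTIFY_RESUME re-raised: %s\n",
	       notify_resume_raised ? "yes" : "no");
	return 0;
}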
@@ -507,18 +507,44 @@ static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *reg
 	return false;
 }
 
-static __always_inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs)
+/* Required to allow conversion to GENERIC_ENTRY w/o GENERIC_TIF_BITS */
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+static __always_inline bool test_tif_rseq(unsigned long ti_work)
 {
+	return ti_work & _TIF_RSEQ;
+}
+
+static __always_inline void clear_tif_rseq(void)
+{
+	static_assert(TIF_RSEQ != TIF_NOTIFY_RESUME);
+	clear_thread_flag(TIF_RSEQ);
+}
+#else
+static __always_inline bool test_tif_rseq(unsigned long ti_work) { return true; }
+static __always_inline void clear_tif_rseq(void) { }
+#endif
+
+static __always_inline bool
+rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+	if (likely(!test_tif_rseq(ti_work)))
+		return false;
+
 	if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
 		current->rseq.event.slowpath = true;
 		set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
 		return true;
 	}
+
+	clear_tif_rseq();
 	return false;
 }
 
 #else /* CONFIG_GENERIC_ENTRY */
-static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs) { return false; }
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+	return false;
+}
 #endif /* !CONFIG_GENERIC_ENTRY */
 
 static __always_inline void rseq_syscall_exit_to_user_mode(void)
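The control flow of the reworked restart function, reduced to a runnable user space model; the helper names and the flag value are hypothetical stand-ins for the kernel internals:

#include <stdbool.h>
#include <stdio.h>

#define _TIF_RSEQ	(1u << 11)	/* illustrative value */

static bool notify_resume;	/* models TIF_NOTIFY_RESUME */

/* Stand-in for __rseq_exit_to_user_mode_restart(): true means the
 * critical section fixup failed and the slow path must handle it. */
static bool fixup_failed(void)
{
	return false;
}

/* Mirrors the shape of the new rseq_exit_to_user_mode_restart() */
static bool rseq_restart(unsigned int ti_work)
{
	/* Fast exit: no RSEQ work pending at all */
	if (!(ti_work & _TIF_RSEQ))
		return false;

	if (fixup_failed()) {
		notify_resume = true;	/* escalate to the slow path */
		return true;		/* caller loops once more */
	}
	return false;			/* done, TIF_RSEQ cleared */
}

int main(void)
{
	printf("restart needed: %s\n",
	       rseq_restart(_TIF_RSEQ) ? "yes" : "no");
	return 0;
}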
@@ -577,7 +603,7 @@ static inline void rseq_debug_syscall_return(struct pt_regs *regs)
 }
 #else /* CONFIG_RSEQ */
 static inline void rseq_note_user_irq_entry(void) { }
-static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs)
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
 {
 	return false;
 }
@@ -67,6 +67,11 @@ enum syscall_work_bit {
 #define _TIF_NEED_RESCHED_LAZY	_TIF_NEED_RESCHED
 #endif
 
+#ifndef TIF_RSEQ
+# define TIF_RSEQ	TIF_NOTIFY_RESUME
+# define _TIF_RSEQ	_TIF_NOTIFY_RESUME
+#endif
+
 #ifdef __KERNEL__
 
 #ifndef arch_set_restart_data
@@ -11,6 +11,12 @@
 /* Workaround to allow gradual conversion of architecture code */
 void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }
 
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+#define EXIT_TO_USER_MODE_WORK_LOOP	(EXIT_TO_USER_MODE_WORK & ~_TIF_RSEQ)
+#else
+#define EXIT_TO_USER_MODE_WORK_LOOP	(EXIT_TO_USER_MODE_WORK)
+#endif
+
 static __always_inline unsigned long __exit_to_user_mode_loop(struct pt_regs *regs,
 							      unsigned long ti_work)
 {
@@ -18,7 +24,7 @@ static __always_inline unsigned long __exit_to_user_mode_loop(struct pt_regs *re
 	 * Before returning to user space ensure that all pending work
 	 * items have been completed.
 	 */
-	while (ti_work & EXIT_TO_USER_MODE_WORK) {
+	while (ti_work & EXIT_TO_USER_MODE_WORK_LOOP) {
 
 		local_irq_enable_exit_to_user(ti_work);
@@ -68,7 +74,7 @@ __always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
 	for (;;) {
 		ti_work = __exit_to_user_mode_loop(regs, ti_work);
 
-		if (likely(!rseq_exit_to_user_mode_restart(regs)))
+		if (likely(!rseq_exit_to_user_mode_restart(regs, ti_work)))
 			return ti_work;
 		ti_work = read_thread_flags();
 	}
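Taken together, the two loop changes mean the inner work loop never spins on _TIF_RSEQ: the RSEQ check runs exactly once per outer iteration, after all other work has been drained. A self-contained model of that masking (bit values illustrative, the "work handling" is a single cleared flag):

#include <stdio.h>

/* Illustrative bit values, not the kernel's real assignments */
#define _TIF_SIGPENDING			(1u << 0)
#define _TIF_RSEQ			(1u << 11)
#define EXIT_TO_USER_MODE_WORK		(_TIF_SIGPENDING | _TIF_RSEQ)
#define EXIT_TO_USER_MODE_WORK_LOOP	(EXIT_TO_USER_MODE_WORK & ~_TIF_RSEQ)

int main(void)
{
	unsigned int ti_work = _TIF_SIGPENDING | _TIF_RSEQ;
	unsigned int iterations = 0;

	/* The inner loop ignores _TIF_RSEQ entirely ... */
	while (ti_work & EXIT_TO_USER_MODE_WORK_LOOP) {
		ti_work &= ~_TIF_SIGPENDING;	/* handle and clear work */
		iterations++;
	}

	/* ... so pending RSEQ work causes no extra iterations and is
	 * handled once, by rseq_exit_to_user_mode_restart(), after
	 * the loop has drained everything else. */
	printf("inner loop iterations: %u, RSEQ still pending: %s\n",
	       iterations, ti_work & _TIF_RSEQ ? "yes" : "no");
	return 0;
}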