rseq: Record interrupt from user space
For RSEQ the only relevant reason to inspect and eventually fix up (abort)
user space critical sections is when user space was interrupted and the
task was scheduled out. If the user-to-kernel entry was from a syscall,
no fixup is required. If user space invokes a syscall from inside a
critical section, it can keep the pieces, as documented.

This is only supported on architectures which utilize the generic entry
code. If your architecture does not use it, bad luck.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084306.905067101@linutronix.de
commit 2fc0e4b412
parent 4b7de6df20
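To make the rationale concrete, here is a small conceptual sketch (not
kernel code) of the policy the commit message describes: a critical
section is only worth inspecting and aborting when the kernel was entered
via an interrupt from user space and the task was then scheduled out. The
flag names mirror struct rseq_event from the patch below; need_fixup() is
a hypothetical helper used purely for illustration.

/*
 * Conceptual sketch: only abort a critical section when the entry was an
 * interrupt from user mode AND the task was scheduled out. Syscall entry
 * never sets user_irq, so a syscall issued from inside a critical section
 * is left alone ("keep the pieces").
 */
#include <stdbool.h>
#include <stdio.h>

struct event_flags {
        bool sched_switch;      /* task was scheduled out */
        bool user_irq;          /* kernel entry was an interrupt from user mode */
};

static bool need_fixup(const struct event_flags *ev)
{
        return ev->user_irq && ev->sched_switch;
}

int main(void)
{
        struct event_flags syscall_path = { .sched_switch = true, .user_irq = false };
        struct event_flags irq_preempt  = { .sched_switch = true, .user_irq = true  };

        printf("syscall + reschedule: fixup=%d\n", need_fixup(&syscall_path)); /* 0 */
        printf("irq     + reschedule: fixup=%d\n", need_fixup(&irq_preempt));  /* 1 */
        return 0;
}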
@@ -4,7 +4,7 @@
 #include <linux/context_tracking.h>
 #include <linux/kmsan.h>
-#include <linux/rseq.h>
+#include <linux/rseq_entry.h>
 #include <linux/static_call_types.h>
 #include <linux/syscalls.h>
 #include <linux/tick.h>

@@ -281,6 +281,7 @@ static __always_inline void exit_to_user_mode(void)
 static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
 {
         enter_from_user_mode(regs);
+        rseq_note_user_irq_entry();
 }
 
 /**

@@ -31,11 +31,17 @@ static inline void rseq_sched_switch_event(struct task_struct *t)
 
 static __always_inline void rseq_exit_to_user_mode(void)
 {
-        if (IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
-                if (WARN_ON_ONCE(current->rseq.event.has_rseq &&
-                                 current->rseq.event.events))
-                        current->rseq.event.events = 0;
-        }
+        struct rseq_event *ev = &current->rseq.event;
+
+        if (IS_ENABLED(CONFIG_DEBUG_RSEQ))
+                WARN_ON_ONCE(ev->sched_switch);
+
+        /*
+         * Ensure that event (especially user_irq) is cleared when the
+         * interrupt did not result in a schedule and therefore the
+         * rseq processing did not clear it.
+         */
+        ev->events = 0;
 }
 
 /*

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_ENTRY_H
+#define _LINUX_RSEQ_ENTRY_H
+
+#ifdef CONFIG_RSEQ
+#include <linux/rseq.h>
+
+static __always_inline void rseq_note_user_irq_entry(void)
+{
+        if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY))
+                current->rseq.event.user_irq = true;
+}
+
+#else /* CONFIG_RSEQ */
+static inline void rseq_note_user_irq_entry(void) { }
+#endif /* !CONFIG_RSEQ */
+
+#endif /* _LINUX_RSEQ_ENTRY_H */

@@ -12,6 +12,7 @@ struct rseq;
  * @all: Compound to initialize and clear the data efficiently
  * @events: Compound to access events with a single load/store
  * @sched_switch: True if the task was scheduled out
+ * @user_irq: True on interrupt entry from user mode
  * @has_rseq: True if the task has a rseq pointer installed
  */
 struct rseq_event {

@@ -22,6 +23,7 @@ struct rseq_event {
                         u16 events;
                         struct {
                                 u8 sched_switch;
+                                u8 user_irq;
                         };
                 };
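For readers outside the kernel tree, the following stand-alone user-space
sketch illustrates the "compound access" idea the @events kernel-doc hints
at: assuming the flag bytes overlay a single u16 in a union, one store to
the compound word clears sched_switch and user_irq together, which is what
the reworked rseq_exit_to_user_mode() relies on. The layout here is
illustrative only, not a copy of the kernel structure.

/*
 * User-space sketch of struct rseq_event's compound flag word, assuming
 * the u8 flags alias the u16 'events' member via an anonymous union.
 */
#include <stdint.h>
#include <stdio.h>

struct rseq_event_sketch {
        union {
                uint16_t events;                /* compound load/store */
                struct {
                        uint8_t sched_switch;   /* task was scheduled out */
                        uint8_t user_irq;       /* interrupt entry from user mode */
                };
        };
};

int main(void)
{
        struct rseq_event_sketch ev;

        ev.events = 0;          /* one store clears every flag */
        ev.user_irq = 1;        /* irqentry_enter_from_user_mode() analogue */
        ev.sched_switch = 1;    /* task got preempted afterwards */

        printf("events = 0x%04x\n", ev.events); /* both flags in one load */

        ev.events = 0;          /* rseq_exit_to_user_mode(): clear compound word */
        printf("sched_switch=%u user_irq=%u\n", ev.sched_switch, ev.user_irq);
        return 0;
}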