Merge branch 'for-next/entry' into for-next/core

* for-next/entry:
  arm/syscalls: mark syscall invocation as likely in invoke_syscall
  arm64: entry: Switch to generic IRQ entry
  arm64: entry: Move arm64_preempt_schedule_irq() into __exit_to_kernel_mode()
  arm64: entry: Refactor preempt_schedule_irq() check code
  entry: Add arch_irqentry_exit_need_resched() for arm64
  arm64: entry: Use preempt_count() and need_resched() helper
  arm64: entry: Rework arm64_preempt_schedule_irq()
  arm64: entry: Refactor the entry and exit for exceptions from EL1
  arm64: ptrace: Replace interrupts_enabled() with regs_irqs_disabled()
This commit is contained in:
Will Deacon 2025-09-24 16:34:02 +01:00
commit 7df73a0049
14 changed files with 231 additions and 297 deletions

View File

@ -151,6 +151,7 @@ config ARM64
select GENERIC_EARLY_IOREMAP select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP select GENERIC_IDLE_POLL_SETUP
select GENERIC_IOREMAP select GENERIC_IOREMAP
select GENERIC_IRQ_ENTRY
select GENERIC_IRQ_IPI select GENERIC_IRQ_IPI
select GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD select GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
select GENERIC_IRQ_PROBE select GENERIC_IRQ_PROBE

View File

@ -128,7 +128,7 @@ static inline void local_daif_inherit(struct pt_regs *regs)
{ {
unsigned long flags = regs->pstate & DAIF_MASK; unsigned long flags = regs->pstate & DAIF_MASK;
if (interrupts_enabled(regs)) if (!regs_irqs_disabled(regs))
trace_hardirqs_on(); trace_hardirqs_on();
if (system_uses_irq_prio_masking()) if (system_uses_irq_prio_masking())

View File

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM64_ENTRY_COMMON_H
#define _ASM_ARM64_ENTRY_COMMON_H
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/stacktrace.h>
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_MTE_ASYNC_FAULT | _TIF_FOREIGN_FPSTATE)
/*
 * arm64-specific work performed on the exit-to-user-mode path, hooked into the
 * generic entry code via the arch_exit_to_user_mode_work() override below.
 * Called with @ti_work holding the thread flags snapshot taken by the generic
 * exit loop; only the flags in ARCH_EXIT_TO_USER_MODE_WORK reach this point.
 */
static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
							unsigned long ti_work)
{
	/*
	 * A pending asynchronous MTE tag-check fault is delivered to the task
	 * as SIGSEGV/SEGV_MTEAERR. The flag is cleared first so the fault is
	 * reported exactly once.
	 */
	if (ti_work & _TIF_MTE_ASYNC_FAULT) {
		clear_thread_flag(TIF_MTE_ASYNC_FAULT);
		send_sig_fault(SIGSEGV, SEGV_MTEAERR, (void __user *)NULL, current);
	}

	/*
	 * TIF_FOREIGN_FPSTATE indicates the FP/SIMD registers do not hold this
	 * task's state; reload it before returning to userspace.
	 */
	if (ti_work & _TIF_FOREIGN_FPSTATE)
		fpsimd_restore_current_state();
}

#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
/*
 * Tell the generic IRQ-exit path whether preemption is currently permitted.
 * Returning false suppresses preempt_schedule_irq() for this exception.
 */
static inline bool arch_irqentry_exit_need_resched(void)
{
	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. So with priority
	 * masking enabled, any bit still set in DAIF means we interrupted an
	 * NMI and must not preempt.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return false;

	/*
	 * Preempting a task from an IRQ leaves a copy of PSTATE on the stack.
	 * cpufeature enable calls may modify PSTATE, and resuming a task
	 * preempted before those calls ran would undo their changes. Hence
	 * preemption is only allowed once cpufeatures are finalized.
	 */
	return system_capabilities_finalized();
}

#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
#endif /* _ASM_ARM64_ENTRY_COMMON_H */

View File

@ -89,7 +89,6 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr); void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_el1_mops(struct pt_regs *regs, unsigned long esr); void do_el1_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr); void do_serror(struct pt_regs *regs, unsigned long esr);
void do_signal(struct pt_regs *regs);
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */ #endif /* __ASM_EXCEPTION_H */

View File

@ -2,7 +2,6 @@
#ifndef __ASM_PREEMPT_H #ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H #define __ASM_PREEMPT_H
#include <linux/jump_label.h>
#include <linux/thread_info.h> #include <linux/thread_info.h>
#define PREEMPT_NEED_RESCHED BIT(32) #define PREEMPT_NEED_RESCHED BIT(32)
@ -87,7 +86,6 @@ void preempt_schedule_notrace(void);
#ifdef CONFIG_PREEMPT_DYNAMIC #ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void); void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule() #define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void); void dynamic_preempt_schedule_notrace(void);

View File

@ -169,10 +169,6 @@ struct pt_regs {
u64 sdei_ttbr1; u64 sdei_ttbr1;
struct frame_record_meta stackframe; struct frame_record_meta stackframe;
/* Only valid for some EL1 exceptions. */
u64 lockdep_hardirqs;
u64 exit_rcu;
}; };
/* For correct stack alignment, pt_regs has to be a multiple of 16 bytes. */ /* For correct stack alignment, pt_regs has to be a multiple of 16 bytes. */
@ -214,11 +210,12 @@ static inline void forget_syscall(struct pt_regs *regs)
(regs)->pmr == GIC_PRIO_IRQON : \ (regs)->pmr == GIC_PRIO_IRQON : \
true) true)
#define interrupts_enabled(regs) \ static __always_inline bool regs_irqs_disabled(const struct pt_regs *regs)
(!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) {
return (regs->pstate & PSR_I_BIT) || !irqs_priority_unmasked(regs);
}
#define fast_interrupts_enabled(regs) \ #define interrupts_enabled(regs) (!regs_irqs_disabled(regs))
(!((regs)->pstate & PSR_F_BIT))
static inline unsigned long user_stack_pointer(struct pt_regs *regs) static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{ {

View File

@ -14,7 +14,7 @@ enum ipi_vector {
static inline int xen_irqs_disabled(struct pt_regs *regs) static inline int xen_irqs_disabled(struct pt_regs *regs)
{ {
return !interrupts_enabled(regs); return regs_irqs_disabled(regs);
} }
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))

View File

@ -417,7 +417,7 @@ int apei_claim_sea(struct pt_regs *regs)
return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags()); return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
if (regs) if (regs)
return_to_irqs_enabled = interrupts_enabled(regs); return_to_irqs_enabled = !regs_irqs_disabled(regs);
/* /*
* SEA can interrupt SError, mask it and describe this as an NMI so * SEA can interrupt SError, mask it and describe this as an NMI so

View File

@ -167,7 +167,7 @@ static void send_user_sigtrap(int si_code)
if (WARN_ON(!user_mode(regs))) if (WARN_ON(!user_mode(regs)))
return; return;
if (interrupts_enabled(regs)) if (!regs_irqs_disabled(regs))
local_irq_enable(); local_irq_enable();
arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs), arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),

View File

@ -6,6 +6,7 @@
*/ */
#include <linux/context_tracking.h> #include <linux/context_tracking.h>
#include <linux/irq-entry-common.h>
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/livepatch.h> #include <linux/livepatch.h>
@ -37,29 +38,20 @@
* This is intended to match the logic in irqentry_enter(), handling the kernel * This is intended to match the logic in irqentry_enter(), handling the kernel
* mode transitions only. * mode transitions only.
*/ */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs) static __always_inline irqentry_state_t __enter_from_kernel_mode(struct pt_regs *regs)
{ {
regs->exit_rcu = false; return irqentry_enter(regs);
if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
lockdep_hardirqs_off(CALLER_ADDR0);
ct_irq_enter();
trace_hardirqs_off_finish();
regs->exit_rcu = true;
return;
}
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
} }
static void noinstr enter_from_kernel_mode(struct pt_regs *regs) static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
{ {
__enter_from_kernel_mode(regs); irqentry_state_t state;
state = __enter_from_kernel_mode(regs);
mte_check_tfsr_entry(); mte_check_tfsr_entry();
mte_disable_tco_entry(current); mte_disable_tco_entry(current);
return state;
} }
/* /*
@ -70,30 +62,17 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
* This is intended to match the logic in irqentry_exit(), handling the kernel * This is intended to match the logic in irqentry_exit(), handling the kernel
* mode transitions only, and with preemption handled elsewhere. * mode transitions only, and with preemption handled elsewhere.
*/ */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs) static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
irqentry_state_t state)
{ {
lockdep_assert_irqs_disabled(); irqentry_exit(regs, state);
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
ct_irq_exit();
lockdep_hardirqs_on(CALLER_ADDR0);
return;
}
trace_hardirqs_on();
} else {
if (regs->exit_rcu)
ct_irq_exit();
}
} }
static void noinstr exit_to_kernel_mode(struct pt_regs *regs) static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
irqentry_state_t state)
{ {
mte_check_tfsr_exit(); mte_check_tfsr_exit();
__exit_to_kernel_mode(regs); __exit_to_kernel_mode(regs, state);
} }
/* /*
@ -101,18 +80,15 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
* Before this function is called it is not safe to call regular kernel code, * Before this function is called it is not safe to call regular kernel code,
* instrumentable code, or any code which may trigger an exception. * instrumentable code, or any code which may trigger an exception.
*/ */
static __always_inline void __enter_from_user_mode(void) static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{ {
lockdep_hardirqs_off(CALLER_ADDR0); enter_from_user_mode(regs);
CT_WARN_ON(ct_state() != CT_STATE_USER);
user_exit_irqoff();
trace_hardirqs_off_finish();
mte_disable_tco_entry(current); mte_disable_tco_entry(current);
} }
static __always_inline void enter_from_user_mode(struct pt_regs *regs) static __always_inline void arm64_enter_from_user_mode(struct pt_regs *regs)
{ {
__enter_from_user_mode(); __enter_from_user_mode(regs);
} }
/* /*
@ -120,113 +96,19 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
* After this function returns it is not safe to call regular kernel code, * After this function returns it is not safe to call regular kernel code,
* instrumentable code, or any code which may trigger an exception. * instrumentable code, or any code which may trigger an exception.
*/ */
static __always_inline void __exit_to_user_mode(void)
static __always_inline void arm64_exit_to_user_mode(struct pt_regs *regs)
{ {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
user_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);
}
static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
do {
local_irq_enable();
if (thread_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))
schedule();
if (thread_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
send_sig_fault(SIGSEGV, SEGV_MTEAERR,
(void __user *)NULL, current);
}
if (thread_flags & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
if (thread_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
if (thread_flags & _TIF_FOREIGN_FPSTATE)
fpsimd_restore_current_state();
local_irq_disable();
thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
}
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long flags;
local_irq_disable(); local_irq_disable();
flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
local_daif_mask();
lockdep_sys_exit();
}
static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
exit_to_user_mode_prepare(regs); exit_to_user_mode_prepare(regs);
local_daif_mask();
mte_check_tfsr_exit(); mte_check_tfsr_exit();
__exit_to_user_mode(); exit_to_user_mode();
} }
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs) asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{ {
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
}
/*
* Handle IRQ/context state management when entering an NMI from user/kernel
* mode. Before this function is called it is not safe to call regular kernel
* code, instrumentable code, or any code which may trigger an exception.
*/
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
__nmi_enter();
lockdep_hardirqs_off(CALLER_ADDR0);
lockdep_hardirq_enter();
ct_nmi_enter();
trace_hardirqs_off_finish();
ftrace_nmi_enter();
}
/*
* Handle IRQ/context state management when exiting an NMI from user/kernel
* mode. After this function returns it is not safe to call regular kernel
* code, instrumentable code, or any code which may trigger an exception.
*/
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
ftrace_nmi_exit();
if (restore) {
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare();
}
ct_nmi_exit();
lockdep_hardirq_exit();
if (restore)
lockdep_hardirqs_on(CALLER_ADDR0);
__nmi_exit();
} }
/* /*
@ -234,14 +116,18 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
* kernel mode. Before this function is called it is not safe to call regular * kernel mode. Before this function is called it is not safe to call regular
* kernel code, instrumentable code, or any code which may trigger an exception. * kernel code, instrumentable code, or any code which may trigger an exception.
*/ */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs) static noinstr irqentry_state_t arm64_enter_el1_dbg(struct pt_regs *regs)
{ {
regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); irqentry_state_t state;
state.lockdep = lockdep_hardirqs_enabled();
lockdep_hardirqs_off(CALLER_ADDR0); lockdep_hardirqs_off(CALLER_ADDR0);
ct_nmi_enter(); ct_nmi_enter();
trace_hardirqs_off_finish(); trace_hardirqs_off_finish();
return state;
} }
/* /*
@ -249,62 +135,19 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
* kernel mode. After this function returns it is not safe to call regular * kernel mode. After this function returns it is not safe to call regular
* kernel code, instrumentable code, or any code which may trigger an exception. * kernel code, instrumentable code, or any code which may trigger an exception.
*/ */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs) static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs,
irqentry_state_t state)
{ {
bool restore = regs->lockdep_hardirqs; if (state.lockdep) {
if (restore) {
trace_hardirqs_on_prepare(); trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(); lockdep_hardirqs_on_prepare();
} }
ct_nmi_exit(); ct_nmi_exit();
if (restore) if (state.lockdep)
lockdep_hardirqs_on(CALLER_ADDR0); lockdep_hardirqs_on(CALLER_ADDR0);
} }
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
#endif
static void __sched arm64_preempt_schedule_irq(void)
{
if (!need_irq_preemption())
return;
/*
* Note: thread_info::preempt_count includes both thread_info::count
* and thread_info::need_resched, and is not equivalent to
* preempt_count().
*/
if (READ_ONCE(current_thread_info()->preempt_count) != 0)
return;
/*
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
* priority masking is used the GIC irqchip driver will clear DAIF.IF
* using gic_arch_enable_irqs() for normal IRQs. If anything is set in
* DAIF we must have handled an NMI, so skip preemption.
*/
if (system_uses_irq_prio_masking() && read_sysreg(daif))
return;
/*
* Preempting a task from an IRQ means we leave copies of PSTATE
* on the stack. cpufeature's enable calls may modify PSTATE, but
* resuming one of these preempted tasks would undo those changes.
*
* Only allow a task to be preempted once cpufeatures have been
* enabled.
*/
if (system_capabilities_finalized())
preempt_schedule_irq();
}
static void do_interrupt_handler(struct pt_regs *regs, static void do_interrupt_handler(struct pt_regs *regs,
void (*handler)(struct pt_regs *)) void (*handler)(struct pt_regs *))
{ {
@ -324,7 +167,7 @@ extern void (*handle_arch_fiq)(struct pt_regs *);
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector, static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
unsigned long esr) unsigned long esr)
{ {
arm64_enter_nmi(regs); irqentry_nmi_enter(regs);
console_verbose(); console_verbose();
@ -475,73 +318,87 @@ UNHANDLED(el1t, 64, error)
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{ {
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
irqentry_state_t state;
enter_from_kernel_mode(regs); state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_mem_abort(far, esr, regs); do_mem_abort(far, esr, regs);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr) static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{ {
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
irqentry_state_t state;
enter_from_kernel_mode(regs); state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs); do_sp_pc_abort(far, esr, regs);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr) static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_el1_undef(regs, esr); do_el1_undef(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr) static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_el1_bti(regs, esr); do_el1_bti(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr) static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_el1_gcs(regs, esr); do_el1_gcs(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr) static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_el1_mops(regs, esr); do_el1_mops(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr) static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
{ {
arm64_enter_el1_dbg(regs); irqentry_state_t state;
state = arm64_enter_el1_dbg(regs);
debug_exception_enter(regs); debug_exception_enter(regs);
do_breakpoint(esr, regs); do_breakpoint(esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
arm64_exit_el1_dbg(regs); arm64_exit_el1_dbg(regs, state);
} }
static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr) static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
{ {
arm64_enter_el1_dbg(regs); irqentry_state_t state;
state = arm64_enter_el1_dbg(regs);
if (!cortex_a76_erratum_1463225_debug_handler(regs)) { if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
debug_exception_enter(regs); debug_exception_enter(regs);
/* /*
@ -554,37 +411,42 @@ static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
do_el1_softstep(esr, regs); do_el1_softstep(esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
} }
arm64_exit_el1_dbg(regs); arm64_exit_el1_dbg(regs, state);
} }
static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr) static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
{ {
/* Watchpoints are the only debug exception to write FAR_EL1 */ /* Watchpoints are the only debug exception to write FAR_EL1 */
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
irqentry_state_t state;
arm64_enter_el1_dbg(regs); state = arm64_enter_el1_dbg(regs);
debug_exception_enter(regs); debug_exception_enter(regs);
do_watchpoint(far, esr, regs); do_watchpoint(far, esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
arm64_exit_el1_dbg(regs); arm64_exit_el1_dbg(regs, state);
} }
static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr) static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
{ {
arm64_enter_el1_dbg(regs); irqentry_state_t state;
state = arm64_enter_el1_dbg(regs);
debug_exception_enter(regs); debug_exception_enter(regs);
do_el1_brk64(esr, regs); do_el1_brk64(esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
arm64_exit_el1_dbg(regs); arm64_exit_el1_dbg(regs, state);
} }
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr) static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
local_daif_inherit(regs); local_daif_inherit(regs);
do_el1_fpac(regs, esr); do_el1_fpac(regs, esr);
local_daif_mask(); local_daif_mask();
exit_to_kernel_mode(regs); exit_to_kernel_mode(regs, state);
} }
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs) asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
@ -639,30 +501,32 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
static __always_inline void __el1_pnmi(struct pt_regs *regs, static __always_inline void __el1_pnmi(struct pt_regs *regs,
void (*handler)(struct pt_regs *)) void (*handler)(struct pt_regs *))
{ {
arm64_enter_nmi(regs); irqentry_state_t state;
state = irqentry_nmi_enter(regs);
do_interrupt_handler(regs, handler); do_interrupt_handler(regs, handler);
arm64_exit_nmi(regs); irqentry_nmi_exit(regs, state);
} }
static __always_inline void __el1_irq(struct pt_regs *regs, static __always_inline void __el1_irq(struct pt_regs *regs,
void (*handler)(struct pt_regs *)) void (*handler)(struct pt_regs *))
{ {
enter_from_kernel_mode(regs); irqentry_state_t state;
state = enter_from_kernel_mode(regs);
irq_enter_rcu(); irq_enter_rcu();
do_interrupt_handler(regs, handler); do_interrupt_handler(regs, handler);
irq_exit_rcu(); irq_exit_rcu();
arm64_preempt_schedule_irq(); exit_to_kernel_mode(regs, state);
exit_to_kernel_mode(regs);
} }
static void noinstr el1_interrupt(struct pt_regs *regs, static void noinstr el1_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *)) void (*handler)(struct pt_regs *))
{ {
write_sysreg(DAIF_PROCCTX_NOIRQ, daif); write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && regs_irqs_disabled(regs))
__el1_pnmi(regs, handler); __el1_pnmi(regs, handler);
else else
__el1_irq(regs, handler); __el1_irq(regs, handler);
@ -681,21 +545,22 @@ asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs) asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{ {
unsigned long esr = read_sysreg(esr_el1); unsigned long esr = read_sysreg(esr_el1);
irqentry_state_t state;
local_daif_restore(DAIF_ERRCTX); local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs); state = irqentry_nmi_enter(regs);
do_serror(regs, esr); do_serror(regs, esr);
arm64_exit_nmi(regs); irqentry_nmi_exit(regs, state);
} }
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr) static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{ {
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs); do_mem_abort(far, esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr) static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
@ -710,50 +575,50 @@ static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(far)) if (!is_ttbr0_addr(far))
arm64_apply_bp_hardening(); arm64_apply_bp_hardening();
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_mem_abort(far, esr, regs); do_mem_abort(far, esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr) static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_fpsimd_acc(esr, regs); do_fpsimd_acc(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr) static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_sve_acc(esr, regs); do_sve_acc(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr) static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_sme_acc(esr, regs); do_sme_acc(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr) static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_fpsimd_exc(esr, regs); do_fpsimd_exc(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr) static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_sys(esr, regs); do_el0_sys(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr) static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
@ -763,58 +628,58 @@ static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(instruction_pointer(regs))) if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening(); arm64_apply_bp_hardening();
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(far, esr, regs); do_sp_pc_abort(far, esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr) static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_sp_pc_abort(regs->sp, esr, regs); do_sp_pc_abort(regs->sp, esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr) static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_undef(regs, esr); do_el0_undef(regs, esr);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_bti(struct pt_regs *regs) static void noinstr el0_bti(struct pt_regs *regs)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_bti(regs); do_el0_bti(regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr) static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_mops(regs, esr); do_el0_mops(regs, esr);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr) static void noinstr el0_gcs(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_gcs(regs, esr); do_el0_gcs(regs, esr);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr) static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
bad_el0_sync(regs, 0, esr); bad_el0_sync(regs, 0, esr);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr) static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
@ -822,12 +687,12 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(regs->pc)) if (!is_ttbr0_addr(regs->pc))
arm64_apply_bp_hardening(); arm64_apply_bp_hardening();
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
debug_exception_enter(regs); debug_exception_enter(regs);
do_breakpoint(esr, regs); do_breakpoint(esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr) static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
@ -835,7 +700,7 @@ static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
if (!is_ttbr0_addr(regs->pc)) if (!is_ttbr0_addr(regs->pc))
arm64_apply_bp_hardening(); arm64_apply_bp_hardening();
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
/* /*
* After handling a breakpoint, we suspend the breakpoint * After handling a breakpoint, we suspend the breakpoint
* and use single-step to move to the next instruction. * and use single-step to move to the next instruction.
@ -846,7 +711,7 @@ static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_softstep(esr, regs); do_el0_softstep(esr, regs);
} }
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr) static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
@ -854,39 +719,39 @@ static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
/* Watchpoints are the only debug exception to write FAR_EL1 */ /* Watchpoints are the only debug exception to write FAR_EL1 */
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
debug_exception_enter(regs); debug_exception_enter(regs);
do_watchpoint(far, esr, regs); do_watchpoint(far, esr, regs);
debug_exception_exit(regs); debug_exception_exit(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr) static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_brk64(esr, regs); do_el0_brk64(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_svc(struct pt_regs *regs) static void noinstr el0_svc(struct pt_regs *regs)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
cortex_a76_erratum_1463225_svc_handler(); cortex_a76_erratum_1463225_svc_handler();
fpsimd_syscall_enter(); fpsimd_syscall_enter();
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_svc(regs); do_el0_svc(regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
fpsimd_syscall_exit(); fpsimd_syscall_exit();
} }
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr) static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_fpac(regs, esr); do_el0_fpac(regs, esr);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs) asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
@ -960,7 +825,7 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
static void noinstr el0_interrupt(struct pt_regs *regs, static void noinstr el0_interrupt(struct pt_regs *regs,
void (*handler)(struct pt_regs *)) void (*handler)(struct pt_regs *))
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
write_sysreg(DAIF_PROCCTX_NOIRQ, daif); write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
@ -971,7 +836,7 @@ static void noinstr el0_interrupt(struct pt_regs *regs,
do_interrupt_handler(regs, handler); do_interrupt_handler(regs, handler);
irq_exit_rcu(); irq_exit_rcu();
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr __el0_irq_handler_common(struct pt_regs *regs) static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
@ -997,14 +862,15 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
static void noinstr __el0_error_handler_common(struct pt_regs *regs) static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{ {
unsigned long esr = read_sysreg(esr_el1); unsigned long esr = read_sysreg(esr_el1);
irqentry_state_t state;
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_ERRCTX); local_daif_restore(DAIF_ERRCTX);
arm64_enter_nmi(regs); state = irqentry_nmi_enter(regs);
do_serror(regs, esr); do_serror(regs, esr);
arm64_exit_nmi(regs); irqentry_nmi_exit(regs, state);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs) asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
@ -1015,27 +881,27 @@ asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr) static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_cp15(esr, regs); do_el0_cp15(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_svc_compat(struct pt_regs *regs) static void noinstr el0_svc_compat(struct pt_regs *regs)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
cortex_a76_erratum_1463225_svc_handler(); cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_el0_svc_compat(regs); do_el0_svc_compat(regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr) static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
{ {
enter_from_user_mode(regs); arm64_enter_from_user_mode(regs);
local_daif_restore(DAIF_PROCCTX); local_daif_restore(DAIF_PROCCTX);
do_bkpt32(esr, regs); do_bkpt32(esr, regs);
exit_to_user_mode(regs); arm64_exit_to_user_mode(regs);
} }
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs) asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
@ -1114,7 +980,7 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
unsigned long esr = read_sysreg(esr_el1); unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1); unsigned long far = read_sysreg(far_el1);
arm64_enter_nmi(regs); irqentry_nmi_enter(regs);
panic_bad_stack(regs, esr, far); panic_bad_stack(regs, esr, far);
} }
@ -1122,6 +988,7 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
asmlinkage noinstr unsigned long asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{ {
irqentry_state_t state;
unsigned long ret; unsigned long ret;
/* /*
@ -1146,9 +1013,9 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
else if (cpu_has_pan()) else if (cpu_has_pan())
set_pstate_pan(0); set_pstate_pan(0);
arm64_enter_nmi(regs); state = irqentry_nmi_enter(regs);
ret = do_sdei_event(regs, arg); ret = do_sdei_event(regs, arg);
arm64_exit_nmi(regs); irqentry_nmi_exit(regs, state);
return ret; return ret;
} }

View File

@ -243,7 +243,7 @@ unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
* If we interrupted the kernel with interrupts masked, we always go * If we interrupted the kernel with interrupts masked, we always go
* back to wherever we came from. * back to wherever we came from.
*/ */
if (mode == kernel_mode && !interrupts_enabled(regs)) if (mode == kernel_mode && regs_irqs_disabled(regs))
return SDEI_EV_HANDLED; return SDEI_EV_HANDLED;
/* /*

View File

@ -9,6 +9,7 @@
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/irq-entry-common.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/freezer.h> #include <linux/freezer.h>
@ -1576,7 +1577,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* the kernel can handle, and then we build all the user-level signal handling * the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that. * stack-frames in one go after that.
*/ */
void do_signal(struct pt_regs *regs) void arch_do_signal_or_restart(struct pt_regs *regs)
{ {
unsigned long continue_addr = 0, restart_addr = 0; unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0; int retval = 0;

View File

@ -43,7 +43,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
add_random_kstack_offset(); add_random_kstack_offset();
if (scno < sc_nr) { if (likely(scno < sc_nr)) {
syscall_fn_t syscall_fn; syscall_fn_t syscall_fn;
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)]; syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
ret = __invoke_syscall(regs, syscall_fn); ret = __invoke_syscall(regs, syscall_fn);

View File

@ -143,6 +143,20 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
return ret; return ret;
} }
/**
* arch_irqentry_exit_need_resched - Architecture specific need resched function
*
* Invoked from raw_irqentry_exit_cond_resched() to check if resched is needed.
* Defaults return true.
*
* The main purpose is to permit arch to avoid preemption of a task from an IRQ.
*/
static inline bool arch_irqentry_exit_need_resched(void);
#ifndef arch_irqentry_exit_need_resched
static inline bool arch_irqentry_exit_need_resched(void) { return true; }
#endif
void raw_irqentry_exit_cond_resched(void) void raw_irqentry_exit_cond_resched(void)
{ {
if (!preempt_count()) { if (!preempt_count()) {
@ -150,7 +164,7 @@ void raw_irqentry_exit_cond_resched(void)
rcu_irq_exit_check_preempt(); rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack()); WARN_ON_ONCE(!on_thread_stack());
if (need_resched()) if (need_resched() && arch_irqentry_exit_need_resched())
preempt_schedule_irq(); preempt_schedule_irq();
} }
} }