x86/percpu/64: Remove INIT_PER_CPU macros

Now that the load and link addresses of percpu variables are the same,
these macros are no longer necessary.
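
For reference, the indirection being removed worked like this (condensed
from the hunks below): percpu variables used to be linked at an offset
from __per_cpu_load, so boot-time references needed a linker-generated
alias that resolved to the variable's load address:

    /* <asm/percpu.h> */
    #define DECLARE_INIT_PER_CPU(var) \
            extern typeof(var) init_per_cpu_var(var)

    #ifdef CONFIG_X86_64_SMP
    # define init_per_cpu_var(var) init_per_cpu__##var
    #else
    # define init_per_cpu_var(var) var
    #endif

    /* vmlinux.lds.S */
    #define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x)
    INIT_PER_CPU(gdt_page);

Each alias also had to be whitelisted in the relocs tool. Since the
alias now resolves to the same address as the variable itself, early
boot code such as startup_64_setup_gdt_idt() can use the plain symbol
directly.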

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250123190747.745588-12-brgerst@gmail.com
commit 38a4968b31
parent a8327be7b2
Author: Brian Gerst <brgerst@gmail.com>
Date:   2025-01-23 14:07:43 -05:00
Commit: Ingo Molnar <mingo@kernel.org>

 6 files changed, 1 insertion(+), 33 deletions(-)

diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h

@@ -46,7 +46,6 @@ struct gdt_page {
 } __attribute__((aligned(PAGE_SIZE)));
 
 DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-DECLARE_INIT_PER_CPU(gdt_page);
 
 /* Provide the original GDT */
 static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h

@@ -20,12 +20,6 @@
 
 #define PER_CPU_VAR(var) __percpu(var)__percpu_rel
 
-#ifdef CONFIG_X86_64_SMP
-# define INIT_PER_CPU_VAR(var) init_per_cpu__##var
-#else
-# define INIT_PER_CPU_VAR(var) var
-#endif
-
 #else /* !__ASSEMBLY__: */
 
 #include <linux/build_bug.h>
@@ -97,22 +91,6 @@
 #define __percpu_arg(x) __percpu_prefix "%" #x
 #define __force_percpu_arg(x) __force_percpu_prefix "%" #x
 
-/*
- * Initialized pointers to per-CPU variables needed for the boot
- * processor need to use these macros to get the proper address
- * offset from __per_cpu_load on SMP.
- *
- * There also must be an entry in vmlinux_64.lds.S
- */
-#define DECLARE_INIT_PER_CPU(var) \
-	extern typeof(var) init_per_cpu_var(var)
-
-#ifdef CONFIG_X86_64_SMP
-# define init_per_cpu_var(var) init_per_cpu__##var
-#else
-# define init_per_cpu_var(var) var
-#endif
-
 /*
  * For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though).

diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c

@@ -567,7 +567,7 @@ void early_setup_idt(void)
  */
 void __head startup_64_setup_gdt_idt(void)
 {
-	struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
+	struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt;
 	void *handler = NULL;
 
 	struct desc_ptr startup_gdt_descr = {

diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c

@@ -27,7 +27,6 @@
 #include <asm/apic.h>
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_VMAP_STACK
 /*

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S

@@ -471,13 +471,6 @@ SECTIONS
 PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
 
 #ifdef CONFIG_X86_64
-/*
- * Per-cpu symbols which need to be offset from __per_cpu_load
- * for the boot processor.
- */
-#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x)
-INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_MITIGATION_UNRET_ENTRY
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");

diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c

@@ -90,7 +90,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	"__initramfs_start|"
 	"(jiffies|jiffies_64)|"
 #if ELF_BITS == 64
-	"init_per_cpu__.*|"
 	"__end_rodata_hpage_align|"
 #endif
 	"_end)$"