arm64/mm: Remove randomization of the linear map
Since commit
97d6786e06 ("arm64: mm: account for hotplug memory when randomizing the linear region")
the decision whether or not to randomize the placement of the system's
DRAM inside the linear map is based on the capabilities of the CPU
rather than how much memory is present at boot time. This change was
necessary because memory hotplug may result in DRAM appearing in places
that are not covered by the linear region at all (and therefore
unusable) if the decision is solely based on the memory map at boot.
In the Android GKI kernel, which requires support for memory hotplug and is
built with a reduced virtual address space of only 39 bits, randomization of
the linear map never happens in practice as a result.
And even on arm64 kernels built with support for 48-bit virtual addressing,
the wider PArange of recent CPUs means that linear map randomization is slowly
becoming a feature that only works on systems that will soon be obsolete.
So let's just remove this feature. We can always bring it back in an
improved form if there is a real need for it.
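To make the "never happens in practice" claim concrete, here is a minimal
standalone C sketch of the slack calculation the removed code performed, using
assumed values (a 39-bit VA configuration, a 48-bit PArange CPU, and a 128 MiB
stand-in for ARM64_MEMSTART_ALIGN); none of these constants are read from
kernel headers:

/* Standalone sketch (not kernel code): mirrors the slack check that gated
 * linear map randomization in arm64_memblock_init(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned vabits  = 39;          /* assumed: CONFIG_ARM64_VA_BITS=39 (Android GKI) */
	unsigned parange = 48;          /* assumed: physical address range of a recent CPU */
	int64_t align    = 1LL << 27;   /* assumed 128 MiB stand-in for ARM64_MEMSTART_ALIGN */

	int64_t linear_region_size = 1LL << (vabits - 1);  /* half the VA space: 256 GiB */
	int64_t phys_span          = 1LL << parange;       /* what DRAM could span: 256 TiB */
	int64_t range              = linear_region_size - phys_span;

	/* The removed code randomized only when range >= ARM64_MEMSTART_ALIGN. */
	printf("slack = %lld bytes -> randomization %s\n",
	       (long long)range, range >= align ? "possible" : "never happens");
	return 0;
}

With these numbers the slack is negative, so the randomization branch could
never be taken on such a configuration.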
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Kees Cook <kees@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250318134949.3194334-2-ardb+git@google.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 1db780bafa
parent 0af2f6be1b
@@ -52,7 +52,6 @@ PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
 PROVIDE(__pi_is_midr_in_range_list = is_midr_in_range_list);
 #endif
 PROVIDE(__pi__ctype = _ctype);
-PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
 
 PROVIDE(__pi_init_idmap_pg_dir = init_idmap_pg_dir);
 PROVIDE(__pi_init_idmap_pg_end = init_idmap_pg_end);

@@ -10,8 +10,6 @@
 #include <asm/cpufeature.h>
 #include <asm/memory.h>
 
-u16 __initdata memstart_offset_seed;
-
 bool __ro_after_init __kaslr_is_enabled = false;
 
 void __init kaslr_init(void)

@@ -18,8 +18,6 @@
 
 #include "pi.h"
 
-extern u16 memstart_offset_seed;
-
 static u64 __init get_kaslr_seed(void *fdt, int node)
 {
 	static char const seed_str[] __initconst = "kaslr-seed";

@@ -53,8 +51,6 @@ u64 __init kaslr_early_init(void *fdt, int chosen)
 		return 0;
 	}
 
-	memstart_offset_seed = seed & U16_MAX;
-
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
 	 * kernel image offset from the seed. Let's place the kernel in the

@@ -275,26 +275,6 @@ void __init arm64_memblock_init(void)
 		}
 	}
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
-		extern u16 memstart_offset_seed;
-		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
-		int parange = cpuid_feature_extract_unsigned_field(
-					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
-		s64 range = linear_region_size -
-			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
-
-		/*
-		 * If the size of the linear region exceeds, by a sufficient
-		 * margin, the size of the region that the physical memory can
-		 * span, randomize the linear region as well.
-		 */
-		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
-			range /= ARM64_MEMSTART_ALIGN;
-			memstart_addr -= ARM64_MEMSTART_ALIGN *
-					 ((range * memstart_offset_seed) >> 16);
-		}
-	}
-
 	/*
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
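For completeness, the seed-to-offset scaling that the removed block applied
when there was enough slack can be sketched in isolation as well; the 1 GiB
alignment and 1 TiB of slack below are purely illustrative assumptions, not
kernel definitions:

/* Standalone sketch (not kernel code): how a 16-bit seed was scaled into the
 * available slack in ARM64_MEMSTART_ALIGN-sized steps. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t  align = 1LL << 30;   /* assumed 1 GiB stand-in for ARM64_MEMSTART_ALIGN */
	int64_t  range = 1LL << 40;   /* assumed 1 TiB of slack in the linear region */
	uint16_t seed  = 0x8000;      /* example seed: halfway through its 16-bit range */

	range /= align;               /* number of possible alignment-sized steps: 1024 */
	int64_t offset = align * ((range * seed) >> 16);

	/* memstart_addr was decremented by this amount, shifting the linear map. */
	printf("linear map offset: %lld GiB\n", (long long)(offset >> 30));
	return 0;
}

Even with the maximum seed of 0xffff the result picks the step just below the
top of the slack, so the offset never exceeded the available range.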