arm64: mm: Represent physical memory with phys_addr_t and resource_size_t

This is a type-correctness cleanup to the MMU/boot code that replaces
several instances of void * and u64 with phys_addr_t (for addresses)
and resource_size_t (for sizes), to emphasize that the code in question
deals specifically with physical memory.

The rationale for this change is to improve clarity and readability in
a few modules that handle both physical and virtual addresses, where
keeping the two distinct is essential.
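
As a rough illustration of the pattern (excerpted and condensed from
the diff below; kernel context is assumed for phys_addr_t,
resource_size_t and __pa_symbol()):

  /* Before: nothing in the prototype says the argument is a PA. */
  extern void idmap_cpu_replace_ttbr1(void *pgdir);
  idmap_cpu_replace_ttbr1(init_pg_dir);

  /* After: the type documents that a physical address is expected,
   * and call sites make the conversion explicit. */
  extern void idmap_cpu_replace_ttbr1(phys_addr_t pgdir);
  idmap_cpu_replace_ttbr1((phys_addr_t)init_pg_dir);

  /* Sizes of physical ranges become resource_size_t rather than u64. */
  memblock_add(__pa_symbol(_text), (resource_size_t)(_end - _text));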

I have left u64 in cases where the address may be either physical or
virtual, where the address is exclusively virtual but used in heavy
pointer arithmetic, and in cases I may have overlooked. I do not
necessarily consider u64 the ideal type in those situations, but it
avoids breaking existing semantics in this cleanup.
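
One example from the diff below: map_range() keeps u64 for its start
and end parameters, which can hold either physical addresses (when
building the initial ID map, where VA == PA) or virtual addresses
(when mapping the kernel proper), so neither phys_addr_t nor a pointer
type would be accurate there:

  /* start/end may be PAs (ID map) or VAs (kernel map), so they stay
   * u64; only *pte and pa are strictly physical. */
  void __init map_range(phys_addr_t *pte, u64 start, u64 end,
                        phys_addr_t pa, pgprot_t prot, int level,
                        pte_t *tbl, bool may_use_cont, u64 va_offset);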

This patch provably has no effect at runtime: I have verified that the
.text section of vmlinux is identical before and after this change.

Signed-off-by: Sam Edwards <CFSworks@gmail.com>
Signed-off-by: Will Deacon <will@kernel.org>
---
commit b868fff5b1 (parent c56aa9a67a)
Author:    Sam Edwards <CFSworks@gmail.com>
Date:      2025-09-03 17:52:09 -07:00
Committer: Will Deacon <will@kernel.org>

 5 files changed, 42 insertions(+), 36 deletions(-)

--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -18,9 +18,9 @@
 
 extern const u8 __eh_frame_start[], __eh_frame_end[];
 
-extern void idmap_cpu_replace_ttbr1(void *pgdir);
+extern void idmap_cpu_replace_ttbr1(phys_addr_t pgdir);
 
-static void __init map_segment(pgd_t *pg_dir, u64 *pgd, u64 va_offset,
+static void __init map_segment(pgd_t *pg_dir, phys_addr_t *pgd, u64 va_offset,
			       void *start, void *end, pgprot_t prot,
			       bool may_use_cont, int root_level)
 {
@@ -40,7 +40,7 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
 {
	bool enable_scs = IS_ENABLED(CONFIG_UNWIND_PATCH_PAC_INTO_SCS);
	bool twopass = IS_ENABLED(CONFIG_RELOCATABLE);
-	u64 pgdp = (u64)init_pg_dir + PAGE_SIZE;
+	phys_addr_t pgdp = (phys_addr_t)init_pg_dir + PAGE_SIZE;
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
	pgprot_t prot;
@@ -90,7 +90,7 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
		    true, root_level);
	dsb(ishst);
-	idmap_cpu_replace_ttbr1(init_pg_dir);
+	idmap_cpu_replace_ttbr1((phys_addr_t)init_pg_dir);
 
	if (twopass) {
		if (IS_ENABLED(CONFIG_RELOCATABLE))
@@ -129,10 +129,10 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
	/* Copy the root page table to its final location */
	memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
	dsb(ishst);
-	idmap_cpu_replace_ttbr1(swapper_pg_dir);
+	idmap_cpu_replace_ttbr1((phys_addr_t)swapper_pg_dir);
 }
 
-static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
+static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(phys_addr_t ttbr)
 {
	u64 sctlr = read_sysreg(sctlr_el1);
	u64 tcr = read_sysreg(tcr_el1) | TCR_DS;
@@ -172,7 +172,7 @@ static void __init remap_idmap_for_lpa2(void)
	 */
	create_init_idmap(init_pg_dir, mask);
	dsb(ishst);
-	set_ttbr0_for_lpa2((u64)init_pg_dir);
+	set_ttbr0_for_lpa2((phys_addr_t)init_pg_dir);
 
	/*
	 * Recreate the initial ID map with the same granularity as before.
@@ -185,17 +185,17 @@ static void __init remap_idmap_for_lpa2(void)
	dsb(ishst);
 
	/* switch back to the updated initial ID map */
-	set_ttbr0_for_lpa2((u64)init_idmap_pg_dir);
+	set_ttbr0_for_lpa2((phys_addr_t)init_idmap_pg_dir);
 
	/* wipe the temporary ID map from memory */
	memset(init_pg_dir, 0, (char *)init_pg_end - (char *)init_pg_dir);
 }
 
-static void *__init map_fdt(u64 fdt)
+static void *__init map_fdt(phys_addr_t fdt)
 {
	static u8 ptes[INIT_IDMAP_FDT_SIZE] __initdata __aligned(PAGE_SIZE);
-	u64 efdt = fdt + MAX_FDT_SIZE;
-	u64 ptep = (u64)ptes;
+	phys_addr_t efdt = fdt + MAX_FDT_SIZE;
+	phys_addr_t ptep = (phys_addr_t)ptes;	/* We're idmapped when called */
 
	/*
	 * Map up to MAX_FDT_SIZE bytes, but avoid overlap with
@@ -232,7 +232,7 @@ static bool __init ng_mappings_allowed(void)
	return true;
 }
 
-asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
+asmlinkage void __init early_map_kernel(u64 boot_status, phys_addr_t fdt)
 {
	static char const chosen_str[] __initconst = "/chosen";
	u64 va_base, pa_base = (u64)&_text;
@@ -240,7 +240,7 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
	int root_level = 4 - CONFIG_PGTABLE_LEVELS;
	int va_bits = VA_BITS;
	int chosen;
-	void *fdt_mapped = map_fdt((u64)fdt);
+	void *fdt_mapped = map_fdt(fdt);
 
	/* Clear BSS and the initial page tables */
	memset(__bss_start, 0, (char *)init_pg_end - (char *)__bss_start);

--- a/arch/arm64/kernel/pi/map_range.c
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -26,8 +26,9 @@
  * @va_offset:	Offset between a physical page and its current mapping
  *		in the VA space
  */
-void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
-		      int level, pte_t *tbl, bool may_use_cont, u64 va_offset)
+void __init map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa,
+		      pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
+		      u64 va_offset)
 {
	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
	ptdesc_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
@@ -87,19 +88,22 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
	}
 }
 
-asmlinkage u64 __init create_init_idmap(pgd_t *pg_dir, ptdesc_t clrmask)
+asmlinkage phys_addr_t __init create_init_idmap(pgd_t *pg_dir, ptdesc_t clrmask)
 {
-	u64 ptep = (u64)pg_dir + PAGE_SIZE;
+	phys_addr_t ptep = (phys_addr_t)pg_dir + PAGE_SIZE;	/* MMU is off */
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;
 
	pgprot_val(text_prot) &= ~clrmask;
	pgprot_val(data_prot) &= ~clrmask;
 
-	map_range(&ptep, (u64)_stext, (u64)__initdata_begin, (u64)_stext,
-		  text_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
-	map_range(&ptep, (u64)__initdata_begin, (u64)_end, (u64)__initdata_begin,
-		  data_prot, IDMAP_ROOT_LEVEL, (pte_t *)pg_dir, false, 0);
+	/* MMU is off; pointer casts to phys_addr_t are safe */
+	map_range(&ptep, (u64)_stext, (u64)__initdata_begin,
+		  (phys_addr_t)_stext, text_prot, IDMAP_ROOT_LEVEL,
+		  (pte_t *)pg_dir, false, 0);
+	map_range(&ptep, (u64)__initdata_begin, (u64)_end,
+		  (phys_addr_t)__initdata_begin, data_prot, IDMAP_ROOT_LEVEL,
+		  (pte_t *)pg_dir, false, 0);
 
	return ptep;
 }

--- a/arch/arm64/kernel/pi/pi.h
+++ b/arch/arm64/kernel/pi/pi.h
@@ -29,9 +29,10 @@ u64 kaslr_early_init(void *fdt, int chosen);
 void relocate_kernel(u64 offset);
 int scs_patch(const u8 eh_frame[], int size);
 
-void map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
-	       int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
+void map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa,
+	       pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
+	       u64 va_offset);
 
-asmlinkage void early_map_kernel(u64 boot_status, void *fdt);
+asmlinkage void early_map_kernel(u64 boot_status, phys_addr_t fdt);
 
-asmlinkage u64 create_init_idmap(pgd_t *pgd, ptdesc_t clrmask);
+asmlinkage phys_addr_t create_init_idmap(pgd_t *pgd, ptdesc_t clrmask);

--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -243,7 +243,7 @@ void __init arm64_memblock_init(void)
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
-		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
+		memblock_add(__pa_symbol(_text), (resource_size_t)(_end - _text));
	}
 
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
@@ -252,8 +252,8 @@ void __init arm64_memblock_init(void)
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */
-		u64 base = phys_initrd_start & PAGE_MASK;
-		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
+		phys_addr_t base = phys_initrd_start & PAGE_MASK;
+		resource_size_t size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
		/*
		 * We can only add back the initrd memory if we don't end up

--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -794,17 +794,18 @@ static void __init declare_kernel_vmas(void)
	declare_vma(&vmlinux_seg[4], _data, _end, 0);
 }
 
-void __pi_map_range(u64 *pgd, u64 start, u64 end, u64 pa, pgprot_t prot,
-		    int level, pte_t *tbl, bool may_use_cont, u64 va_offset);
+void __pi_map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa,
+		    pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
+		    u64 va_offset);
 
 static u8 idmap_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init,
	  kpti_ptes[IDMAP_LEVELS - 1][PAGE_SIZE] __aligned(PAGE_SIZE) __ro_after_init;
 
 static void __init create_idmap(void)
 {
-	u64 start = __pa_symbol(__idmap_text_start);
-	u64 end = __pa_symbol(__idmap_text_end);
-	u64 ptep = __pa_symbol(idmap_ptes);
+	phys_addr_t start = __pa_symbol(__idmap_text_start);
+	phys_addr_t end = __pa_symbol(__idmap_text_end);
+	phys_addr_t ptep = __pa_symbol(idmap_ptes);
 
	__pi_map_range(&ptep, start, end, start, PAGE_KERNEL_ROX,
		       IDMAP_ROOT_LEVEL, (pte_t *)idmap_pg_dir, false,
@@ -812,7 +813,7 @@ static void __init create_idmap(void)
 
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) && !arm64_use_ng_mappings) {
		extern u32 __idmap_kpti_flag;
-		u64 pa = __pa_symbol(&__idmap_kpti_flag);
+		phys_addr_t pa = __pa_symbol(&__idmap_kpti_flag);
 
		/*
		 * The KPTI G-to-nG conversion code needs a read-write mapping
@@ -1331,8 +1332,8 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
	struct range mhp_range;
-	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
-	u64 end_linear_pa = __pa(PAGE_END - 1);
+	phys_addr_t start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	phys_addr_t end_linear_pa = __pa(PAGE_END - 1);
 
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/*