arm64: mm: make linear mapping permission update more robust for partial range

Commit fcf8dda8cc ("arm64: pageattr: Explicitly bail out when changing
permissions for vmalloc_huge mappings") made the permission update for a
partial range more robust. But the linear mapping permission update
still assumes the whole range is updated, iterating from the first page
of the area all the way to its last page.

Make it more robust by starting the linear mapping permission update
from the page mapped by the start address, and updating only numpages
pages.
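
To illustrate (this is not part of the patch; the addresses and counts
below are made up for demonstration), the fix derives the starting index
into area->pages[] from the offset of start within the area, then walks
exactly numpages entries:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	/* Illustrative values: an area starting at area_addr, with a
	 * request to change 3 pages starting 2 pages into the area. */
	unsigned long area_addr = 0xffff800010000000UL;
	unsigned long start = area_addr + 2 * PAGE_SIZE;
	int numpages = 3;

	/* Same arithmetic as the patch: index of the first
	 * area->pages[] entry covered by the requested range. */
	unsigned long idx = (start - area_addr) >> PAGE_SHIFT;

	for (; numpages; idx++, numpages--)
		printf("update pages[%lu]\n", idx);	/* prints 2, 3, 4 */

	return 0;
}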

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent c320dbb7c8
commit 37cb0aab90
1 changed file with 3 additions and 3 deletions


--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -148,7 +148,6 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
 	struct vm_struct *area;
-	int i;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -184,8 +183,9 @@ static int change_memory_common(unsigned long addr, int numpages,
 	 */
 	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
-		for (i = 0; i < area->nr_pages; i++) {
-			__change_memory_common((u64)page_address(area->pages[i]),
+		unsigned long idx = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
+		for (; numpages; idx++, numpages--) {
+			__change_memory_common((u64)page_address(area->pages[idx]),
 					       PAGE_SIZE, set_mask, clear_mask);
 		}
 	}
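
For context, a minimal sketch of a caller that exercises this
partial-range path; the allocation size, offset, and page counts are
illustrative, while set_memory_ro()/set_memory_rw() are the existing
helpers that route through change_memory_common():

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static int partial_range_demo(void)
{
	/* Illustrative 4-page allocation; only 2 pages are updated. */
	void *vaddr = vmalloc(4 * PAGE_SIZE);

	if (!vaddr)
		return -ENOMEM;

	/* Make the 2 middle pages read-only; with this fix the
	 * rodata_full loop updates exactly those 2 linear-map pages
	 * rather than all 4 pages of the area. */
	set_memory_ro((unsigned long)vaddr + PAGE_SIZE, 2);

	/* ... use the mapping ... */

	set_memory_rw((unsigned long)vaddr + PAGE_SIZE, 2);
	vfree(vaddr);
	return 0;
}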