mm: mprotect: avoid unnecessary struct page accessing if pte_protnone()
If pte_protnone() is true, we can avoid unnecessary struct page accesses
and reduce the cache footprint when scanning page tables for prot numa.
A similar change was made before; see commit a818f5363a ("autonuma:
reduce cache footprint when scanning page tables").
Link: https://lkml.kernel.org/r/20251023113737.3572790-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 6e97624dac
parent 03aa8e4f27
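The idea is simply to order the checks so that the cheap PTE bit test
runs before the expensive struct page lookup. Below is a minimal
user-space sketch of that ordering; every type and helper in it is an
illustrative stand-in (fake_page, fake_pte_protnone, and the bit layout
are invented for the sketch), not the kernel's definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not kernel definitions: a PTE modelled as a
 * word with a software PROT_NONE bit, and a fake struct page whose
 * cache line we want to avoid touching when that bit is already set. */
#define FAKE_PTE_PROTNONE (1ULL << 1)

struct fake_page {
	unsigned long flags;
};

static bool fake_pte_protnone(uint64_t pte)
{
	return pte & FAKE_PTE_PROTNONE;
}

int main(void)
{
	struct fake_page pages[4] = {{0}};
	uint64_t ptes[4] = { 0, FAKE_PTE_PROTNONE, 0, FAKE_PTE_PROTNONE };

	for (size_t i = 0; i < 4; i++) {
		/* Cheap bit test first: a PROT_NONE entry is already in
		 * the desired state, so skip it without dereferencing
		 * the page and pulling its cache line in. */
		if (fake_pte_protnone(ptes[i])) {
			printf("pte %zu: protnone, skipped\n", i);
			continue;
		}
		/* Only now touch the (expensive) struct page. */
		printf("pte %zu: page flags %lu\n", i, pages[i].flags);
	}
	return 0;
}

In the patch below, the same reordering moves the pte_protnone() test
out of prot_numa_skip() and in front of the vm_normal_page()/
page_folio() lookup in change_pte_range().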
diff --git a/mm/mprotect.c b/mm/mprotect.c
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,18 +118,13 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
-			   pte_t oldpte, pte_t *pte, int target_node,
-			   struct folio *folio)
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
+			   struct folio *folio)
 {
 	bool ret = true;
 	bool toptier;
 	int nid;
 
-	/* Avoid TLB flush if possible */
-	if (pte_protnone(oldpte))
-		goto skip;
-
 	if (!folio)
 		goto skip;
 
@@ -307,23 +302,25 @@ static long change_pte_range(struct mmu_gather *tlb,
 		struct page *page;
 		pte_t ptent;
 
+		/* Already in the desired state. */
+		if (prot_numa && pte_protnone(oldpte))
+			continue;
+
 		page = vm_normal_page(vma, addr, oldpte);
 		if (page)
 			folio = page_folio(page);
+
 		/*
 		 * Avoid trapping faults against the zero or KSM
 		 * pages. See similar comment in change_huge_pmd.
 		 */
-		if (prot_numa) {
-			int ret = prot_numa_skip(vma, addr, oldpte, pte,
-						 target_node, folio);
-			if (ret) {
-				/* determine batch to skip */
-				nr_ptes = mprotect_folio_pte_batch(folio,
-						pte, oldpte, max_nr_ptes, /* flags = */ 0);
-				continue;
-			}
+		if (prot_numa &&
+		    prot_numa_skip(vma, target_node, folio)) {
+			/* determine batch to skip */
+			nr_ptes = mprotect_folio_pte_batch(folio,
+					pte, oldpte, max_nr_ptes, /* flags = */ 0);
+			continue;
 		}
 
 		nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);