mm: mprotect: avoid unnecessary struct page accessing if pte_protnone()
If pte_protnone() is true, we can avoid unnecessary struct page
accesses and reduce the cache footprint when scanning page tables for
prot numa.  A similar change was made before, see commit a818f5363a
("autonuma: reduce cache footprint when scanning page tables").
Link: https://lkml.kernel.org/r/20251023113737.3572790-3-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 6e97624dac
parent 03aa8e4f27
diff --git a/mm/mprotect.c b/mm/mprotect.c
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,18 +118,13 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
 }
 
-static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
-			   pte_t oldpte, pte_t *pte, int target_node,
+static bool prot_numa_skip(struct vm_area_struct *vma, int target_node,
 			   struct folio *folio)
 {
 	bool ret = true;
 	bool toptier;
 	int nid;
 
-	/* Avoid TLB flush if possible */
-	if (pte_protnone(oldpte))
-		goto skip;
-
 	if (!folio)
 		goto skip;
 
@@ -307,23 +302,25 @@ static long change_pte_range(struct mmu_gather *tlb,
 			struct page *page;
 			pte_t ptent;
 
+			/* Already in the desired state. */
+			if (prot_numa && pte_protnone(oldpte))
+				continue;
+
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page)
 				folio = page_folio(page);
+
 			/*
 			 * Avoid trapping faults against the zero or KSM
 			 * pages. See similar comment in change_huge_pmd.
 			 */
-			if (prot_numa) {
-				int ret = prot_numa_skip(vma, addr, oldpte, pte,
-							 target_node, folio);
-				if (ret) {
+			if (prot_numa &&
+			    prot_numa_skip(vma, target_node, folio)) {
 
-					/* determine batch to skip */
-					nr_ptes = mprotect_folio_pte_batch(folio,
-						pte, oldpte, max_nr_ptes, /* flags = */ 0);
-					continue;
-				}
+				/* determine batch to skip */
+				nr_ptes = mprotect_folio_pte_batch(folio,
+					pte, oldpte, max_nr_ptes, /* flags = */ 0);
+				continue;
 			}
 
 			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
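For illustration only, a minimal user-space model of the idea, not kernel
code: the names pte_slot, page_meta and meta_for() are hypothetical
stand-ins for the kernel's PTE, struct page, and the
vm_normal_page()/page_folio() lookup.  The point it demonstrates is the
one the patch makes: check the protnone state first, so entries that are
already in the desired state never pull a per-page metadata cache line
into the scan.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct page: one cache line of metadata per frame. */
	struct page_meta {
		int nid;
		char pad[60];
	};

	/* Stand-in for a PTE: frame number plus a PROT_NONE-style flag. */
	struct pte_slot {
		unsigned long pfn;
		bool protnone;
	};

	static struct page_meta meta[8];

	/* Models the vm_normal_page()/page_folio() lookup we want to avoid. */
	static struct page_meta *meta_for(const struct pte_slot *pte)
	{
		return &meta[pte->pfn];
	}

	static void scan_for_prot_numa(struct pte_slot *ptes, int n)
	{
		for (int i = 0; i < n; i++) {
			/* Already in the desired state: skip before touching metadata. */
			if (ptes[i].protnone)
				continue;

			struct page_meta *m = meta_for(&ptes[i]);
			printf("marking pte %d PROT_NONE (nid %d)\n", i, m->nid);
			ptes[i].protnone = true;
		}
	}

	int main(void)
	{
		struct pte_slot ptes[8] = {0};

		for (int i = 0; i < 8; i++)
			ptes[i].pfn = i;
		ptes[2].protnone = true;	/* these two never reach meta_for() */
		ptes[5].protnone = true;
		scan_for_prot_numa(ptes, 8);
		return 0;
	}

Before the patch, the kernel path resolved the folio via
vm_normal_page() first and only then let prot_numa_skip() bail out on
pte_protnone(); hoisting the check above the lookup, as the diff above
does, keeps already-PROT_NONE entries off the struct page cache lines
entirely.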