mm/damon/vaddr: consistently use only pmd_entry for damos_migrate
For page table walks, it is usual [1] to have only one pmd entry function.
The vaddr.c code for DAMOS_MIGRATE_{HOT,COLD} is not following the
pattern. Instead, it uses both pmd and pte entry functions without a
special reason. Refactor it to use only the pmd entry function, to make
the code under mm/ more consistent.
Link: https://lkml.kernel.org/r/20251112154114.66053-6-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Suggested-by: David Hildenbrand <david@kernel.org>
Cc: Bill Wendling <morbo@google.com>
Cc: Brendan Higgins <brendan.higgins@linux.dev>
Cc: David Gow <davidgow@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Justin Stitt <justinstitt@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 09efc56a3b
parent f0eb046cd3
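For context, the pattern [1] alluded to above: pagewalk users typically install only a .pmd_entry callback, which handles a PMD-mapped huge page in place and otherwise iterates the PTEs under that PMD itself, so no separate .pte_entry is needed. A minimal sketch of that shape, not code from this patch (the callback name and the elided per-folio work are illustrative only):

	static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
	{
		pte_t *start_pte, *pte;
		spinlock_t *ptl;

	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* Non-NULL only when the PMD maps a huge page; handle it whole. */
		ptl = pmd_trans_huge_lock(pmd, walk->vma);
		if (ptl) {
			/* ... process the PMD-mapped folio ... */
			spin_unlock(ptl);
			return 0;
		}
	#endif
		/* Otherwise iterate the PTEs mapped under this PMD. */
		start_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (!pte)
			return 0;
		for (; addr < next; pte++, addr += PAGE_SIZE) {
			/* ... process each present PTE ... */
		}
		pte_unmap_unlock(start_pte, ptl);
		return 0;
	}

With this shape, mm_walk_ops needs only .pmd_entry set, which is what the diff below moves damos_migrate to.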
@@ -697,7 +697,6 @@ static void damos_va_migrate_dests_add(struct folio *folio,
 		list_add(&folio->lru, &migration_lists[i]);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
@@ -707,58 +706,49 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	spinlock_t *ptl;
-	pmd_t pmde;
+	pte_t *start_pte, *pte, ptent;
+	int nr;
 
-	ptl = pmd_lock(walk->mm, pmd);
-	pmde = pmdp_get(pmd);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	ptl = pmd_trans_huge_lock(pmd, walk->vma);
+	if (ptl) {
+		pmd_t pmde = pmdp_get(pmd);
 
-	if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
-		goto unlock;
+		if (!pmd_present(pmde))
+			goto huge_out;
+		folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
+		if (!folio)
+			goto huge_out;
+		if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+			goto huge_out;
+		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				migration_lists);
+huge_out:
+		spin_unlock(ptl);
+		return 0;
+	}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-	/* Tell page walk code to not split the PMD */
-	walk->action = ACTION_CONTINUE;
-
-	folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
-	if (!folio)
-		goto unlock;
-
-	if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
-		goto unlock;
-
-	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
-			migration_lists);
-
-unlock:
-	spin_unlock(ptl);
-	return 0;
-}
-#else
-#define damos_va_migrate_pmd_entry NULL
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
-		unsigned long next, struct mm_walk *walk)
-{
-	struct damos_va_migrate_private *priv = walk->private;
-	struct list_head *migration_lists = priv->migration_lists;
-	struct damos *s = priv->scheme;
-	struct damos_migrate_dests *dests = &s->migrate_dests;
-	struct folio *folio;
-	pte_t ptent;
-
-	ptent = ptep_get(pte);
-	if (pte_none(ptent) || !pte_present(ptent))
+	start_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte)
 		return 0;
 
-	folio = vm_normal_folio(walk->vma, addr, ptent);
-	if (!folio)
-		return 0;
+	for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
+		nr = 1;
+		ptent = ptep_get(pte);
 
-	if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
-		return 0;
+		if (pte_none(ptent) || !pte_present(ptent))
+			continue;
 
-	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
-			migration_lists);
+		folio = vm_normal_folio(walk->vma, addr, ptent);
+		if (!folio)
+			continue;
+
+		if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+			continue;
+
+		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				migration_lists);
+		nr = folio_nr_pages(folio);
+	}
+	pte_unmap_unlock(start_pte, ptl);
 	return 0;
 }
 
@@ -824,7 +814,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_migrate_pmd_entry,
-		.pte_entry = damos_va_migrate_pte_entry,
+		.pte_entry = NULL,
 		.walk_lock = PGWALK_RDLOCK,
 	};
 
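For reference, these walk ops are driven from damos_va_migrate() through the pagewalk API. A rough sketch of the call site, using only names visible in the diff above (the exact bounds and locking in vaddr.c may differ):

	struct damos_va_migrate_private priv = {
		.migration_lists = migration_lists,
		.scheme = s,
	};

	mmap_read_lock(mm);	/* .walk_lock = PGWALK_RDLOCK expects this */
	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
	mmap_read_unlock(mm);

Because .pte_entry is NULL, the walk core no longer splits the PMD or descends on its own, so the old ACTION_CONTINUE hint becomes unnecessary: damos_va_migrate_pmd_entry() alone covers both the THP case and the per-PTE loop.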