diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index b9f0c9e3f684..2750c88e7225 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -697,7 +697,6 @@ static void damos_va_migrate_dests_add(struct folio *folio,
 	list_add(&folio->lru, &migration_lists[i]);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 		unsigned long next, struct mm_walk *walk)
 {
@@ -707,58 +706,49 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct folio *folio;
 	spinlock_t *ptl;
-	pmd_t pmde;
+	pte_t *start_pte, *pte, ptent;
+	int nr;
 
-	ptl = pmd_lock(walk->mm, pmd);
-	pmde = pmdp_get(pmd);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	ptl = pmd_trans_huge_lock(pmd, walk->vma);
+	if (ptl) {
+		pmd_t pmde = pmdp_get(pmd);
 
-	if (!pmd_present(pmde) || !pmd_trans_huge(pmde))
-		goto unlock;
+		if (!pmd_present(pmde))
+			goto huge_out;
+		folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
+		if (!folio)
+			goto huge_out;
+		if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+			goto huge_out;
+		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				migration_lists);
+huge_out:
+		spin_unlock(ptl);
+		return 0;
+	}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-	/* Tell page walk code to not split the PMD */
-	walk->action = ACTION_CONTINUE;
-
-	folio = vm_normal_folio_pmd(walk->vma, addr, pmde);
-	if (!folio)
-		goto unlock;
-
-	if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
-		goto unlock;
-
-	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
-			migration_lists);
-
-unlock:
-	spin_unlock(ptl);
-	return 0;
-}
-#else
-#define damos_va_migrate_pmd_entry NULL
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
-		unsigned long next, struct mm_walk *walk)
-{
-	struct damos_va_migrate_private *priv = walk->private;
-	struct list_head *migration_lists = priv->migration_lists;
-	struct damos *s = priv->scheme;
-	struct damos_migrate_dests *dests = &s->migrate_dests;
-	struct folio *folio;
-	pte_t ptent;
-
-	ptent = ptep_get(pte);
-	if (pte_none(ptent) || !pte_present(ptent))
+	start_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte)
 		return 0;
 
-	folio = vm_normal_folio(walk->vma, addr, ptent);
-	if (!folio)
-		return 0;
+	for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
+		nr = 1;
+		ptent = ptep_get(pte);
 
-	if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
-		return 0;
-
-	damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
-			migration_lists);
+		if (pte_none(ptent) || !pte_present(ptent))
+			continue;
+		folio = vm_normal_folio(walk->vma, addr, ptent);
+		if (!folio)
+			continue;
+		if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+			continue;
+		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
+				migration_lists);
+		nr = folio_nr_pages(folio);
+	}
+	pte_unmap_unlock(start_pte, ptl);
 	return 0;
 }
 
@@ -824,7 +814,7 @@ static unsigned long damos_va_migrate(struct damon_target *target,
 	struct damos_migrate_dests *dests = &s->migrate_dests;
 	struct mm_walk_ops walk_ops = {
 		.pmd_entry = damos_va_migrate_pmd_entry,
-		.pte_entry = damos_va_migrate_pte_entry,
+		.pte_entry = NULL,
 		.walk_lock = PGWALK_RDLOCK,
 	};
 