mm/shmem, swap: rework swap entry and index calculation for large swapin

Instead of calculating the swap entry differently in different swapin
paths, calculate it early, before the swap cache lookup, and use it for
both the lookup and the later swapin.  Once swapin has brought in a
folio, simply round the swap entry down to the size of the folio.
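
As a rough illustration of the arithmetic (a standalone userspace sketch,
with plain unsigned longs standing in for swp_entry_t and pgoff_t and a
local round_down() macro rather than the kernel helper; the numbers are
made up):

#include <stdio.h>

/*
 * Stand-in for the kernel's round_down(): align x down to a multiple of
 * the power-of-two y.
 */
#define round_down(x, y)	((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	unsigned long index = 35;	/* faulting page index in the mapping */
	int order = 4;			/* mapping holds an order-4 (16-page) swap entry */
	unsigned long entry = 512;	/* first swap slot of that entry, 16-aligned */

	/* Derive the sub-entry backing 'index' before any cache lookup. */
	unsigned long offset = index - round_down(index, 1UL << order);
	unsigned long swap = entry + offset;			/* 512 + 3 = 515 */

	/* After swapin returns a folio, round down by the folio's size. */
	unsigned long nr_pages = 4;				/* say swapin produced an order-2 folio */
	unsigned long folio_swap = round_down(swap, nr_pages);	/* 512 */
	unsigned long folio_index = round_down(index, nr_pages); /* 32 */

	printf("sub-entry %lu, folio swap %lu, folio index %lu\n",
	       swap, folio_swap, folio_index);
	return 0;
}

The values are arbitrary; the point is that the lookup key follows the
faulting index, and the final swap/index pair is realigned to whatever
folio swapin actually produced.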

This is simple and effective enough to verify the swap value, since a
folio's swap entry is always aligned to its size.  Any kind of parallel
split or race is acceptable, because the final shmem_add_to_page_cache
ensures that all entries covered by the folio are correct, so there will
be no data corruption.

This also prevents false positive cache lookups.  If a shmem read
request's index points to the middle of a large swap entry, shmem
previously tried the swap cache lookup using the large swap entry's
starting value (which is the first sub swap entry of this large entry).
That led to false positive lookup results if only the first few swap
entries were cached but the actual requested swap entry pointed to by the
index was uncached.  This is not a rare event, as swap readahead always
tries to cache order 0 folios when possible.
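
As a concrete illustration of that false positive (made-up numbers, in
the same simplified userspace style as above): the mapping holds an
order-4 entry whose first slot is 512, readahead has cached only slots
512..515 as order 0 folios, and a fault arrives for an index 7 pages into
the entry.  Comparing the old lookup key with the new one:

#include <stdio.h>

#define round_down(x, y)	((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	unsigned long base = 512;	/* large entry's first swap slot */
	int order = 4;			/* the entry covers 16 pages */
	unsigned long index = 39;	/* round_down(39, 16) = 32, so offset 7 */
	unsigned long cached_end = 516; /* only slots 512..515 are in the swap cache */

	unsigned long old_key = base;	/* 512: cache hit, but the wrong folio */
	unsigned long new_key = base + (index - round_down(index, 1UL << order)); /* 519 */

	printf("old lookup slot %lu (%s), new lookup slot %lu (%s)\n",
	       old_key, old_key < cached_end ? "hit" : "miss",
	       new_key, new_key < cached_end ? "hit" : "miss");
	return 0;
}

With the old key, the lookup hits the order 0 folio for slot 512 even
though the data for slot 519 is not in the swap cache at all; keying the
lookup by the sub-entry turns that into a proper miss.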

And this shouldn't cause any increase in repeated faults.  No matter how
the shmem mapping is split in parallel, as long as the mapping still
contains the right entries, the swapin will succeed.

The final object size and stack usage are also reduced due to simplified
code:

./scripts/bloat-o-meter mm/shmem.o.old mm/shmem.o
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-145 (-145)
Function                                     old     new   delta
shmem_swapin_folio                          4056    3911    -145
Total: Before=33242, After=33097, chg -0.44%

Stack usage (Before vs After):
mm/shmem.c:2314:12:shmem_swapin_folio   264     static
mm/shmem.c:2314:12:shmem_swapin_folio   256     static

While at it, round down the index too if the swap entry is rounded down.
The index is used either for folio reallocation or for confirming the
mapping content.  In either case, it should be aligned with the swap
folio.

Link: https://lkml.kernel.org/r/20250728075306.12704-8-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 1 file changed, 33 insertions(+), 34 deletions(-)

--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2302,7 +2302,7 @@ static int shmem_split_large_entry(struct inode *inode, pgoff_t index,
 	if (xas_error(&xas))
 		return xas_error(&xas);
 
-	return entry_order;
+	return 0;
 }
 
 /*
@@ -2323,7 +2323,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	struct swap_info_struct *si;
 	struct folio *folio = NULL;
 	bool skip_swapcache = false;
-	int error, nr_pages, order, split_order;
+	int error, nr_pages, order;
 	pgoff_t offset;
 
 	VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
@@ -2331,11 +2331,11 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	swap = index_entry;
 	*foliop = NULL;
 
-	if (is_poisoned_swp_entry(swap))
+	if (is_poisoned_swp_entry(index_entry))
 		return -EIO;
 
-	si = get_swap_device(swap);
-	order = shmem_confirm_swap(mapping, index, swap);
+	si = get_swap_device(index_entry);
+	order = shmem_confirm_swap(mapping, index, index_entry);
 	if (unlikely(!si)) {
 		if (order < 0)
 			return -EEXIST;
@@ -2347,6 +2347,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		return -EEXIST;
 	}
 
+	/* index may point to the middle of a large entry, get the sub entry */
+	if (order) {
+		offset = index - round_down(index, 1 << order);
+		swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+	}
+
 	/* Look it up and read it in.. */
 	folio = swap_cache_get_folio(swap, NULL, 0);
 	if (!folio) {
@@ -2359,7 +2365,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
 			/* Direct swapin skipping swap cache & readahead */
-			folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
+			folio = shmem_swap_alloc_folio(inode, vma, index,
+						       index_entry, order, gfp);
 			if (IS_ERR(folio)) {
 				error = PTR_ERR(folio);
 				folio = NULL;
@@ -2367,16 +2374,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 			}
 			skip_swapcache = true;
 		} else {
-			/*
-			 * Cached swapin only supports order 0 folio, it is
-			 * necessary to recalculate the new swap entry based on
-			 * the offset, as the swapin index might be unalgined.
-			 */
-			if (order) {
-				offset = index - round_down(index, 1 << order);
-				swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
-			}
-
+			/* Cached swapin only supports order 0 folio */
 			folio = shmem_swapin_cluster(swap, gfp, info, index);
 			if (!folio) {
 				error = -ENOMEM;
@@ -2384,6 +2382,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 			}
 		}
 	}
+
 	if (order > folio_order(folio)) {
 		/*
 		 * Swapin may get smaller folios due to various reasons:
@@ -2393,24 +2392,25 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		 * large swap entries. In such cases, we should split the
 		 * large swap entry to prevent possible data corruption.
 		 */
-		split_order = shmem_split_large_entry(inode, index, index_entry, gfp);
-		if (split_order < 0) {
-			error = split_order;
+		error = shmem_split_large_entry(inode, index, index_entry, gfp);
+		if (error)
 			goto failed_nolock;
-		}
+	}
 
-		/*
-		 * If the large swap entry has already been split, it is
-		 * necessary to recalculate the new swap entry based on
-		 * the old order alignment.
-		 */
-		if (split_order > 0) {
-			offset = index - round_down(index, 1 << split_order);
-			swap = swp_entry(swp_type(swap), swp_offset(index_entry) + offset);
-		}
-	} else if (order < folio_order(folio)) {
-		swap.val = round_down(swap.val, 1 << folio_order(folio));
-		index = round_down(index, 1 << folio_order(folio));
+	/*
+	 * If the folio is large, round down swap and index by folio size.
+	 * No matter what race occurs, the swap layer ensures we either get
+	 * a valid folio that has its swap entry aligned by size, or a
+	 * temporarily invalid one which we'll abort very soon and retry.
+	 *
+	 * shmem_add_to_page_cache ensures the whole range contains expected
+	 * entries and prevents any corruption, so any race split is fine
+	 * too, it will succeed as long as the entries are still there.
+	 */
+	nr_pages = folio_nr_pages(folio);
+	if (nr_pages > 1) {
+		swap.val = round_down(swap.val, nr_pages);
+		index = round_down(index, nr_pages);
 	}
 
 	/*
@@ -2446,8 +2446,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		goto failed;
 	}
 
-	error = shmem_add_to_page_cache(folio, mapping,
-					round_down(index, nr_pages),
+	error = shmem_add_to_page_cache(folio, mapping, index,
 					swp_to_radix_entry(swap), gfp);
 	if (error)
 		goto failed;