mirror of https://github.com/torvalds/linux.git
mm: revert "mm/gup: clear the LRU flag of a page before adding to LRU batch"
This reverts commit 33dfe9204f29: now that
collect_longterm_unpinnable_folios() is checking ref_count instead of lru,
and mlock/munlock do not participate in the revised LRU flag clearing,
those changes are misleading, and enlarge the window during which
mlock/munlock may miss an mlock_count update.

It is possible (I'd hesitate to claim probable) that the greater likelihood
of missed mlock_count updates would explain the "Realtime threads delayed
due to kcompactd0" observed on 6.12 in the Link below.  If that is the
case, this reversion will help; but a complete solution needs also a
further patch, beyond the scope of this series.

Included some 80-column cleanup around folio_batch_add_and_move().

The role of folio_test_clear_lru() (before taking per-memcg lru_lock) is
questionable since 6.13 removed mem_cgroup_move_account() etc; but perhaps
there are still some races which need it - not examined here.

Link: https://lore.kernel.org/linux-mm/DU0PR01MB10385345F7153F334100981888259A@DU0PR01MB10385.eurprd01.prod.exchangelabs.com/
Link: https://lkml.kernel.org/r/05905d7b-ed14-68b1-79d8-bdec30367eba@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Keir Fraser <keirf@google.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Li Zhe <lizhe.67@bytedance.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shivank Garg <shivankg@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: Will Deacon <will@kernel.org>
Cc: yangge <yangge1116@126.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a09a8a1fbb
commit afb99e9f50

 mm/swap.c | 50
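To make the window described in the commit message concrete, the two placements of the LRU-flag clearing are sketched here, assembled from the hunks of the diff that follows (simplified excerpts, not standalone compilable code; declarations and unchanged lines are elided). Before this revert the flag was cleared as soon as a folio was queued on a per-CPU batch, so it could stay clear until the batch was drained; after the revert it is cleared only at drain time, just before the folio is moved under the lru_lock.

/* Before this revert: LRU flag cleared when the folio is queued on a batch */
static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
                struct folio *folio, move_fn_t move_fn,
                bool on_lru, bool disable_irq)
{
        unsigned long flags;

        if (on_lru && !folio_test_clear_lru(folio))
                return;
        /* ... the folio then sits in the per-CPU batch with the flag clear ... */
}

/* After this revert: LRU flag cleared only while the batch is drained */
static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
        /* ... */
        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* block memcg migration while the folio moves between lru */
                if (move_fn != lru_add && !folio_test_clear_lru(folio))
                        continue;

                folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
                move_fn(lruvec, folio);
        }
        /* ... */
}

lru_add is excluded from the restored check because folios being added to the LRU do not have the flag set yet; the callers instead regain their !folio_test_lru() early-return tests, as the hunks below show.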
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -164,6 +164,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
@@ -176,14 +180,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }
 
 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
 	unsigned long flags;
 
-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
 	folio_get(folio);
 
 	if (disable_irq)
@@ -191,8 +191,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 	else
 		local_lock(&cpu_fbatches.lock);
 
-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    folio_test_large(folio) || lru_cache_disabled())
 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
 	if (disable_irq)
@@ -201,13 +201,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 		local_unlock(&cpu_fbatches.lock);
 }
 
-#define folio_batch_add_and_move(folio, op, on_lru)					\
-	__folio_batch_add_and_move(							\
-		&cpu_fbatches.op,							\
-		folio,									\
-		op,									\
-		on_lru,									\
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+#define folio_batch_add_and_move(folio, op)						\
+	__folio_batch_add_and_move(							\
+		&cpu_fbatches.op,							\
+		folio,									\
+		op,									\
+		offsetof(struct cpu_fbatches, op) >=					\
+		offsetof(struct cpu_fbatches, lock_irq)					\
 	)
 
 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
@@ -231,10 +231,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }
 
 void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
@@ -328,10 +328,11 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }
 
 #else
@@ -507,7 +508,7 @@ void folio_add_lru(struct folio *folio)
 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 		folio_set_active(folio);
 
-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);
 
@@ -685,13 +686,13 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
 	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() && lru_gen_clear_refs(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }
 
 /*
@@ -704,13 +705,13 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
 	if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }
 
 /**
@@ -723,10 +724,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }
 
 void lru_add_drain(void)
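A side note on the 80-column cleanup around folio_batch_add_and_move(): the macro derives the disable_irq argument from offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq), i.e. batches laid out after the lock_irq member are the ones that must be filled with interrupts disabled, so no extra flag needs to be passed by the callers. Below is a minimal userspace sketch of that idiom; the struct and member names are illustrative stand-ins for this demo, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative stand-in for struct cpu_fbatches: in mm/swap.c the batches
 * that may be filled from interrupt context are declared after lock_irq,
 * so a member's position alone tells the macro whether to disable IRQs.
 * The member list and types here are simplified for this demo.
 */
struct fbatches_demo {
        int lock;               /* stands in for local_lock_t lock */
        int lru_add;            /* batches taken under the plain lock */
        int lru_lazyfree;
        int lock_irq;           /* stands in for local_lock_t lock_irq */
        int lru_move_tail;      /* batch taken with interrupts disabled */
};

/* same comparison as the kernel macro, applied to the demo struct */
#define NEEDS_IRQ_DISABLE(member)                               \
        (offsetof(struct fbatches_demo, member) >=              \
         offsetof(struct fbatches_demo, lock_irq))

int main(void)
{
        printf("lru_add:       disable_irq=%d\n", NEEDS_IRQ_DISABLE(lru_add));
        printf("lru_move_tail: disable_irq=%d\n", NEEDS_IRQ_DISABLE(lru_move_tail));
        return 0;
}

Run standalone, this prints disable_irq=0 for lru_add and disable_irq=1 for lru_move_tail, matching which local_lock variant __folio_batch_add_and_move() ends up taking.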