memcg: remove __mod_lruvec_state

__mod_lruvec_state() is already irq-safe, so there is no need for a
separate interface (i.e.  mod_lruvec_state()) that wraps calls to it with
irq disabling and re-enabling.  Let's rename __mod_lruvec_state() to
mod_lruvec_state().
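
For illustration, a minimal caller-side sketch (stat_update_example is a
hypothetical helper; only mod_lruvec_state() comes from the patched API):

    /* A single entry point now serves both contexts; it is safe to
     * call with or without irqs disabled at the call site. */
    static void stat_update_example(struct lruvec *lruvec, long nr)
    {
            mod_lruvec_state(lruvec, NR_LRU_BASE + LRU_INACTIVE_FILE, nr);
    }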

Link: https://lkml.kernel.org/r/20251110232008.1352063-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
5 changed files with 18 additions and 34 deletions

include/linux/mm_inline.h

@@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
 	lockdep_assert_held(&lruvec->lru_lock);
 	WARN_ON_ONCE(nr_pages != (int)nr_pages);
 
-	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+	mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
 	__mod_zone_page_state(&pgdat->node_zones[zid],
 				NR_ZONE_LRU_BASE + lru, nr_pages);
 }

include/linux/vmstat.h

@@ -520,19 +520,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 
 #ifdef CONFIG_MEMCG
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
-
-static inline void mod_lruvec_state(struct lruvec *lruvec,
-				    enum node_stat_item idx, int val)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__mod_lruvec_state(lruvec, idx, val);
-	local_irq_restore(flags);
-}
 
 void __lruvec_stat_mod_folio(struct folio *folio,
 			     enum node_stat_item idx, int val);
@@ -554,12 +544,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #else
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-				      enum node_stat_item idx, int val)
-{
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
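
With CONFIG_MEMCG=n, the surviving stub should now reduce to a plain
per-node update.  A sketch of its expected body, assuming it mirrors the
removed __mod_lruvec_state() stub above:

    static inline void mod_lruvec_state(struct lruvec *lruvec,
                                        enum node_stat_item idx, int val)
    {
            /* No memcg: only the per-node counter exists. */
            mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
    }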

mm/memcontrol.c

@@ -757,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
 }
 
 /**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
  * @lruvec: the lruvec
  * @idx: the stat item
  * @val: delta to add to the counter, can be negative
@@ -766,7 +766,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
  * function updates all three counters that are affected by a
  * change of state at this level: per-node, per-cgroup, per-lruvec.
  */
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val)
 {
 	/* Update node */
@@ -794,7 +794,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 	}
 
 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	__mod_lruvec_state(lruvec, idx, val);
+	mod_lruvec_state(lruvec, idx, val);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
@@ -818,7 +818,7 @@ void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 		mod_node_page_state(pgdat, idx, val);
 	} else {
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
-		__mod_lruvec_state(lruvec, idx, val);
+		mod_lruvec_state(lruvec, idx, val);
 	}
 	rcu_read_unlock();
 }
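
Putting the kerneldoc above into code form, a condensed sketch of the
renamed function's fan-out (abridged; the mem_cgroup_disabled() fast path
is assumed from mainline):

    void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                          int val)
    {
            /* Update node */
            mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

            /* Update memcg and lruvec */
            if (!mem_cgroup_disabled())
                    mod_memcg_lruvec_state(lruvec, idx, val);
    }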

mm/migrate.c

@@ -675,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
-		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+		mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
-			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
-			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+			mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 			if (folio_test_pmd_mappable(folio)) {
-				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
-				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+				mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+				mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
 			}
 		}
 #ifdef CONFIG_SWAP
 		if (folio_test_swapcache(folio)) {
-			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
-			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+			mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+			mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 		}
 #endif
 		if (dirty && mapping_can_writeback(mapping)) {
-			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
-			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}

mm/vmscan.c

@@ -2018,7 +2018,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	spin_lock_irq(&lruvec->lru_lock);
 	move_folios_to_lru(lruvec, &folio_list);
 
-	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+	mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
 			   stat.nr_demoted);
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -4744,7 +4744,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 		reset_batch_size(walk);
 	}
 
-	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+	mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
 			   stat.nr_demoted);
 
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);