mirror of https://github.com/torvalds/linux.git
memcg: remove __mod_lruvec_state
__mod_lruvec_state() is already safe against irqs, so there is no need to
have a separate interface (i.e. mod_lruvec_state) which wraps calls to it
with irq disabling and reenabling.  Let's rename __mod_lruvec_state() to
mod_lruvec_state().

Link: https://lkml.kernel.org/r/20251110232008.1352063-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 5b3eb779a2
parent 469241fe76
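In short, the patch collapses two entry points into one. A minimal
before/after sketch, distilled from the hunks below (declarations only,
plus the wrapper being deleted):

	/* Before: the worker was already irq-safe, so the wrapper's
	 * local_irq_save()/local_irq_restore() bracket bought nothing. */
	void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
				int val);

	static inline void mod_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
	{
		unsigned long flags;

		local_irq_save(flags);
		__mod_lruvec_state(lruvec, idx, val);
		local_irq_restore(flags);
	}

	/* After: one entry point, kept under the shorter name. */
	void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);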
include/linux/mm_inline.h

@@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
 	lockdep_assert_held(&lruvec->lru_lock);
 	WARN_ON_ONCE(nr_pages != (int)nr_pages);
 
-	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+	mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
 	__mod_zone_page_state(&pgdat->node_zones[zid],
 				NR_ZONE_LRU_BASE + lru, nr_pages);
 }
include/linux/vmstat.h

@@ -520,19 +520,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 
 #ifdef CONFIG_MEMCG
 
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
 
-static inline void mod_lruvec_state(struct lruvec *lruvec,
-				    enum node_stat_item idx, int val)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__mod_lruvec_state(lruvec, idx, val);
-	local_irq_restore(flags);
-}
-
 void __lruvec_stat_mod_folio(struct folio *folio,
 			     enum node_stat_item idx, int val);
 
@@ -554,12 +544,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #else
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-				      enum node_stat_item idx, int val)
-{
-	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
mm/memcontrol.c

@@ -757,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
 }
 
 /**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
  * @lruvec: the lruvec
  * @idx: the stat item
  * @val: delta to add to the counter, can be negative
@@ -766,7 +766,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
  * function updates the all three counters that are affected by a
  * change of state at this level: per-node, per-cgroup, per-lruvec.
  */
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 		      int val)
 {
 	/* Update node */
@@ -794,7 +794,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 	}
 
 	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	__mod_lruvec_state(lruvec, idx, val);
+	mod_lruvec_state(lruvec, idx, val);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
@@ -818,7 +818,7 @@ void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 		mod_node_page_state(pgdat, idx, val);
 	} else {
 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
-		__mod_lruvec_state(lruvec, idx, val);
+		mod_lruvec_state(lruvec, idx, val);
 	}
 	rcu_read_unlock();
 }
mm/migrate.c
@@ -675,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
-		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+		mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+		mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
-			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
-			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+			mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+			mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
 
 			if (folio_test_pmd_mappable(folio)) {
-				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
-				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+				mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+				mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
 			}
 		}
 #ifdef CONFIG_SWAP
 		if (folio_test_swapcache(folio)) {
-			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
-			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+			mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+			mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
 		}
 #endif
 		if (dirty && mapping_can_writeback(mapping)) {
-			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+			mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
-			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+			mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
mm/vmscan.c

@@ -2018,7 +2018,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	spin_lock_irq(&lruvec->lru_lock);
 	move_folios_to_lru(lruvec, &folio_list);
 
-	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+	mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
 			   stat.nr_demoted);
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -4744,7 +4744,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
 		reset_batch_size(walk);
 	}
 
-	__mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+	mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
 			   stat.nr_demoted);
 
 	item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
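Note that these reclaim paths call the renamed helper while already
holding lruvec->lru_lock taken with spin_lock_irq(), i.e. with irqs
disabled. That stays correct because mod_lruvec_state() does not touch
irq state at all; per the commit message, the underlying per-cpu counter
updates it performs are already safe against irqs.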