mirror of https://github.com/torvalds/linux.git
memcg: remove __lruvec_stat_mod_folio
__lruvec_stat_mod_folio() is already safe against irqs, so there is no need to have a separate interface (i.e. lruvec_stat_mod_folio) which wraps calls to it with irq disabling and reenabling. Let's rename __lruvec_stat_mod_folio() to lruvec_stat_mod_folio().

Link: https://lkml.kernel.org/r/20251110232008.1352063-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5b3eb779a2
commit c1bd09994c
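The interface being deleted is the inline wrapper below; the formerly double-underscored helper takes over its name. A condensed sketch of the change, pieced together from the hunks that follow (not a complete listing of the header):

	/* Before this patch: the public name disabled interrupts around the
	 * double-underscore helper, even though that helper is already irq-safe. */
	static inline void lruvec_stat_mod_folio(struct folio *folio,
						 enum node_stat_item idx, int val)
	{
		unsigned long flags;

		local_irq_save(flags);
		__lruvec_stat_mod_folio(folio, idx, val);	/* irq-safe on its own */
		local_irq_restore(flags);
	}

After the rename the wrapper is gone, and callers such as filemap_unaccount_folio() simply call lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr) directly, with no irq save/restore around the stat update.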
include/linux/vmstat.h

@@ -523,19 +523,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 		      int val);
 
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
 			     enum node_stat_item idx, int val);
 
-static inline void lruvec_stat_mod_folio(struct folio *folio,
-					 enum node_stat_item idx, int val)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__lruvec_stat_mod_folio(folio, idx, val);
-	local_irq_restore(flags);
-}
-
 static inline void mod_lruvec_page_state(struct page *page,
 					 enum node_stat_item idx, int val)
 {
@@ -550,12 +540,6 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
 	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
 }
 
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
-					   enum node_stat_item idx, int val)
-{
-	mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
 static inline void lruvec_stat_mod_folio(struct folio *folio,
 					 enum node_stat_item idx, int val)
 {
@@ -570,18 +554,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #endif /* CONFIG_MEMCG */
 
-static inline void __lruvec_stat_add_folio(struct folio *folio,
-					   enum node_stat_item idx)
-{
-	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
-					   enum node_stat_item idx)
-{
-	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
-
 static inline void lruvec_stat_add_folio(struct folio *folio,
 					 enum node_stat_item idx)
 {
mm/filemap.c (20 changed lines)
@@ -182,13 +182,13 @@ static void filemap_unaccount_folio(struct address_space *mapping,
 
 	nr = folio_nr_pages(folio);
 
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+	lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
 	if (folio_test_swapbacked(folio)) {
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+		lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
 		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+			lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
 	} else if (folio_test_pmd_mappable(folio)) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+		lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
 		filemap_nr_thps_dec(mapping);
 	}
 	if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
@@ -844,13 +844,13 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
 	old->mapping = NULL;
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!folio_test_hugetlb(old))
-		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+		lruvec_stat_sub_folio(old, NR_FILE_PAGES);
 	if (!folio_test_hugetlb(new))
-		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
+		lruvec_stat_add_folio(new, NR_FILE_PAGES);
 	if (folio_test_swapbacked(old))
-		__lruvec_stat_sub_folio(old, NR_SHMEM);
+		lruvec_stat_sub_folio(old, NR_SHMEM);
 	if (folio_test_swapbacked(new))
-		__lruvec_stat_add_folio(new, NR_SHMEM);
+		lruvec_stat_add_folio(new, NR_SHMEM);
 	xas_unlock_irq(&xas);
 	if (free_folio)
 		free_folio(old);
@@ -933,9 +933,9 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 
 	/* hugetlb pages do not participate in page cache accounting */
 	if (!huge) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+		lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
 		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
 					NR_FILE_THPS, nr);
 	}
 
mm/huge_memory.c
@@ -3783,10 +3783,10 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 	if (folio_test_pmd_mappable(folio) &&
 	    new_order < HPAGE_PMD_ORDER) {
 		if (folio_test_swapbacked(folio)) {
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
 					NR_SHMEM_THPS, -nr);
 		} else {
-			__lruvec_stat_mod_folio(folio,
+			lruvec_stat_mod_folio(folio,
 					NR_FILE_THPS, -nr);
 			filemap_nr_thps_dec(mapping);
 		}
mm/khugepaged.c
@@ -2195,14 +2195,14 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	}
 
 	if (is_shmem)
-		__lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+		lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
 	else
-		__lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+		lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
 
 	if (nr_none) {
-		__lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+		lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
 		/* nr_none is always 0 for non-shmem. */
-		__lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+		lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
 	}
 
 	/*
mm/memcontrol.c
@@ -777,7 +777,7 @@ void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	mod_memcg_lruvec_state(lruvec, idx, val);
 }
 
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 			     int val)
 {
 	struct mem_cgroup *memcg;
@@ -797,7 +797,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
 	mod_lruvec_state(lruvec, idx, val);
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
 
 void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 {
mm/page-writeback.c
@@ -2658,7 +2658,7 @@ static void folio_account_dirtied(struct folio *folio,
 		inode_attach_wb(inode, folio);
 		wb = inode_to_wb(inode);
 
-		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
 		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
 		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
 		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
mm/rmap.c
@@ -1212,12 +1212,12 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
 
 	if (nr) {
 		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
-		__lruvec_stat_mod_folio(folio, idx, nr);
+		lruvec_stat_mod_folio(folio, idx, nr);
 	}
 	if (nr_pmdmapped) {
 		if (folio_test_anon(folio)) {
 			idx = NR_ANON_THPS;
-			__lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+			lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
 		} else {
 			/* NR_*_PMDMAPPED are not maintained per-memcg */
 			idx = folio_test_swapbacked(folio) ?
mm/shmem.c
@@ -871,9 +871,9 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 static void shmem_update_stats(struct folio *folio, int nr_pages)
 {
 	if (folio_test_pmd_mappable(folio))
-		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
-	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+		lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+	lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+	lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
 }
 
 /*