fs: cosmetic fixes to lru handling

1. inode_bit_waitqueue() was somehow placed between __inode_add_lru() and
   inode_add_lru(); move it up so the two LRU helpers sit next to each
   other
2. assert that ->i_lock is held in __inode_add_lru() instead of merely
   claiming in a comment that it is needed (see the sketch after this
   list)
3. s/__inode_add_lru/__inode_lru_list_add/ (and likewise for the non-__
   variant) for consistency with its counterpart inode_lru_list_del() and
   with the similar routines for sb and io list management
4. push the list presence check into inode_lru_list_del(), just like the
   sb and io list helpers do (also sketched below)
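
Items 2 and 4 are common kernel idioms; here is a minimal sketch of both
in isolation. struct foo, foo_lru_add() and foo_lru_del() are made-up
names for illustration only, while lockdep_assert_held(), list_empty()
and list_del_init() are the real kernel APIs (the actual patch uses the
list_lru variants):

	#include <linux/list.h>
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
		struct list_head lru;
	};

	static void foo_lru_add(struct foo *f)
	{
		/* Item 2: a lockdep assertion enforces the locking rule at
		 * runtime on lockdep-enabled kernels, where a comment could
		 * only claim it. */
		lockdep_assert_held(&f->lock);

		/* ... put f on the LRU ... */
	}

	static void foo_lru_del(struct foo *f)
	{
		/* Item 4: tolerate objects that are not on the list, so
		 * callers can delete unconditionally instead of open-coding
		 * the list_empty() check. */
		if (list_empty(&f->lru))
			return;
		list_del_init(&f->lru);
	}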

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20251029131428.654761-2-mjguzik@gmail.com
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Authored by Mateusz Guzik on 2025-10-29 14:14:28 +01:00; committed by Christian Brauner
parent a27628f436
commit 4c6b40877b
7 changed files with 35 additions and 33 deletions

fs/fs-writeback.c

@@ -1452,7 +1452,7 @@ static void inode_sync_complete(struct inode *inode)
 	inode_state_clear(inode, I_SYNC);
 	/* If inode is clean an unused, put it into LRU now... */
-	inode_add_lru(inode);
+	inode_lru_list_add(inode);
 	/* Called with inode->i_lock which ensures memory ordering. */
 	inode_wake_up_bit(inode, __I_SYNC);
 }

fs/inode.c

@@ -530,23 +530,6 @@ void ihold(struct inode *inode)
 }
 EXPORT_SYMBOL(ihold);
 
-static void __inode_add_lru(struct inode *inode, bool rotate)
-{
-	if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
-		return;
-	if (icount_read(inode))
-		return;
-	if (!(inode->i_sb->s_flags & SB_ACTIVE))
-		return;
-	if (!mapping_shrinkable(&inode->i_data))
-		return;
-
-	if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
-		this_cpu_inc(nr_unused);
-	else if (rotate)
-		inode_state_set(inode, I_REFERENCED);
-}
-
 struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
 					    struct inode *inode, u32 bit)
 {
@@ -584,18 +567,38 @@ void wait_on_new_inode(struct inode *inode)
 }
 EXPORT_SYMBOL(wait_on_new_inode);
 
+static void __inode_lru_list_add(struct inode *inode, bool rotate)
+{
+	lockdep_assert_held(&inode->i_lock);
+
+	if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+		return;
+	if (icount_read(inode))
+		return;
+	if (!(inode->i_sb->s_flags & SB_ACTIVE))
+		return;
+	if (!mapping_shrinkable(&inode->i_data))
+		return;
+
+	if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
+		this_cpu_inc(nr_unused);
+	else if (rotate)
+		inode_state_set(inode, I_REFERENCED);
+}
+
 /*
  * Add inode to LRU if needed (inode is unused and clean).
- *
- * Needs inode->i_lock held.
  */
-void inode_add_lru(struct inode *inode)
+void inode_lru_list_add(struct inode *inode)
 {
-	__inode_add_lru(inode, false);
+	__inode_lru_list_add(inode, false);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
+	if (list_empty(&inode->i_lru))
+		return;
+
 	if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
 		this_cpu_dec(nr_unused);
 }
@@ -1920,7 +1923,7 @@ static void iput_final(struct inode *inode)
 	if (!drop &&
 	    !(inode_state_read(inode) & I_DONTCACHE) &&
 	    (sb->s_flags & SB_ACTIVE)) {
-		__inode_add_lru(inode, true);
+		__inode_lru_list_add(inode, true);
 		spin_unlock(&inode->i_lock);
 		return;
 	}
@@ -1944,8 +1947,7 @@ static void iput_final(struct inode *inode)
 		inode_state_replace(inode, I_WILL_FREE, I_FREEING);
 	}
 
-	if (!list_empty(&inode->i_lru))
-		inode_lru_list_del(inode);
+	inode_lru_list_del(inode);
 	spin_unlock(&inode->i_lock);
 
 	evict(inode);
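
The payoff of item 4 shows up at this call site: with the presence check
inside the helper, iput_final() can call it unconditionally. The change
in isolation:

	/* Before: every caller had to know the helper was not idempotent. */
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);

	/* After: the helper checks list membership itself. */
	inode_lru_list_del(inode);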

include/linux/fs.h

@@ -3502,7 +3502,7 @@ static inline void remove_inode_hash(struct inode *inode)
 }
 
 extern void inode_sb_list_add(struct inode *inode);
-extern void inode_add_lru(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);

mm/filemap.c

@@ -256,7 +256,7 @@ void filemap_remove_folio(struct folio *folio)
 	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
 	filemap_free_folio(mapping, folio);
@@ -335,7 +335,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	page_cache_delete_batch(mapping, fbatch);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
 	for (i = 0; i < folio_batch_count(fbatch); i++)
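
Every mm/ call site this patch touches follows the same convention,
reconstructed here as a fragment from the context lines above (the
spin_lock() placement is inferred from the matching unlock; it is not
part of the hunks): the host inode's ->i_lock is held across the
page-cache removal, and the inode is moved to the LRU only once the
mapping has become shrinkable.

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	/* The cache is now (nearly) empty: the unused host inode becomes a
	 * reclaim candidate, so put it on the LRU before dropping i_lock. */
	if (mapping_shrinkable(mapping))
		inode_lru_list_add(mapping->host);
	spin_unlock(&mapping->host->i_lock);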

mm/truncate.c

@@ -46,7 +46,7 @@ static void clear_shadow_entries(struct address_space *mapping,
 	xas_unlock_irq(&xas);
 
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 }
@@ -111,7 +111,7 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 	xas_unlock_irq(&xas);
 
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 out:
 	folio_batch_remove_exceptionals(fbatch);
@@ -622,7 +622,7 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
 	filemap_free_folio(mapping, folio);

mm/vmscan.c

@@ -811,7 +811,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 	__filemap_remove_folio(folio, shadow);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
+		inode_lru_list_add(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
 	if (free_folio)

mm/workingset.c

@@ -755,7 +755,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping->host != NULL) {
 		if (mapping_shrinkable(mapping))
-			inode_add_lru(mapping->host);
+			inode_lru_list_add(mapping->host);
 		spin_unlock(&mapping->host->i_lock);
 	}
 	ret = LRU_REMOVED_RETRY;