mirror of https://github.com/torvalds/linux.git
f2fs: Convert fsync_node_entry->page to folio
Convert all callers to set/get a folio instead of a page. Removes five calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
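The change follows the usual page-to-folio conversion pattern: a cached struct page pointer and its get_page()/put_page() references become a struct folio pointer handled with folio_get()/folio_put(), so the compound_head() lookup hidden inside the page-based helpers goes away. Below is a minimal sketch of that pattern only; example_entry, example_add and example_del are hypothetical names, not code from this patch, which applies the same transformation to struct fsync_node_entry.

/* Illustrative sketch only; example_entry and its helpers are hypothetical. */
#include <linux/list.h>
#include <linux/mm.h>

struct example_entry {
	struct list_head list;
	struct folio *folio;		/* was: struct page *page */
	unsigned int seq_id;
};

static void example_add(struct example_entry *e, struct folio *folio)
{
	/*
	 * folio_get() takes a reference on the folio directly;
	 * get_page() would first have to resolve the owning folio
	 * via compound_head() before taking the reference.
	 */
	folio_get(folio);		/* was: get_page(page) */
	e->folio = folio;		/* was: e->page = page */
}

static void example_del(struct example_entry *e)
{
	folio_put(e->folio);		/* was: put_page(e->page) */
	e->folio = NULL;
}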
parent 7d28f13c58
commit 963da02bc1

@@ -359,7 +359,7 @@ static void f2fs_write_end_io(struct bio *bio)
 
 		dec_page_count(sbi, type);
 		if (f2fs_in_warm_node_list(sbi, folio))
-			f2fs_del_fsync_node_entry(sbi, &folio->page);
+			f2fs_del_fsync_node_entry(sbi, folio);
 		clear_page_private_gcing(&folio->page);
 		folio_end_writeback(folio);
 	}
@@ -324,7 +324,7 @@ struct inode_entry {
 
 struct fsync_node_entry {
 	struct list_head list;	/* list head */
-	struct page *page;	/* warm node page pointer */
+	struct folio *folio;	/* warm node folio pointer */
 	unsigned int seq_id;	/* sequence id */
 };
 
@@ -3735,7 +3735,7 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi,
 		const struct folio *folio);
 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio);
 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
@@ -325,7 +325,7 @@ void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
 }
 
 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
-			struct page *page)
+			struct folio *folio)
 {
 	struct fsync_node_entry *fn;
 	unsigned long flags;
@@ -334,8 +334,8 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
 	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
 			GFP_NOFS, true, NULL);
 
-	get_page(page);
-	fn->page = page;
+	folio_get(folio);
+	fn->folio = folio;
 	INIT_LIST_HEAD(&fn->list);
 
 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -348,19 +348,19 @@ static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
 	return seq_id;
 }
 
-void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
 {
 	struct fsync_node_entry *fn;
 	unsigned long flags;
 
 	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
-		if (fn->page == page) {
+		if (fn->folio == folio) {
 			list_del(&fn->list);
 			sbi->fsync_node_num--;
 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 			kmem_cache_free(fsync_node_entry_slab, fn);
-			put_page(page);
+			folio_put(folio);
 			return;
 		}
 	}
@@ -1727,7 +1727,7 @@ static int __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
 
 	/* should add to global list before clearing PAGECACHE status */
 	if (f2fs_in_warm_node_list(sbi, folio)) {
-		seq = f2fs_add_fsync_node_entry(sbi, &folio->page);
+		seq = f2fs_add_fsync_node_entry(sbi, folio);
 		if (seq_id)
 			*seq_id = seq;
 	}
@@ -2129,12 +2129,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 						unsigned int seq_id)
 {
 	struct fsync_node_entry *fn;
-	struct page *page;
 	struct list_head *head = &sbi->fsync_node_list;
 	unsigned long flags;
 	unsigned int cur_seq_id = 0;
 
 	while (seq_id && cur_seq_id < seq_id) {
+		struct folio *folio;
+
 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
 		if (list_empty(head)) {
 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
@@ -2146,13 +2147,13 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
 			break;
 		}
 		cur_seq_id = fn->seq_id;
-		page = fn->page;
-		get_page(page);
+		folio = fn->folio;
+		folio_get(folio);
 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
 
-		f2fs_wait_on_page_writeback(page, NODE, true, false);
+		f2fs_folio_wait_writeback(folio, NODE, true, false);
 
-		put_page(page);
+		folio_put(folio);
 	}
 
 	return filemap_check_errors(NODE_MAPPING(sbi));
@@ -3924,7 +3924,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 		fscrypt_finalize_bounce_page(&fio->encrypted_page);
 		folio_end_writeback(folio);
 		if (f2fs_in_warm_node_list(fio->sbi, folio))
-			f2fs_del_fsync_node_entry(fio->sbi, fio->page);
+			f2fs_del_fsync_node_entry(fio->sbi, folio);
 		goto out;
 	}
 	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)