iomap: move bio read logic into helper function

Move the iomap_readpage_iter() bio read logic into a separate helper
function, iomap_bio_read_folio_range(). This is needed to make iomap
read/readahead more generically usable, especially for filesystems that
do not require CONFIG_BLOCK.

Additionally, rename buffered write's iomap_read_folio_range() to
iomap_bio_read_folio_range_sync() to better describe its synchronous
behavior.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
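
For orientation, the refactor leaves iomap_readpage_iter() with the generic
policy work (inline data, zeroing holes and post-EOF blocks) and pushes the
block-layer specifics into the new helper. A minimal standalone model of that
split is sketched below; every type and name in it is a stand-in invented for
illustration, not the kernel API (the real code operates on struct iomap_iter,
struct folio and bios, as the diff further down shows):

#include <stdio.h>
#include <string.h>

/* Stand-in for the folio/iterator state; illustration only. */
struct read_ctx {
	char *buf;	/* models the folio memory */
};

/* Models iomap_bio_read_folio_range(): the backend-specific I/O step. */
static void backend_read_range(struct read_ctx *ctx, size_t off, size_t len)
{
	memset(ctx->buf + off, 'D', len);	/* pretend a bio filled this range */
}

/* Models the slimmed-down iomap_readpage_iter(): generic policy only. */
static void readpage_iter(struct read_ctx *ctx, size_t off, size_t len, int is_hole)
{
	if (is_hole) {
		memset(ctx->buf + off, 0, len);	/* zero holes without touching the backend */
		return;
	}
	backend_read_range(ctx, off, len);	/* only real I/O reaches the helper */
}

int main(void)
{
	char folio[17] = { 0 };
	struct read_ctx ctx = { .buf = folio };

	readpage_iter(&ctx, 0, 8, 0);	/* mapped range: delegated to the backend */
	readpage_iter(&ctx, 8, 8, 1);	/* hole: zeroed in place */
	printf("%s\n", folio);		/* prints the eight 'D' bytes; the hole stays zeroed */
	return 0;
}

The point of the split is that the policy half has no dependency on the block
layer, which is what lets filesystems without CONFIG_BLOCK reuse it.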

@@ -367,36 +367,15 @@ struct iomap_readpage_ctx {
 	struct readahead_control *rac;
 };
 
-static int iomap_readpage_iter(struct iomap_iter *iter,
-		struct iomap_readpage_ctx *ctx)
+static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
+		struct iomap_readpage_ctx *ctx, loff_t pos, size_t plen)
 {
-	const struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	loff_t length = iomap_length(iter);
 	struct folio *folio = ctx->cur_folio;
-	struct iomap_folio_state *ifs;
-	size_t poff, plen;
+	const struct iomap *iomap = &iter->iomap;
+	struct iomap_folio_state *ifs = folio->private;
+	size_t poff = offset_in_folio(folio, pos);
+	loff_t length = iomap_length(iter);
 	sector_t sector;
-	int ret;
-
-	if (iomap->type == IOMAP_INLINE) {
-		ret = iomap_read_inline_data(iter, folio);
-		if (ret)
-			return ret;
-		return iomap_iter_advance(iter, length);
-	}
-
-	/* zero post-eof blocks as the page may be mapped */
-	ifs = ifs_alloc(iter->inode, folio, iter->flags);
-	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
-	if (plen == 0)
-		goto done;
-
-	if (iomap_block_needs_zeroing(iter, pos)) {
-		folio_zero_range(folio, poff, plen);
-		iomap_set_range_uptodate(folio, poff, plen);
-		goto done;
-	}
 
 	ctx->cur_folio_in_bio = true;
 	if (ifs) {
@@ -435,6 +414,37 @@ static int iomap_readpage_iter(struct iomap_iter *iter,
 		ctx->bio->bi_end_io = iomap_read_end_io;
 		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
 	}
+}
+
+static int iomap_readpage_iter(struct iomap_iter *iter,
+		struct iomap_readpage_ctx *ctx)
+{
+	const struct iomap *iomap = &iter->iomap;
+	loff_t pos = iter->pos;
+	loff_t length = iomap_length(iter);
+	struct folio *folio = ctx->cur_folio;
+	size_t poff, plen;
+	int ret;
+
+	if (iomap->type == IOMAP_INLINE) {
+		ret = iomap_read_inline_data(iter, folio);
+		if (ret)
+			return ret;
+		return iomap_iter_advance(iter, length);
+	}
+
+	/* zero post-eof blocks as the page may be mapped */
+	ifs_alloc(iter->inode, folio, iter->flags);
+	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
+	if (plen == 0)
+		goto done;
+
+	if (iomap_block_needs_zeroing(iter, pos)) {
+		folio_zero_range(folio, poff, plen);
+		iomap_set_range_uptodate(folio, poff, plen);
+	} else {
+		iomap_bio_read_folio_range(iter, ctx, pos, plen);
+	}
 
 done:
 	/*
@@ -559,7 +569,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
-static int iomap_read_folio_range(const struct iomap_iter *iter,
+static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t len)
 {
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -572,7 +582,7 @@ static int iomap_read_folio_range(const struct iomap_iter *iter,
 	return submit_bio_wait(&bio);
 }
 #else
-static int iomap_read_folio_range(const struct iomap_iter *iter,
+static int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t len)
 {
 	WARN_ON_ONCE(1);
@@ -749,7 +759,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter,
 			status = write_ops->read_folio_range(iter,
 					folio, block_start, plen);
 		else
-			status = iomap_read_folio_range(iter,
+			status = iomap_bio_read_folio_range_sync(iter,
 					folio, block_start, plen);
 		if (status)
 			return status;
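
The last hunk is the matching call-site change in __iomap_write_begin(): a
filesystem that supplies its own read_folio_range operation keeps using it,
and only the fallback goes through the renamed bio-based synchronous helper.
A standalone sketch of that dispatch follows; the struct and function names
are invented stand-ins for illustration (the real hook is the
write_ops->read_folio_range callback visible in the hunk above):

#include <stdio.h>
#include <stddef.h>

/* Stand-in ops table; models the write_ops->read_folio_range hook. */
struct write_ops {
	int (*read_folio_range)(void *fs_data, size_t pos, size_t len);
};

/* Models iomap_bio_read_folio_range_sync(): the block-based fallback. */
static int bio_read_folio_range_sync(size_t pos, size_t len)
{
	printf("bio fallback: read %zu bytes at %zu\n", len, pos);
	return 0;
}

/* Models the dispatch in __iomap_write_begin(). */
static int read_for_write_begin(const struct write_ops *ops, void *fs_data,
				size_t pos, size_t len)
{
	if (ops && ops->read_folio_range)
		return ops->read_folio_range(fs_data, pos, len);
	return bio_read_folio_range_sync(pos, len);
}

/* Hypothetical filesystem-provided callback, e.g. one that needs no block device. */
static int example_fs_read(void *fs_data, size_t pos, size_t len)
{
	(void)fs_data;
	printf("fs-provided read: %zu bytes at %zu\n", len, pos);
	return 0;
}

int main(void)
{
	const struct write_ops fs_ops = { .read_folio_range = example_fs_read };

	read_for_write_begin(&fs_ops, NULL, 0, 4096);	/* uses the filesystem callback */
	read_for_write_begin(NULL, NULL, 0, 4096);	/* falls back to the bio path */
	return 0;
}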