iomap: add caller-provided callbacks for read and readahead
Add caller-provided callbacks for read and readahead so that they can be
used generically, especially by filesystems that are not block-based.

In particular, this:

* Modifies the read and readahead interface to take in a
  struct iomap_read_folio_ctx that is publicly defined as:

      struct iomap_read_folio_ctx {
              const struct iomap_read_ops *ops;
              struct folio *cur_folio;
              struct readahead_control *rac;
              void *read_ctx;
      };

  where struct iomap_read_ops is defined as:

      struct iomap_read_ops {
              int (*read_folio_range)(const struct iomap_iter *iter,
                              struct iomap_read_folio_ctx *ctx,
                              size_t len);
              void (*submit_read)(struct iomap_read_folio_ctx *ctx);
      };

  read_folio_range() reads in the folio range and must be provided by the
  caller. submit_read() is optional and is used for submitting any pending
  read requests.

* Modifies existing filesystems that use iomap for read and readahead to
  use the new API, through the new static inline helpers
  iomap_bio_read_folio() and iomap_bio_readahead(). There is no change in
  functionality for those filesystems.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit b2f35ac414
parent fb7a10ac47
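As a sketch of the contract the new callbacks impose, a non-block
filesystem might implement read_folio_range() along these lines. This is
hypothetical code, not part of this commit: the myfs_* names and the
myfs_fetch() helper are assumptions.

      static int myfs_read_folio_range(const struct iomap_iter *iter,
                      struct iomap_read_folio_ctx *ctx, size_t len)
      {
              struct folio *folio = ctx->cur_folio;
              size_t poff = offset_in_folio(folio, iter->pos);
              int error;

              /* Read the bytes synchronously (myfs_fetch() is hypothetical). */
              error = myfs_fetch(iter->inode, folio, poff, len, iter->pos);

              /*
               * The contract requires calling iomap_finish_folio_read()
               * for every range, even when the read fails.
               */
              iomap_finish_folio_read(folio, poff, len, error);
              return error;
      }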
Documentation/filesystems/iomap/operations.rst

@@ -135,6 +135,28 @@ These ``struct kiocb`` flags are significant for buffered I/O with iomap:
 
 * ``IOCB_DONTCACHE``: Turns on ``IOMAP_DONTCACHE``.
 
+``struct iomap_read_ops``
+--------------------------
+
+.. code-block:: c
+
+ struct iomap_read_ops {
+     int (*read_folio_range)(const struct iomap_iter *iter,
+                             struct iomap_read_folio_ctx *ctx, size_t len);
+     void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+ };
+
+iomap calls these functions:
+
+  - ``read_folio_range``: Called to read in the range. This must be provided
+    by the caller. The caller is responsible for calling
+    iomap_finish_folio_read() after reading in the folio range. This should be
+    done even if an error is encountered during the read. This returns 0 on
+    success or a negative error on failure.
+
+  - ``submit_read``: Submit any pending read requests. This function is
+    optional.
+
 Internal per-Folio State
 ------------------------
@@ -182,6 +204,28 @@ The ``flags`` argument to ``->iomap_begin`` will be set to zero.
 The pagecache takes whatever locks it needs before calling the
 filesystem.
 
+Both ``iomap_readahead`` and ``iomap_read_folio`` pass in a ``struct
+iomap_read_folio_ctx``:
+
+.. code-block:: c
+
+ struct iomap_read_folio_ctx {
+     const struct iomap_read_ops *ops;
+     struct folio *cur_folio;
+     struct readahead_control *rac;
+     void *read_ctx;
+ };
+
+``iomap_readahead`` must set:
+ * ``ops->read_folio_range()`` and ``rac``
+
+``iomap_read_folio`` must set:
+ * ``ops->read_folio_range()`` and ``cur_folio``
+
+``ops->submit_read()`` and ``read_ctx`` are optional. ``read_ctx`` is used to
+pass in any custom data the caller needs accessible in the ops callbacks for
+fulfilling reads.
+
 Buffered Writes
 ---------------
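Put together, the rules above translate into caller code along these
lines; a minimal sketch, assuming a filesystem with its own
myfs_iomap_ops and the hypothetical myfs_read_folio_range() from above:

      static const struct iomap_read_ops myfs_read_ops = {
              .read_folio_range = myfs_read_folio_range,
              /* .submit_read is optional and omitted here */
      };

      static int myfs_read_folio(struct file *file, struct folio *folio)
      {
              struct iomap_read_folio_ctx ctx = {
                      .ops            = &myfs_read_ops,
                      .cur_folio      = folio,
              };

              return iomap_read_folio(&myfs_iomap_ops, &ctx);
      }

      static void myfs_readahead(struct readahead_control *rac)
      {
              struct iomap_read_folio_ctx ctx = {
                      .ops            = &myfs_read_ops,
                      .rac            = rac,
              };

              iomap_readahead(&myfs_iomap_ops, &ctx);
      }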
block/fops.c

@@ -540,12 +540,13 @@ const struct address_space_operations def_blk_aops = {
 #else /* CONFIG_BUFFER_HEAD */
 static int blkdev_read_folio(struct file *file, struct folio *folio)
 {
-	return iomap_read_folio(folio, &blkdev_iomap_ops);
+	iomap_bio_read_folio(folio, &blkdev_iomap_ops);
+	return 0;
 }
 
 static void blkdev_readahead(struct readahead_control *rac)
 {
-	iomap_readahead(rac, &blkdev_iomap_ops);
+	iomap_bio_readahead(rac, &blkdev_iomap_ops);
 }
 
 static ssize_t blkdev_writeback_range(struct iomap_writepage_ctx *wpc,
fs/erofs/data.c

@@ -371,7 +371,8 @@ static int erofs_read_folio(struct file *file, struct folio *folio)
 {
 	trace_erofs_read_folio(folio, true);
 
-	return iomap_read_folio(folio, &erofs_iomap_ops);
+	iomap_bio_read_folio(folio, &erofs_iomap_ops);
+	return 0;
 }
 
 static void erofs_readahead(struct readahead_control *rac)
@@ -379,7 +380,7 @@ static void erofs_readahead(struct readahead_control *rac)
 	trace_erofs_readahead(rac->mapping->host, readahead_index(rac),
 			      readahead_count(rac), true);
 
-	return iomap_readahead(rac, &erofs_iomap_ops);
+	iomap_bio_readahead(rac, &erofs_iomap_ops);
 }
 
 static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
fs/gfs2/aops.c

@@ -424,11 +424,11 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
 	struct inode *inode = folio->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	int error;
+	int error = 0;
 
 	if (!gfs2_is_jdata(ip) ||
 	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
-		error = iomap_read_folio(folio, &gfs2_iomap_ops);
+		iomap_bio_read_folio(folio, &gfs2_iomap_ops);
 	} else if (gfs2_is_stuffed(ip)) {
 		error = stuffed_read_folio(ip, folio);
 	} else {
@@ -503,7 +503,7 @@ static void gfs2_readahead(struct readahead_control *rac)
 	else if (gfs2_is_jdata(ip))
 		mpage_readahead(rac, gfs2_block_map);
 	else
-		iomap_readahead(rac, &gfs2_iomap_ops);
+		iomap_bio_readahead(rac, &gfs2_iomap_ops);
 }
 
 /**
fs/iomap/buffered-io.c

@@ -328,8 +328,8 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
 }
 
 #ifdef CONFIG_BLOCK
-static void iomap_finish_folio_read(struct folio *folio, size_t off,
-		size_t len, int error)
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+		int error)
 {
 	struct iomap_folio_state *ifs = folio->private;
 	bool uptodate = !error;
@@ -349,6 +349,7 @@ static void iomap_finish_folio_read(struct folio *folio, size_t off,
 	if (finished)
 		folio_end_read(folio, uptodate);
 }
+EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
 
 static void iomap_read_end_io(struct bio *bio)
 {
@@ -360,12 +361,6 @@ static void iomap_read_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
-struct iomap_read_folio_ctx {
-	struct folio *cur_folio;
-	void *read_ctx;
-	struct readahead_control *rac;
-};
-
 static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
 {
 	struct bio *bio = ctx->read_ctx;
@@ -374,7 +369,7 @@ static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
 		submit_bio(bio);
 }
 
-static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
+static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
 		struct iomap_read_folio_ctx *ctx, size_t plen)
 {
 	struct folio *folio = ctx->cur_folio;
@@ -412,8 +407,15 @@ static void iomap_bio_read_folio_range(const struct iomap_iter *iter,
 		bio_add_folio_nofail(bio, folio, plen, poff);
 		ctx->read_ctx = bio;
 	}
+	return 0;
 }
 
+const struct iomap_read_ops iomap_bio_read_ops = {
+	.read_folio_range = iomap_bio_read_folio_range,
+	.submit_read = iomap_bio_submit_read,
+};
+EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
+
 static void iomap_read_init(struct folio *folio)
 {
 	struct iomap_folio_state *ifs = folio->private;
@@ -544,7 +546,9 @@ static int iomap_read_folio_iter(struct iomap_iter *iter,
 		if (!*bytes_pending)
 			iomap_read_init(folio);
 		*bytes_pending += plen;
-		iomap_bio_read_folio_range(iter, ctx, plen);
+		ret = ctx->ops->read_folio_range(iter, ctx, plen);
+		if (ret)
+			return ret;
 	}
 
 	ret = iomap_iter_advance(iter, plen);
@@ -556,26 +560,25 @@ static int iomap_read_folio_iter(struct iomap_iter *iter,
 	return 0;
 }
 
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+int iomap_read_folio(const struct iomap_ops *ops,
+		struct iomap_read_folio_ctx *ctx)
 {
+	struct folio *folio = ctx->cur_folio;
 	struct iomap_iter iter = {
 		.inode = folio->mapping->host,
 		.pos = folio_pos(folio),
 		.len = folio_size(folio),
 	};
-	struct iomap_read_folio_ctx ctx = {
-		.cur_folio = folio,
-	};
 	size_t bytes_pending = 0;
 	int ret;
 
 	trace_iomap_readpage(iter.inode, 1);
 
 	while ((ret = iomap_iter(&iter, ops)) > 0)
-		iter.status = iomap_read_folio_iter(&iter, &ctx,
-				&bytes_pending);
+		iter.status = iomap_read_folio_iter(&iter, ctx, &bytes_pending);
 
-	iomap_bio_submit_read(&ctx);
+	if (ctx->ops->submit_read)
+		ctx->ops->submit_read(ctx);
 
 	iomap_read_end(folio, bytes_pending);
 
@@ -615,8 +618,8 @@ static int iomap_readahead_iter(struct iomap_iter *iter,
 
 /**
  * iomap_readahead - Attempt to read pages from a file.
- * @rac: Describes the pages to be read.
  * @ops: The operations vector for the filesystem.
+ * @ctx: The ctx used for issuing readahead.
  *
  * This function is for filesystems to call to implement their readahead
  * address_space operation.
@@ -628,28 +631,28 @@ static int iomap_readahead_iter(struct iomap_iter *iter,
  * function is called with memalloc_nofs set, so allocations will not cause
  * the filesystem to be reentered.
  */
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(const struct iomap_ops *ops,
+		struct iomap_read_folio_ctx *ctx)
 {
+	struct readahead_control *rac = ctx->rac;
 	struct iomap_iter iter = {
 		.inode = rac->mapping->host,
 		.pos = readahead_pos(rac),
 		.len = readahead_length(rac),
 	};
-	struct iomap_read_folio_ctx ctx = {
-		.rac = rac,
-	};
 	size_t cur_bytes_pending;
 
 	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
 
 	while (iomap_iter(&iter, ops) > 0)
-		iter.status = iomap_readahead_iter(&iter, &ctx,
+		iter.status = iomap_readahead_iter(&iter, ctx,
 				&cur_bytes_pending);
 
-	iomap_bio_submit_read(&ctx);
+	if (ctx->ops->submit_read)
+		ctx->ops->submit_read(ctx);
 
-	if (ctx.cur_folio)
-		iomap_read_end(ctx.cur_folio, cur_bytes_pending);
+	if (ctx->cur_folio)
+		iomap_read_end(ctx->cur_folio, cur_bytes_pending);
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
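iomap_bio_read_ops above is also the model for how read_ctx and
submit_read() are meant to compose: read_folio_range() batches work in
ctx->read_ctx and submit_read() flushes whatever is still pending. A
non-bio filesystem could follow the same pattern with its own batch
object; a rough sketch, with all myfs_* helpers assumed rather than real:

      static int myfs_read_folio_range_async(const struct iomap_iter *iter,
                      struct iomap_read_folio_ctx *ctx, size_t len)
      {
              struct myfs_io_batch *batch = ctx->read_ctx;

              if (!batch) {
                      batch = myfs_io_batch_alloc(iter->inode);
                      if (!batch)
                              return -ENOMEM;
                      ctx->read_ctx = batch;
              }
              /*
               * Completion of the batched request is expected to call
               * iomap_finish_folio_read() for each queued range.
               */
              return myfs_io_batch_add(batch, ctx->cur_folio, iter->pos, len);
      }

      static void myfs_submit_read(struct iomap_read_folio_ctx *ctx)
      {
              if (ctx->read_ctx)
                      myfs_io_batch_submit(ctx->read_ctx);
      }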
fs/xfs/xfs_aops.c

@@ -742,14 +742,15 @@ xfs_vm_read_folio(
 	struct file		*unused,
 	struct folio		*folio)
 {
-	return iomap_read_folio(folio, &xfs_read_iomap_ops);
+	iomap_bio_read_folio(folio, &xfs_read_iomap_ops);
+	return 0;
 }
 
 STATIC void
 xfs_vm_readahead(
 	struct readahead_control	*rac)
 {
-	iomap_readahead(rac, &xfs_read_iomap_ops);
+	iomap_bio_readahead(rac, &xfs_read_iomap_ops);
 }
 
 static int
fs/zonefs/file.c

@@ -112,12 +112,13 @@ static const struct iomap_ops zonefs_write_iomap_ops = {
 
 static int zonefs_read_folio(struct file *unused, struct folio *folio)
 {
-	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+	iomap_bio_read_folio(folio, &zonefs_read_iomap_ops);
+	return 0;
 }
 
 static void zonefs_readahead(struct readahead_control *rac)
 {
-	iomap_readahead(rac, &zonefs_read_iomap_ops);
+	iomap_bio_readahead(rac, &zonefs_read_iomap_ops);
 }
 
 /*
include/linux/iomap.h

@@ -16,6 +16,7 @@ struct inode;
 struct iomap_iter;
 struct iomap_dio;
 struct iomap_writepage_ctx;
+struct iomap_read_folio_ctx;
 struct iov_iter;
 struct kiocb;
 struct page;
@@ -337,8 +338,10 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops, void *private);
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+int iomap_read_folio(const struct iomap_ops *ops,
+		struct iomap_read_folio_ctx *ctx);
+void iomap_readahead(const struct iomap_ops *ops,
+		struct iomap_read_folio_ctx *ctx);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
@@ -465,6 +468,8 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
 		loff_t pos, loff_t end_pos, unsigned int dirty_len);
 int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
 
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+		int error);
 void iomap_start_folio_write(struct inode *inode, struct folio *folio,
 		size_t len);
 void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
@@ -473,6 +478,34 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
 int iomap_writepages(struct iomap_writepage_ctx *wpc);
 
+struct iomap_read_folio_ctx {
+	const struct iomap_read_ops *ops;
+	struct folio *cur_folio;
+	struct readahead_control *rac;
+	void *read_ctx;
+};
+
+struct iomap_read_ops {
+	/*
+	 * Read in a folio range.
+	 *
+	 * The caller is responsible for calling iomap_finish_folio_read() after
+	 * reading in the folio range. This should be done even if an error is
+	 * encountered during the read.
+	 *
+	 * Returns 0 on success or a negative error on failure.
+	 */
+	int (*read_folio_range)(const struct iomap_iter *iter,
+			struct iomap_read_folio_ctx *ctx, size_t len);
+
+	/*
+	 * Submit any pending read requests.
+	 *
+	 * This is optional.
+	 */
+	void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+};
+
 /*
  * Flags for direct I/O ->end_io:
  */
@@ -538,4 +571,30 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
 
 extern struct bio_set iomap_ioend_bioset;
 
+#ifdef CONFIG_BLOCK
+extern const struct iomap_read_ops iomap_bio_read_ops;
+
+static inline void iomap_bio_read_folio(struct folio *folio,
+		const struct iomap_ops *ops)
+{
+	struct iomap_read_folio_ctx ctx = {
+		.ops = &iomap_bio_read_ops,
+		.cur_folio = folio,
+	};
+
+	iomap_read_folio(ops, &ctx);
+}
+
+static inline void iomap_bio_readahead(struct readahead_control *rac,
+		const struct iomap_ops *ops)
+{
+	struct iomap_read_folio_ctx ctx = {
+		.ops = &iomap_bio_read_ops,
+		.rac = rac,
+	};
+
+	iomap_readahead(ops, &ctx);
+}
+#endif /* CONFIG_BLOCK */
+
 #endif /* LINUX_IOMAP_H */