iomap: simplify ->read_folio_range() error handling for reads
Instead of requiring the caller to call iomap_finish_folio_read() even if the ->read_folio_range() callback returns an error, account for this internally in iomap. This makes the interface simpler and matches writeback's ->read_folio_range() error handling expectations.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Link: https://patch.msgid.link/20251111193658.3495942-6-joannelkoong@gmail.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent 6b1fd2281f
commit f8eaf79406
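The practical effect on a synchronous ->read_folio_range() implementation is that the error path no longer reports completion back to iomap. A minimal sketch of the new contract follows; my_fs_read_range() is a hypothetical helper, ctx->cur_folio and the offset calculation are assumptions, and only iomap_finish_folio_read() and the callback signature come from this patch (see the fuse hunk below).

/*
 * Sketch only, not part of the patch: on error, simply return it and let
 * iomap unwind its accounting; call iomap_finish_folio_read() only when the
 * range was actually read in.
 */
static int my_fs_read_folio_range(const struct iomap_iter *iter,
				  struct iomap_read_folio_ctx *ctx, size_t len)
{
	struct folio *folio = ctx->cur_folio;		/* assumed field name */
	size_t off = offset_in_folio(folio, iter->pos);
	int ret;

	ret = my_fs_read_range(folio, off, len);	/* hypothetical helper */
	if (!ret)
		iomap_finish_folio_read(folio, off, len, ret);
	return ret;
}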
@@ -149,10 +149,9 @@ These ``struct kiocb`` flags are significant for buffered I/O with iomap:
 iomap calls these functions:
 
 - ``read_folio_range``: Called to read in the range. This must be provided
-  by the caller. The caller is responsible for calling
-  iomap_finish_folio_read() after reading in the folio range. This should be
-  done even if an error is encountered during the read. This returns 0 on
-  success or a negative error on failure.
+  by the caller. If this succeeds, iomap_finish_folio_read() must be called
+  after the range is read in, regardless of whether the read succeeded or
+  failed.
 
 - ``submit_read``: Submit any pending read requests. This function is
   optional.
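Where reads are queued and completed asynchronously (the case ``submit_read`` exists for), the completion handler is the point that calls iomap_finish_folio_read(), and it does so whether the IO succeeded or failed. A rough sketch: every my_fs_* name is invented, the ``submit_read`` signature is assumed, and only iomap_finish_folio_read() and the callback signature are taken from this patch.

static int my_fs_read_folio_range_async(const struct iomap_iter *iter,
					struct iomap_read_folio_ctx *ctx,
					size_t len)
{
	struct folio *folio = ctx->cur_folio;		/* assumed field name */
	size_t off = offset_in_folio(folio, iter->pos);

	/*
	 * If queueing fails, return the error without calling
	 * iomap_finish_folio_read(); iomap now unwinds the accounting itself.
	 */
	return my_fs_queue_read(ctx, folio, off, len);	/* hypothetical */
}

/* Completion side: queueing succeeded, so always report the result. */
static void my_fs_read_endio(struct folio *folio, size_t off, size_t len,
			     int error)
{
	iomap_finish_folio_read(folio, off, len, error);
}

/* Kick off whatever my_fs_queue_read() batched up (signature assumed). */
static void my_fs_submit_read(struct iomap_read_folio_ctx *ctx)
{
	my_fs_flush_read_batch(ctx);			/* hypothetical */
}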
@@ -922,13 +922,6 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
 
 	if (ctx->rac) {
 		ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
-		/*
-		 * If fuse_handle_readahead was successful, fuse_readpages_end
-		 * will do the iomap_finish_folio_read, else we need to call it
-		 * here
-		 */
-		if (ret)
-			iomap_finish_folio_read(folio, off, len, ret);
 	} else {
 		/*
 		 * for non-readahead read requests, do reads synchronously
@@ -936,7 +929,8 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
 		 * out-of-order reads
 		 */
 		ret = fuse_do_readfolio(file, folio, off, len);
-		iomap_finish_folio_read(folio, off, len, ret);
+		if (!ret)
+			iomap_finish_folio_read(folio, off, len, ret);
 	}
 	return ret;
 }
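Read together, the two hunks above leave fuse_iomap_read_folio_range_async() with roughly this branch structure. Local declarations are elided and the comments are paraphrases of the removed comment and the commit message, so treat this as a reading aid rather than a verbatim quote of the resulting file.

	if (ctx->rac) {
		/*
		 * Readahead path: on success, fuse_readpages_end() later calls
		 * iomap_finish_folio_read(); on failure, just return the error
		 * and let iomap account for the unsubmitted range.
		 */
		ret = fuse_handle_readahead(folio, ctx->rac, data, pos, len);
	} else {
		/* Non-readahead requests are read synchronously. */
		ret = fuse_do_readfolio(file, folio, off, len);
		if (!ret)
			iomap_finish_folio_read(folio, off, len, ret);
	}
	return ret;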
@@ -398,7 +398,8 @@ static void iomap_read_init(struct folio *folio)
 		 * has already finished reading in the entire folio.
 		 */
 		spin_lock_irq(&ifs->state_lock);
-		ifs->read_bytes_pending += len + 1;
+		WARN_ON_ONCE(ifs->read_bytes_pending != 0);
+		ifs->read_bytes_pending = len + 1;
 		spin_unlock_irq(&ifs->state_lock);
 	}
 }
@@ -414,43 +415,47 @@ static void iomap_read_init(struct folio *folio)
  */
 static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
 {
-	struct iomap_folio_state *ifs;
+	struct iomap_folio_state *ifs = folio->private;
 
-	/*
-	 * If there are no bytes submitted, this means we are responsible for
-	 * unlocking the folio here, since no IO helper has taken ownership of
-	 * it.
-	 */
-	if (!bytes_submitted) {
-		folio_unlock(folio);
-		return;
-	}
-
-	ifs = folio->private;
 	if (ifs) {
 		bool end_read, uptodate;
-		/*
-		 * Subtract any bytes that were initially accounted to
-		 * read_bytes_pending but skipped for IO.
-		 * The +1 accounts for the bias we added in iomap_read_init().
-		 */
-		size_t bytes_not_submitted = folio_size(folio) + 1 -
-			bytes_submitted;
 
 		spin_lock_irq(&ifs->state_lock);
-		ifs->read_bytes_pending -= bytes_not_submitted;
-		/*
-		 * If !ifs->read_bytes_pending, this means all pending reads
-		 * by the IO helper have already completed, which means we need
-		 * to end the folio read here. If ifs->read_bytes_pending != 0,
-		 * the IO helper will end the folio read.
-		 */
-		end_read = !ifs->read_bytes_pending;
+		if (!ifs->read_bytes_pending) {
+			WARN_ON_ONCE(bytes_submitted);
+			end_read = true;
+		} else {
+			/*
+			 * Subtract any bytes that were initially accounted to
+			 * read_bytes_pending but skipped for IO. The +1
+			 * accounts for the bias we added in iomap_read_init().
+			 */
+			size_t bytes_not_submitted = folio_size(folio) + 1 -
+				bytes_submitted;
+			ifs->read_bytes_pending -= bytes_not_submitted;
+			/*
+			 * If !ifs->read_bytes_pending, this means all pending
+			 * reads by the IO helper have already completed, which
+			 * means we need to end the folio read here. If
+			 * ifs->read_bytes_pending != 0, the IO helper will end
+			 * the folio read.
+			 */
+			end_read = !ifs->read_bytes_pending;
+		}
 		if (end_read)
 			uptodate = ifs_is_fully_uptodate(folio, ifs);
 		spin_unlock_irq(&ifs->state_lock);
 		if (end_read)
 			folio_end_read(folio, uptodate);
+	} else if (!bytes_submitted) {
+		/*
+		 * If there were no bytes submitted, this means we are
+		 * responsible for unlocking the folio here, since no IO helper
+		 * has taken ownership of it. If there were bytes submitted,
+		 * then the IO helper will end the read via
+		 * iomap_finish_folio_read().
+		 */
+		folio_unlock(folio);
 	}
 }
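The +1 bias and its removal are easier to see in isolation. The following standalone program is a model of the accounting only; it is not kernel code and all names are invented. The counter starts at folio_size + 1, each completed IO subtracts its length, and the submission path subtracts the unsubmitted remainder plus the bias, so the counter reaches zero exactly once whether completions run before or after submission finishes.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of ifs->read_bytes_pending for one folio (names invented). */
struct model {
	long read_bytes_pending;
	int  folio_done;	/* stands in for folio_end_read() */
};

static void model_read_init(struct model *m, size_t folio_size)
{
	/* Bias of +1 keeps the counter nonzero until submission is done. */
	m->read_bytes_pending = (long)folio_size + 1;
}

/* Completion of one submitted range (the iomap_finish_folio_read() role). */
static void model_finish_range(struct model *m, size_t len)
{
	m->read_bytes_pending -= (long)len;
	if (m->read_bytes_pending == 0)
		m->folio_done = 1;
}

/* End of submission (the iomap_read_end() role). */
static void model_read_end(struct model *m, size_t folio_size,
			   size_t bytes_submitted)
{
	/* Drop the bias plus every byte that never went out for IO. */
	m->read_bytes_pending -= (long)(folio_size + 1 - bytes_submitted);
	if (m->read_bytes_pending == 0)
		m->folio_done = 1;
}

int main(void)
{
	size_t folio_size = 4096;

	/* Case 1: the IO completion arrives before submission finishes. */
	struct model a = {0};
	model_read_init(&a, folio_size);
	model_finish_range(&a, 2048);		/* fast IO completes early */
	model_read_end(&a, folio_size, 2048);	/* only 2048 bytes submitted */
	assert(a.folio_done && a.read_bytes_pending == 0);

	/* Case 2: the IO completion arrives after submission finishes. */
	struct model b = {0};
	model_read_init(&b, folio_size);
	model_read_end(&b, folio_size, 2048);	/* bias keeps counter at 2048 */
	model_finish_range(&b, 2048);		/* IO completes later */
	assert(b.folio_done && b.read_bytes_pending == 0);

	printf("both orderings end the folio read exactly once\n");
	return 0;
}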
@@ -498,10 +503,10 @@ static int iomap_read_folio_iter(struct iomap_iter *iter,
 	} else {
 		if (!*bytes_submitted)
 			iomap_read_init(folio);
-		*bytes_submitted += plen;
 		ret = ctx->ops->read_folio_range(iter, ctx, plen);
 		if (ret)
 			return ret;
+		*bytes_submitted += plen;
 	}
 
 	ret = iomap_iter_advance(iter, plen);
@@ -495,9 +495,8 @@ struct iomap_read_ops {
 	/*
 	 * Read in a folio range.
 	 *
-	 * The caller is responsible for calling iomap_finish_folio_read() after
-	 * reading in the folio range. This should be done even if an error is
-	 * encountered during the read.
+	 * If this succeeds, iomap_finish_folio_read() must be called after the
+	 * range is read in, regardless of whether the read succeeded or failed.
 	 *
	 * Returns 0 on success or a negative error on failure.
	 */
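Tying the pieces together, a filesystem supplies these callbacks through a struct iomap_read_ops instance. The member names below mirror ctx->ops->read_folio_range() and the ``submit_read`` entry in the documentation hunk; the my_fs_* functions are the hypothetical ones sketched earlier, and the exact struct layout is not shown in this diff, so this is illustrative only.

static const struct iomap_read_ops my_fs_iomap_read_ops = {
	.read_folio_range	= my_fs_read_folio_range_async,
	.submit_read		= my_fs_submit_read,	/* optional */
};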