Merge patch series "enable iomap dio write completions from interrupt context v2"

Christoph Hellwig <hch@lst.de> says:

Currently iomap defers all write completions to user context.  This
was based on my assumption that no one cares about the latency of those,
which simplified the code vs the old direct-io.c.  It turns out someone
does care: Avi reported a lot of context switches with ScyllaDB, which,
at least in older kernels with workqueue scheduling issues, caused really
high tail latencies.

Fortunately, allowing direct completions is pretty easy with all the
other iomap changes we have made since then.

While doing this I've also found dead code, which gets removed (patch 1),
and an incorrect assumption in zonefs that read completions are called in
user context, which its error handling relies on.  Fix this by always
calling error completions from user context (patch 2).

Against the vfs-6.19.iomap branch.

* patches from https://patch.msgid.link/20251113170633.1453259-1-hch@lst.de:
  iomap: invert the polarity of IOMAP_DIO_INLINE_COMP
  iomap: support write completions from interrupt context
  iomap: rework REQ_FUA selection
  iomap: always run error completions in user context
  fs, iomap: remove IOCB_DIO_CALLER_COMP

Link: https://patch.msgid.link/20251113170633.1453259-1-hch@lst.de
Signed-off-by: Christian Brauner <brauner@kernel.org>
Christian Brauner 2025-11-14 12:47:01 +01:00
commit 5ec58e6acd
5 changed files with 115 additions and 153 deletions

@@ -490,10 +490,6 @@ These ``struct kiocb`` flags are significant for direct I/O with iomap:
   Only meaningful for asynchronous I/O, and only if the entire I/O can
   be issued as a single ``struct bio``.
-* ``IOCB_DIO_CALLER_COMP``: Try to run I/O completion from the caller's
-  process context.
-  See ``linux/fs.h`` for more details.
 
 Filesystems should call ``iomap_dio_rw`` from ``->read_iter`` and
 ``->write_iter``, and set ``FMODE_CAN_ODIRECT`` in the ``->open``
 function for the file.

@@ -227,12 +227,6 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
 	    !(file->f_mode & FMODE_CAN_ODIRECT))
 		return -EINVAL;
 
-	/*
-	 * Stacked filesystems don't support deferred completions, don't copy
-	 * this property in case it is set by the issuer.
-	 */
-	flags &= ~IOCB_DIO_CALLER_COMP;
-
 	old_cred = override_creds(ctx->cred);
 	if (is_sync_kiocb(iocb)) {
 		rwf_t rwf = iocb_to_rw_flags(flags);

@@ -16,9 +16,8 @@
  * Private flags for iomap_dio, must not overlap with the public ones in
  * iomap.h:
  */
-#define IOMAP_DIO_NO_INVALIDATE (1U << 25)
-#define IOMAP_DIO_CALLER_COMP (1U << 26)
-#define IOMAP_DIO_INLINE_COMP (1U << 27)
+#define IOMAP_DIO_NO_INVALIDATE (1U << 26)
+#define IOMAP_DIO_COMP_WORK (1U << 27)
 #define IOMAP_DIO_WRITE_THROUGH (1U << 28)
 #define IOMAP_DIO_NEED_SYNC (1U << 29)
 #define IOMAP_DIO_WRITE (1U << 30)
@@ -140,11 +139,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
 }
 EXPORT_SYMBOL_GPL(iomap_dio_complete);
 
-static ssize_t iomap_dio_deferred_complete(void *data)
-{
-	return iomap_dio_complete(data);
-}
-
 static void iomap_dio_complete_work(struct work_struct *work)
 {
 	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
@@ -179,33 +173,33 @@ static void iomap_dio_done(struct iomap_dio *dio)
 		WRITE_ONCE(dio->submit.waiter, NULL);
 		blk_wake_io_task(waiter);
-	} else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
-		WRITE_ONCE(iocb->private, NULL);
-		iomap_dio_complete_work(&dio->aio.work);
-	} else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
-		/*
-		 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then
-		 * schedule our completion that way to avoid an async punt to a
-		 * workqueue.
-		 */
-		/* only polled IO cares about private cleared */
-		iocb->private = dio;
-		iocb->dio_complete = iomap_dio_deferred_complete;
-
-		/*
-		 * Invoke ->ki_complete() directly. We've assigned our
-		 * dio_complete callback handler, and since the issuer set
-		 * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
-		 * notice ->dio_complete being set and will defer calling that
-		 * handler until it can be done from a safe task context.
-		 *
-		 * Note that the 'res' being passed in here is not important
-		 * for this case. The actual completion value of the request
-		 * will be gotten from dio_complete when that is run by the
-		 * issuer.
-		 */
-		iocb->ki_complete(iocb, 0);
-	} else {
+		return;
+	}
+
+	/*
+	 * Always run error completions in user context. These are not
+	 * performance critical and some code relies on taking sleeping locks
+	 * for error handling.
+	 */
+	if (dio->error)
+		dio->flags |= IOMAP_DIO_COMP_WORK;
+
+	/*
+	 * Never invalidate pages from this context to avoid deadlocks with
+	 * buffered I/O completions when called from the ioend workqueue,
+	 * or avoid sleeping when called directly from ->bi_end_io.
+	 * Tough luck if you hit the tiny race with someone dirtying the range
+	 * right between this check and the actual completion.
+	 */
+	if ((dio->flags & IOMAP_DIO_WRITE) &&
+	    !(dio->flags & IOMAP_DIO_COMP_WORK)) {
+		if (dio->iocb->ki_filp->f_mapping->nrpages)
+			dio->flags |= IOMAP_DIO_COMP_WORK;
+		else
+			dio->flags |= IOMAP_DIO_NO_INVALIDATE;
+	}
+
+	if (dio->flags & IOMAP_DIO_COMP_WORK) {
 		struct inode *inode = file_inode(iocb->ki_filp);
 
 		/*
@@ -216,7 +210,11 @@ static void iomap_dio_done(struct iomap_dio *dio)
 		 */
 		INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
 		queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+		return;
 	}
+
+	WRITE_ONCE(iocb->private, NULL);
+	iomap_dio_complete_work(&dio->aio.work);
 }
 
 void iomap_dio_bio_end_io(struct bio *bio)
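
The two hunks above are the heart of the series: instead of the old three-way
split (inline / caller / workqueue completion), iomap_dio_done() now makes a
single decision about whether completion work must be punted to the
s_dio_done_wq workqueue.  As a reading aid, here is a small stand-alone C
model of that decision.  It is an illustrative sketch with simplified,
made-up types and names (dio_model, dio_needs_workqueue, DIO_*), not the
kernel function; note that the COMP_WORK-style flag may already have been set
by the submission path (unwritten/COW extents, i_size extension, sync
requirements) before this runs.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the dio flags used in the diff above. */
#define DIO_WRITE		(1u << 0)
#define DIO_COMP_WORK		(1u << 1)	/* punt completion to a workqueue */
#define DIO_NO_INVALIDATE	(1u << 2)	/* skip page cache invalidation */

struct dio_model {
	bool		wait_for_completion;	/* synchronous I/O */
	int		error;
	unsigned int	flags;
	unsigned long	mapping_nrpages;	/* cached pages over the file */
};

/* Returns true if completion must go through the workqueue. */
static bool dio_needs_workqueue(struct dio_model *dio)
{
	if (dio->wait_for_completion)
		return false;	/* the waiting submitter completes the I/O itself */

	/* Errors are always handled from user context. */
	if (dio->error)
		dio->flags |= DIO_COMP_WORK;

	/*
	 * Writes may complete inline only if no page cache invalidation is
	 * needed; otherwise fall back to the workqueue.
	 */
	if ((dio->flags & DIO_WRITE) && !(dio->flags & DIO_COMP_WORK)) {
		if (dio->mapping_nrpages)
			dio->flags |= DIO_COMP_WORK;
		else
			dio->flags |= DIO_NO_INVALIDATE;
	}
	return dio->flags & DIO_COMP_WORK;
}

int main(void)
{
	struct dio_model clean_write = { .flags = DIO_WRITE };
	struct dio_model cached_write = { .flags = DIO_WRITE, .mapping_nrpages = 4 };

	printf("uncached write needs workqueue: %d\n", dio_needs_workqueue(&clean_write));
	printf("cached write needs workqueue:   %d\n", dio_needs_workqueue(&cached_write));
	return 0;
}
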
@@ -252,16 +250,9 @@ u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
 		/*
 		 * Try to avoid another context switch for the completion given
 		 * that we are already called from the ioend completion
-		 * workqueue, but never invalidate pages from this thread to
-		 * avoid deadlocks with buffered I/O completions. Tough luck if
-		 * you hit the tiny race with someone dirtying the range now
-		 * between this check and the actual completion.
+		 * workqueue.
 		 */
-		if (!dio->iocb->ki_filp->f_mapping->nrpages) {
-			dio->flags |= IOMAP_DIO_INLINE_COMP;
-			dio->flags |= IOMAP_DIO_NO_INVALIDATE;
-		}
-		dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+		dio->flags &= ~IOMAP_DIO_COMP_WORK;
 		iomap_dio_done(dio);
 	}
@@ -306,23 +297,6 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 	return 0;
 }
 
-/*
- * Use a FUA write if we need datasync semantics and this is a pure data I/O
- * that doesn't require any metadata updates (including after I/O completion
- * such as unwritten extent conversion) and the underlying device either
- * doesn't have a volatile write cache or supports FUA.
- * This allows us to avoid cache flushes on I/O completion.
- */
-static inline bool iomap_dio_can_use_fua(const struct iomap *iomap,
-		struct iomap_dio *dio)
-{
-	if (iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY))
-		return false;
-	if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
-		return false;
-	return !bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev);
-}
-
 static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 {
 	const struct iomap *iomap = &iter->iomap;
@@ -351,7 +325,24 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 		return -EINVAL;
 
 	if (dio->flags & IOMAP_DIO_WRITE) {
-		bio_opf |= REQ_OP_WRITE;
+		bool need_completion_work = true;
+
+		switch (iomap->type) {
+		case IOMAP_MAPPED:
+			/*
+			 * Directly mapped I/O does not inherently need to do
+			 * work at I/O completion time. But there are various
+			 * cases below where this will get set again.
+			 */
+			need_completion_work = false;
+			break;
+		case IOMAP_UNWRITTEN:
+			dio->flags |= IOMAP_DIO_UNWRITTEN;
+			need_zeroout = true;
+			break;
+		default:
+			break;
+		}
 
 		if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
 			/*
@@ -364,35 +355,54 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 			bio_opf |= REQ_ATOMIC;
 		}
 
-		if (iomap->type == IOMAP_UNWRITTEN) {
-			dio->flags |= IOMAP_DIO_UNWRITTEN;
+		if (iomap->flags & IOMAP_F_SHARED) {
+			/*
+			 * Unsharing of needs to update metadata at I/O
+			 * completion time.
+			 */
+			need_completion_work = true;
+			dio->flags |= IOMAP_DIO_COW;
+		}
+
+		if (iomap->flags & IOMAP_F_NEW) {
+			/*
+			 * Newly allocated blocks might need recording in
+			 * metadata at I/O completion time.
+			 */
+			need_completion_work = true;
 			need_zeroout = true;
 		}
 
-		if (iomap->flags & IOMAP_F_SHARED)
-			dio->flags |= IOMAP_DIO_COW;
-
-		if (iomap->flags & IOMAP_F_NEW)
-			need_zeroout = true;
-		else if (iomap->type == IOMAP_MAPPED &&
-			 iomap_dio_can_use_fua(iomap, dio))
-			bio_opf |= REQ_FUA;
-
-		if (!(bio_opf & REQ_FUA))
-			dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+		/*
+		 * Use a FUA write if we need datasync semantics and this is a
+		 * pure overwrite that doesn't require any metadata updates.
+		 *
+		 * This allows us to avoid cache flushes on I/O completion.
+		 */
+		if (dio->flags & IOMAP_DIO_WRITE_THROUGH) {
+			if (!need_completion_work &&
+			    !(iomap->flags & IOMAP_F_DIRTY) &&
+			    (!bdev_write_cache(iomap->bdev) ||
+			     bdev_fua(iomap->bdev)))
+				bio_opf |= REQ_FUA;
+			else
+				dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+		}
 
 		/*
-		 * We can only do deferred completion for pure overwrites that
+		 * We can only do inline completion for pure overwrites that
 		 * don't require additional I/O at completion time.
 		 *
-		 * This rules out writes that need zeroing or extent conversion,
-		 * extend the file size, or issue metadata I/O or cache flushes
-		 * during completion processing.
+		 * This rules out writes that need zeroing or metdata updates to
+		 * convert unwritten or shared extents.
+		 *
+		 * Writes that extend i_size are also not supported, but this is
+		 * handled in __iomap_dio_rw().
 		 */
-		if (need_zeroout || (pos >= i_size_read(inode)) ||
-		    ((dio->flags & IOMAP_DIO_NEED_SYNC) &&
-		     !(bio_opf & REQ_FUA)))
-			dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+		if (need_completion_work)
+			dio->flags |= IOMAP_DIO_COMP_WORK;
+
+		bio_opf |= REQ_OP_WRITE;
 	} else {
 		bio_opf |= REQ_OP_READ;
 	}
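
The write-side setup above can be read as two predicates over the mapping and
dio state: does this write need completion-time work, and can it use a FUA
write instead of a post-I/O cache flush?  The stand-alone sketch below
restates that logic with simplified, made-up types and names (write_state,
needs_completion_work, can_use_fua); it is illustrative only, and the real
code additionally clears IOMAP_DIO_WRITE_THROUGH when FUA cannot be used so
the later NEED_SYNC handling still issues a flush.

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAPPED, UNWRITTEN, OTHER };	/* stand-in for iomap->type */

struct write_state {
	enum map_type type;
	bool shared;		/* IOMAP_F_SHARED: COW/unshare pending */
	bool newly_allocated;	/* IOMAP_F_NEW */
	bool mapping_dirty;	/* IOMAP_F_DIRTY: metadata still needs syncing */
	bool write_through;	/* IOMAP_DIO_WRITE_THROUGH: O_DSYNC-style write */
	bool bdev_write_cache;	/* device has a volatile write cache */
	bool bdev_fua;		/* device supports FUA writes */
};

/* Does this write need work at I/O completion time (extent conversion etc.)? */
static bool needs_completion_work(const struct write_state *w)
{
	if (w->type != MAPPED)
		return true;	/* e.g. unwritten extent conversion */
	return w->shared || w->newly_allocated;
}

/* Can a FUA write replace the post-I/O cache flush? */
static bool can_use_fua(const struct write_state *w)
{
	if (!w->write_through || needs_completion_work(w) || w->mapping_dirty)
		return false;
	return !w->bdev_write_cache || w->bdev_fua;
}

int main(void)
{
	struct write_state overwrite = {
		.type = MAPPED, .write_through = true,
		.bdev_write_cache = true, .bdev_fua = true,
	};

	printf("pure overwrite uses FUA: %d\n", can_use_fua(&overwrite));
	return 0;
}
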
@@ -413,7 +423,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 	 * ones we set for inline and deferred completions. If none of those
 	 * are available for this IO, clear the polled flag.
 	 */
-	if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
+	if (dio->flags & IOMAP_DIO_COMP_WORK)
 		dio->iocb->ki_flags &= ~IOCB_HIPRI;
 
 	if (need_zeroout) {
@@ -653,9 +663,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		dio->flags |= IOMAP_DIO_FSBLOCK_ALIGNED;
 
 	if (iov_iter_rw(iter) == READ) {
-		/* reads can always complete inline */
-		dio->flags |= IOMAP_DIO_INLINE_COMP;
-
 		if (iomi.pos >= dio->i_size)
 			goto out_free_dio;
@@ -669,15 +676,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		iomi.flags |= IOMAP_WRITE;
 		dio->flags |= IOMAP_DIO_WRITE;
 
-		/*
-		 * Flag as supporting deferred completions, if the issuer
-		 * groks it. This can avoid a workqueue punt for writes.
-		 * We may later clear this flag if we need to do other IO
-		 * as part of this IO completion.
-		 */
-		if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
-			dio->flags |= IOMAP_DIO_CALLER_COMP;
-
 		if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
 			ret = -EAGAIN;
 			if (iomi.pos >= dio->i_size ||
@@ -706,6 +704,12 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			dio->flags |= IOMAP_DIO_WRITE_THROUGH;
 		}
 
+		/*
+		 * i_size updates must to happen from process context.
+		 */
+		if (iomi.pos + iomi.len > dio->i_size)
+			dio->flags |= IOMAP_DIO_COMP_WORK;
+
 		/*
 		 * Try to invalidate cache pages for the range we are writing.
 		 * If this invalidation fails, let the caller fall back to
@@ -778,9 +782,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	 * If all the writes we issued were already written through to the
 	 * media, we don't need to flush the cache on IO completion. Clear the
 	 * sync flag for this case.
+	 *
+	 * Otherwise clear the inline completion flag if any sync work is
+	 * needed, as that needs to be performed from process context.
 	 */
 	if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
 		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+	else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+		dio->flags |= IOMAP_DIO_COMP_WORK;
 
 	/*
 	 * We are about to drop our additional submission reference, which
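
Besides the per-bio checks in iomap_dio_bio_iter(), the hunks above add two
submission-time reasons in __iomap_dio_rw() to force workqueue completion:
writes that extend i_size, and writes that still need a cache flush because
write-through/FUA did not cover them.  A minimal sketch of that predicate,
using a hypothetical helper name (submit_forces_workqueue) for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: submission-time reasons to punt completion to a workqueue. */
static bool submit_forces_workqueue(long long pos, long long len,
				    long long i_size, bool need_sync,
				    bool write_through)
{
	/* i_size updates must happen from process context. */
	if (pos + len > i_size)
		return true;
	/* A cache flush at completion time also needs process context. */
	if (need_sync && !write_through)
		return true;
	return false;
}

int main(void)
{
	printf("extending write punted: %d\n",
	       submit_forces_workqueue(4096, 4096, 4096, false, false));
	return 0;
}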

@@ -367,23 +367,9 @@ struct readahead_control;
 #define IOCB_NOIO (1 << 20)
 /* can use bio alloc cache */
 #define IOCB_ALLOC_CACHE (1 << 21)
-/*
- * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
- * iocb completion can be passed back to the owner for execution from a safe
- * context rather than needing to be punted through a workqueue. If this
- * flag is set, the bio completion handling may set iocb->dio_complete to a
- * handler function and iocb->private to context information for that handler.
- * The issuer should call the handler with that context information from task
- * context to complete the processing of the iocb. Note that while this
- * provides a task context for the dio_complete() callback, it should only be
- * used on the completion side for non-IO generating completions. It's fine to
- * call blocking functions from this callback, but they should not wait for
- * unrelated IO (like cache flushing, new IO generation, etc).
- */
-#define IOCB_DIO_CALLER_COMP (1 << 22)
 /* kiocb is a read or write operation submitted by fs/aio.c. */
-#define IOCB_AIO_RW (1 << 23)
-#define IOCB_HAS_METADATA (1 << 24)
+#define IOCB_AIO_RW (1 << 22)
+#define IOCB_HAS_METADATA (1 << 23)
 
 /* for use in trace events */
 #define TRACE_IOCB_STRINGS \
@@ -400,7 +386,6 @@ struct readahead_control;
 	{ IOCB_WAITQ, "WAITQ" }, \
 	{ IOCB_NOIO, "NOIO" }, \
 	{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
-	{ IOCB_DIO_CALLER_COMP, "CALLER_COMP" }, \
 	{ IOCB_AIO_RW, "AIO_RW" }, \
 	{ IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
@@ -412,23 +397,13 @@ struct kiocb {
 	int ki_flags;
 	u16 ki_ioprio; /* See linux/ioprio.h */
 	u8 ki_write_stream;
-	union {
-		/*
-		 * Only used for async buffered reads, where it denotes the
-		 * page waitqueue associated with completing the read. Valid
-		 * IFF IOCB_WAITQ is set.
-		 */
-		struct wait_page_queue *ki_waitq;
-		/*
-		 * Can be used for O_DIRECT IO, where the completion handling
-		 * is punted back to the issuer of the IO. May only be set
-		 * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
-		 * must then check for presence of this handler when ki_complete
-		 * is invoked. The data passed in to this handler must be
-		 * assigned to ->private when dio_complete is assigned.
-		 */
-		ssize_t (*dio_complete)(void *data);
-	};
+
+	/*
+	 * Only used for async buffered reads, where it denotes the page
+	 * waitqueue associated with completing the read.
+	 * Valid IFF IOCB_WAITQ is set.
+	 */
+	struct wait_page_queue *ki_waitq;
 };
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)

@@ -277,7 +277,6 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	} else {
 		rw->kiocb.ki_ioprio = get_current_ioprio();
 	}
-	rw->kiocb.dio_complete = NULL;
 	rw->kiocb.ki_flags = 0;
 	rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);
@@ -566,15 +565,6 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 
 void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-	struct kiocb *kiocb = &rw->kiocb;
-
-	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
-		long res = kiocb->dio_complete(rw->kiocb.private);
-
-		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
-	}
-
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
@@ -589,10 +579,8 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
 
-	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
-		__io_complete_rw_common(req, res);
-		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
-	}
+	__io_complete_rw_common(req, res);
+	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 	req->io_task_work.func = io_req_rw_complete;
 	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 }
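
For reference, the mechanism removed throughout this commit worked as a
two-step handshake: the iomap bio completion stored a callback in
kiocb->dio_complete (plus its argument in kiocb->private), and the issuer
(io_uring) invoked that callback later from task-work context instead of
iomap punting to a workqueue.  The stand-alone sketch below models that old
contract with simplified, made-up types and names (kiocb_model,
issuer_task_work); fake_dio_complete merely stands in for what used to be
iomap_dio_complete(), so treat it as illustrative only.

#include <stdio.h>

/* Simplified stand-in for the removed kiocb->dio_complete handshake. */
struct kiocb_model {
	long (*dio_complete)(void *data);	/* set by the bio completion */
	void *private;				/* context for dio_complete */
};

/* What iomap's bio end_io used to hand back to the issuer. */
static long fake_dio_complete(void *data)
{
	(void)data;		/* would be the struct iomap_dio */
	return 4096;		/* bytes completed */
}

/* Old issuer side (io_uring task work): finish the dio from task context. */
static long issuer_task_work(struct kiocb_model *iocb)
{
	if (iocb->dio_complete)
		return iocb->dio_complete(iocb->private);
	return 0;
}

int main(void)
{
	struct kiocb_model iocb = { .dio_complete = fake_dio_complete };

	printf("completed %ld bytes from task context\n", issuer_task_work(&iocb));
	return 0;
}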