block: accumulate memory segment gaps per bio

The blk-mq DMA iterator has an optimization for requests whose segments
align to the device's IOMMU merge boundary. That boundary may be larger
than the device's virtual boundary, but the code had been relying on the
virtual boundary queue limit to know ahead of time whether a request is
guaranteed to qualify for the optimization.

Rather than rely on that queue limit, which many devices may not report,
record the lowest set bit of any boundary gap between adjacent segments
while the bio's segments are already being walked. The request stores
this value so it can be carried across merges and used to quickly check,
per I/O, whether the request can use the IOVA optimization.
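
As a stand-alone illustration of why remembering only the lowest gap bit is
enough (an editor's user-space sketch, not part of the patch; gap_bits() and
the example offsets are made up), any power-of-two merge boundary check only
cares whether some gap bit falls inside the boundary mask:

/* Sketch: accumulate the boundary "gap" bits of adjacent segments and
 * keep only the lowest one, in the spirit of bvec_seg_gap()/ffs() below.
 */
#include <assert.h>

static unsigned int gap_bits(unsigned int prev_off, unsigned int prev_len,
			     unsigned int next_off)
{
	/* Any bit set here marks an address misalignment between segments. */
	return next_off | (prev_off + prev_len);
}

int main(void)
{
	/* First segment ends at offset 0x600, the next starts at 0x200. */
	unsigned int gaps = gap_bits(0x0, 0x600, 0x200);
	/* Isolate the lowest set bit (0x200 here; ffs() would return 10). */
	unsigned int lowest = gaps & -gaps;

	/*
	 * Against a 4k IOMMU granule (merge boundary mask 0xfff), a gap bit
	 * inside the mask means the segments cannot be coalesced into one
	 * IOVA mapping; remembering just the lowest bit detects that.
	 */
	assert((lowest & 0xfff) != 0);

	/* A 4k-aligned gap (0x1000) would clear the same mask. */
	assert(((0x1000u & -0x1000u) & 0xfff) == 0);
	return 0;
}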

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Keith Busch <kbusch@kernel.org>
Date:      2025-10-14 08:04:55 -07:00
Committer: Jens Axboe <axboe@kernel.dk>
commit 2f6b2565d4
parent 0739c2c6a0
8 files changed, 77 insertions(+), 5 deletions(-)

@@ -253,6 +253,7 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 	bio->bi_write_hint = 0;
 	bio->bi_write_stream = 0;
 	bio->bi_status = 0;
+	bio->bi_bvec_gap_bit = 0;
 	bio->bi_iter.bi_sector = 0;
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_idx = 0;

@@ -459,6 +459,8 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 	if (rq->bio) {
 		if (!ll_back_merge_fn(rq, bio, nr_segs))
 			return -EINVAL;
+		rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio,
+					       rq->phys_gap_bit);
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 		rq->__data_len += bio->bi_iter.bi_size;
@@ -469,6 +471,7 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 	rq->nr_phys_segments = nr_segs;
 	rq->bio = rq->biotail = bio;
 	rq->__data_len = bio->bi_iter.bi_size;
+	rq->phys_gap_bit = bio->bi_bvec_gap_bit;
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_append_bio);

@@ -302,6 +302,12 @@ static unsigned int bio_split_alignment(struct bio *bio,
 	return lim->logical_block_size;
 }
 
+static inline unsigned int bvec_seg_gap(struct bio_vec *bvprv,
+		struct bio_vec *bv)
+{
+	return bv->bv_offset | (bvprv->bv_offset + bvprv->bv_len);
+}
+
 /**
  * bio_split_io_at - check if and where to split a bio
  * @bio:	[in] bio to be split
@@ -319,8 +325,8 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 		unsigned *segs, unsigned max_bytes, unsigned len_align_mask)
 {
 	struct bio_vec bv, bvprv, *bvprvp = NULL;
+	unsigned nsegs = 0, bytes = 0, gaps = 0;
 	struct bvec_iter iter;
-	unsigned nsegs = 0, bytes = 0;
 
 	bio_for_each_bvec(bv, bio, iter) {
 		if (bv.bv_offset & lim->dma_alignment ||
@@ -331,8 +337,11 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
-			goto split;
+		if (bvprvp) {
+			if (bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
+				goto split;
+			gaps |= bvec_seg_gap(bvprvp, &bv);
+		}
 
 		if (nsegs < lim->max_segments &&
 		    bytes + bv.bv_len <= max_bytes &&
@@ -350,6 +359,7 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 	}
 
 	*segs = nsegs;
+	bio->bi_bvec_gap_bit = ffs(gaps);
 	return 0;
 split:
 	if (bio->bi_opf & REQ_ATOMIC)
@@ -385,6 +395,7 @@ int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 	 * big IO can be trival, disable iopoll when split needed.
 	 */
 	bio_clear_polled(bio);
+	bio->bi_bvec_gap_bit = ffs(gaps);
 	return bytes >> SECTOR_SHIFT;
 }
 EXPORT_SYMBOL_GPL(bio_split_io_at);
@@ -721,6 +732,21 @@ static bool blk_atomic_write_mergeable_rqs(struct request *rq,
 	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
 }
 
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+	       u8 gaps_bit)
+{
+	struct bio_vec pb, nb;
+
+	gaps_bit = min_not_zero(gaps_bit, prev->bi_bvec_gap_bit);
+	gaps_bit = min_not_zero(gaps_bit, next->bi_bvec_gap_bit);
+
+	bio_get_last_bvec(prev, &pb);
+	bio_get_first_bvec(next, &nb);
+	if (!biovec_phys_mergeable(q, &pb, &nb))
+		gaps_bit = min_not_zero(gaps_bit, ffs(bvec_seg_gap(&pb, &nb)));
+	return gaps_bit;
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -785,6 +811,9 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (next->start_time_ns < req->start_time_ns)
 		req->start_time_ns = next->start_time_ns;
 
+	req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, next->bio,
+					min_not_zero(next->phys_gap_bit,
+						     req->phys_gap_bit));
 	req->biotail->bi_next = next->bio;
 	req->biotail = next->biotail;
 
@@ -908,6 +937,8 @@ enum bio_merge_status bio_attempt_back_merge(struct request *req,
 	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
 		blk_zone_write_plug_bio_merged(bio);
 
+	req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, bio,
+					req->phys_gap_bit);
 	req->biotail->bi_next = bio;
 	req->biotail = bio;
 	req->__data_len += bio->bi_iter.bi_size;
@@ -942,6 +973,8 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 
 	blk_update_mixed_merge(req, bio, true);
 
+	req->phys_gap_bit = bio_seg_gap(req->q, bio, req->bio,
+					req->phys_gap_bit);
 	bio->bi_next = req->bio;
 	req->bio = bio;
 

@@ -79,8 +79,7 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
 static inline bool blk_can_dma_map_iova(struct request *req,
 					struct device *dma_dev)
 {
-	return !((queue_virt_boundary(req->q) + 1) &
-		 dma_get_merge_boundary(dma_dev));
+	return !(req_phys_gap_mask(req) & dma_get_merge_boundary(dma_dev));
 }
 
 static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)

@@ -376,6 +376,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->queuelist);
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
+	rq->phys_gap_bit = 0;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->tag = BLK_MQ_NO_TAG;
@@ -668,6 +669,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
 			goto out_queue_exit;
 	}
 	rq->__data_len = 0;
+	rq->phys_gap_bit = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	return rq;
@@ -748,6 +750,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
 	blk_mq_rq_time_init(rq, alloc_time_ns);
 	rq->__data_len = 0;
+	rq->phys_gap_bit = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	return rq;
@@ -2674,6 +2677,8 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 	rq->bio = rq->biotail = bio;
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->__data_len = bio->bi_iter.bi_size;
+	rq->phys_gap_bit = bio->bi_bvec_gap_bit;
+
 	rq->nr_phys_segments = nr_segs;
 	if (bio_integrity(bio))
 		rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
@@ -3380,6 +3385,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 	}
 	rq->nr_phys_segments = rq_src->nr_phys_segments;
 	rq->nr_integrity_segments = rq_src->nr_integrity_segments;
+	rq->phys_gap_bit = rq_src->phys_gap_bit;
 
 	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
 		goto free_and_out;

@@ -324,6 +324,8 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
 		gfp_t gfp, struct bio_set *bs);
 int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
 		unsigned *segs, unsigned max_bytes, unsigned len_align);
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+	       u8 gaps_bit);
 
 /**
  * bio_next_split - get next @sectors from a bio, splitting if necessary

@@ -152,6 +152,14 @@ struct request {
 	unsigned short nr_phys_segments;
 	unsigned short nr_integrity_segments;
 
+	/*
+	 * The lowest set bit for address gaps between physical segments. This
+	 * provides information necessary for dma optimization opportunities,
+	 * like for testing if the segments can be coalesced against the
+	 * device's iommu granule.
+	 */
+	unsigned char phys_gap_bit;
+
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct bio_crypt_ctx *crypt_ctx;
 	struct blk_crypto_keyslot *crypt_keyslot;
@@ -208,6 +216,14 @@ struct request {
 	void *end_io_data;
 };
 
+/*
+ * Returns a mask with all bits starting at req->phys_gap_bit set to 1.
+ */
+static inline unsigned long req_phys_gap_mask(const struct request *req)
+{
+	return ~(((1 << req->phys_gap_bit) >> 1) - 1);
+}
+
 static inline enum req_op req_op(const struct request *req)
 {
 	return req->cmd_flags & REQ_OP_MASK;
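
A quick stand-alone check of the req_phys_gap_mask() arithmetic above and of
the consumer-side test in blk_can_dma_map_iova() (an editor's user-space
sketch; phys_gap_mask() and the example bit values are illustrative only):

/* Rebuild the mask from a 1-based ffs()-style gap bit, as
 * req_phys_gap_mask() does, and test it against a power-of-two
 * IOMMU merge boundary mask.
 */
#include <assert.h>

static unsigned long phys_gap_mask(unsigned char gap_bit)
{
	/* gap_bit == 0 means no gap was recorded: the mask is empty. */
	return ~(((1UL << gap_bit) >> 1) - 1);
}

int main(void)
{
	unsigned long iommu_mask = 0xfff;	/* 4k granule */

	/* No gaps recorded: the request can always take the IOVA path. */
	assert((phys_gap_mask(0) & iommu_mask) == 0);

	/* Lowest gap bit 13 (0x1000): every gap is 4k aligned, still OK. */
	assert((phys_gap_mask(13) & iommu_mask) == 0);

	/* Lowest gap bit 10 (0x200): a sub-granule gap exists, not OK. */
	assert((phys_gap_mask(10) & iommu_mask) != 0);
	return 0;
}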

@@ -218,6 +218,18 @@
 	enum rw_hint		bi_write_hint;
 	u8			bi_write_stream;
 	blk_status_t		bi_status;
+
+	/*
+	 * The bvec gap bit indicates the lowest set bit in any address offset
+	 * between all bi_io_vecs. This field is initialized only after the bio
+	 * is split to the hardware limits (see bio_split_io_at()). The value
+	 * may be used to consider DMA optimization when performing that
+	 * mapping. The value is compared to a power of two mask where the
+	 * result depends on any bit set within the mask, so saving the lowest
+	 * bit is sufficient to know if any segment gap collides with the mask.
+	 */
+	u8			bi_bvec_gap_bit;
+
 	atomic_t		__bi_remaining;
 
 	struct bvec_iter	bi_iter;
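
Finally, when bios or requests are merged, the patch combines the per-bio
values with min_not_zero() and folds in the gap at the join point (see
bio_seg_gap() above). A user-space sketch of that combination rule, with
made-up offsets and a local min_not_zero() helper standing in for the kernel
macro:

/* Keep the smallest non-zero 1-based gap bit across a merge: a smaller
 * bit means a finer-grained misalignment, and zero means "no gap seen".
 */
#include <assert.h>
#include <strings.h>	/* ffs() */

static unsigned char min_not_zero(unsigned char a, unsigned char b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned char front = 0;		/* front bio: no internal gaps */
	unsigned char back = ffs(0x1000);	/* back bio: gaps 4k aligned (bit 13) */

	/* At the join, the previous bvec ends at 0xe00 and the next one
	 * starts at offset 0x200, so the joining gap bits are 0xe00 | 0x200.
	 */
	unsigned char joined = min_not_zero(min_not_zero(front, back),
					    ffs(0xe00 | 0x200));

	/* The finest misalignment wins: bit 10, i.e. a 0x200 boundary. */
	assert(joined == 10);
	return 0;
}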