blk-throttle: Introduce flag "BIO_TG_BPS_THROTTLED"

Subsequent patches will split the single queue into separate bps and iops
queues. To prevent IO that has already passed through the bps queue at a
single tg level from being counted toward bps wait time again, we introduce
"BIO_TG_BPS_THROTTLED" flag. Since throttle and QoS operate at different
levels, we reuse the value as "BIO_QOS_THROTTLED".

We set this flag when charge bps and clear it when charge iops, as the bio
will move to the upper-level tg or be dispatched.

This patch does not involve functional changes.

Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Zizhi Wo <wozizhi@huaweicloud.com>
Link: https://lore.kernel.org/r/20250506020935.655574-5-wozizhi@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Zizhi Wo 2025-05-06 10:09:31 +08:00 committed by Jens Axboe
parent a404be5399
commit c4da7bf54b
2 changed files with 15 additions and 2 deletions

View File

@@ -792,12 +792,16 @@ static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
 	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+	if (!bio_flagged(bio, BIO_BPS_THROTTLED) &&
+	    !bio_flagged(bio, BIO_TG_BPS_THROTTLED)) {
+		bio_set_flag(bio, BIO_TG_BPS_THROTTLED);
 		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
+	}
 }
 
 static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
 {
+	bio_clear_flag(bio, BIO_TG_BPS_THROTTLED);
 	tg->io_disp[bio_data_dir(bio)]++;
 }
@@ -823,7 +827,8 @@ static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio
 	/* no need to throttle if this bio's bytes have been accounted */
 	if (bps_limit == U64_MAX || tg->flags & THROTL_TG_CANCELING ||
-	    bio_flagged(bio, BIO_BPS_THROTTLED))
+	    bio_flagged(bio, BIO_BPS_THROTTLED) ||
+	    bio_flagged(bio, BIO_TG_BPS_THROTTLED))
 		return 0;
 
 	tg_update_slice(tg, rw);

View File

@@ -296,6 +296,14 @@ enum {
 	 * of this bio. */
 	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
 	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
+	/*
+	 * This bio has completed bps throttling at the single tg granularity,
+	 * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
+	 * into the sq->queued of the upper tg, or is about to be dispatched,
+	 * this flag needs to be cleared. Since blk-throttle and rq_qos are not
+	 * on the same hierarchical level, reuse the value.
+	 */
+	BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
 	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
 	BIO_REMAPPED,
 	BIO_ZONE_WRITE_PLUGGING,	/* bio handled through zone write plugging */