mirror of https://github.com/torvalds/linux.git
blk-throttle: Split throtl_charge_bio() into bps and iops functions
Split throtl_charge_bio() to facilitate subsequent patches that will separately charge bps and iops after queue separation.

Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Zizhi Wo <wozizhi@huaweicloud.com>
Link: https://lore.kernel.org/r/20250506020935.655574-4-wozizhi@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3660cd4228
commit a404be5399
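
For orientation before the full diff: at every call site, the single combined charge becomes two independent charges, one against the byte (bps) budget and one against the I/O-count (iops) budget, as the hunks below show:

-	throtl_charge_bio(tg, bio);
+	throtl_charge_bps_bio(tg, bio);
+	throtl_charge_iops_bio(tg, bio);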

block/blk-throttle.c

@@ -787,6 +787,20 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	return jiffy_wait;
 }
 
+static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	unsigned int bio_size = throtl_bio_data_size(bio);
+
+	/* Charge the bio to the group */
+	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
+}
+
+static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	tg->io_disp[bio_data_dir(bio)]++;
+}
+
 /*
  * If previous slice expired, start a new one otherwise renew/extend existing
  * slice to make sure it is at least throtl_slice interval long since now. New
@@ -859,18 +873,6 @@ static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
 	return max(bps_wait, iops_wait);
 }
 
-static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
-{
-	bool rw = bio_data_dir(bio);
-	unsigned int bio_size = throtl_bio_data_size(bio);
-
-	/* Charge the bio to the group */
-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
-		tg->bytes_disp[rw] += bio_size;
-
-	tg->io_disp[rw]++;
-}
-
 /**
  * throtl_add_bio_tg - add a bio to the specified throtl_grp
  * @bio: bio to add
@@ -957,7 +959,8 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 	sq->nr_queued[rw]--;
 
-	throtl_charge_bio(tg, bio);
+	throtl_charge_bps_bio(tg, bio);
+	throtl_charge_iops_bio(tg, bio);
 
 	/*
 	 * If our parent is another tg, we just need to transfer @bio to
@@ -1684,7 +1687,8 @@ bool __blk_throtl_bio(struct bio *bio)
 	while (true) {
 		if (tg_within_limit(tg, bio, rw)) {
 			/* within limits, let's charge and dispatch directly */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 
 			/*
 			 * We need to trim slice even when bios are not being
@@ -1707,7 +1711,8 @@ bool __blk_throtl_bio(struct bio *bio)
 			 * control algorithm is adaptive, and extra IO bytes
 			 * will be throttled for paying the debt
 			 */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 		} else {
 			/* if above limits, break to queue */
 			break;
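
As a reading aid, below is a minimal, self-contained user-space C sketch of the accounting model the split helpers express: byte and I/O counts live in separate per-direction counters, and a bio already flagged as bps-throttled is not byte-charged again while it still counts toward iops. Every name here (demo_bio, demo_tg, demo_charge_*) is an illustrative stand-in, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum { DIR_READ = 0, DIR_WRITE = 1 };

struct demo_bio {
	int dir;              /* read or write direction */
	unsigned int size;    /* payload size in bytes */
	bool bps_throttled;   /* models the BIO_BPS_THROTTLED flag */
};

struct demo_tg {
	unsigned long long bytes_disp[2]; /* bytes dispatched, per direction */
	unsigned int io_disp[2];          /* I/Os dispatched, per direction */
};

/* Charge only the byte budget; skip bios already accounted for bps. */
static void demo_charge_bps(struct demo_tg *tg, const struct demo_bio *bio)
{
	if (!bio->bps_throttled)
		tg->bytes_disp[bio->dir] += bio->size;
}

/* Charge only the I/O-count budget. */
static void demo_charge_iops(struct demo_tg *tg, const struct demo_bio *bio)
{
	tg->io_disp[bio->dir]++;
}

int main(void)
{
	struct demo_tg tg = { { 0 }, { 0 } };
	struct demo_bio fresh = { DIR_WRITE, 4096, false };
	struct demo_bio requeued = { DIR_WRITE, 8192, true };

	/* A fresh bio is charged against both budgets. */
	demo_charge_bps(&tg, &fresh);
	demo_charge_iops(&tg, &fresh);

	/* An already bps-throttled bio only adds to the iops budget. */
	demo_charge_bps(&tg, &requeued);
	demo_charge_iops(&tg, &requeued);

	printf("write: bytes=%llu ios=%u\n",
	       tg.bytes_disp[DIR_WRITE], tg.io_disp[DIR_WRITE]);
	return 0;
}

The split itself changes no behaviour in this commit; per the commit message, it only lets subsequent patches charge the bps and iops budgets from separate paths once the queues are separated.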