mirror of https://github.com/torvalds/linux.git
block/blk-throttle: Remove throtl_slice from struct throtl_data
throtl_slice is now a constant. Remove the variable and use the constant
directly where needed.

Cc: Yu Kuai <yukuai@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Khazhismel Kumykov <khazhy@google.com>
Reviewed-by: Yu Kuai <yukuai@fnnas.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 20d0b359c7
commit 6483faa393
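For context, a minimal user-space sketch of the slice arithmetic these hunks switch from tg->td->throtl_slice to DFL_THROTL_SLICE. The HZ and DFL_THROTL_SLICE values, the roundup()/rounddown() macro reimplementations, and the sample numbers below are assumptions made for illustration, not taken from this commit:

/*
 * Illustrative sketch only: rounds a slice end up to a whole slice
 * boundary (as throtl_set_slice_end() does) and rounds elapsed time
 * down to whole slices (as throtl_trim_slice() does), using a
 * compile-time slice length instead of a per-queue field.
 */
#include <stdio.h>

#define HZ			100		/* assumed jiffies per second */
#define DFL_THROTL_SLICE	(HZ / 10)	/* assumed default slice, in jiffies */

#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define rounddown(x, y)	(((x) / (y)) * (y))

int main(void)
{
	unsigned long jiffies = 1234;	/* stand-in for the kernel's jiffies counter */
	unsigned long slice_start = 1200;

	/* slice end rounded up to a slice boundary */
	unsigned long slice_end = roundup(jiffies + DFL_THROTL_SLICE, DFL_THROTL_SLICE);

	/* elapsed time rounded down to whole slices */
	unsigned long elapsed = rounddown(jiffies - slice_start, DFL_THROTL_SLICE);

	printf("slice_end=%lu elapsed=%lu slices_used=%lu\n",
	       slice_end, elapsed, elapsed / DFL_THROTL_SLICE);
	return 0;
}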
@@ -38,8 +38,6 @@ struct throtl_data
 	/* Total Number of queued bios on READ and WRITE lists */
 	unsigned int nr_queued[2];
 
-	unsigned int throtl_slice;
-
 	/* Work for dispatching throttled bios */
 	struct work_struct dispatch_work;
 };
@@ -446,7 +444,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 					  unsigned long expires)
 {
-	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
+	unsigned long max_expire = jiffies + 8 * DFL_THROTL_SLICE;
 
 	/*
 	 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -514,7 +512,7 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 	if (time_after(start, tg->slice_start[rw]))
 		tg->slice_start[rw] = start;
 
-	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+	tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE;
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -529,7 +527,7 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 		tg->io_disp[rw] = 0;
 	}
 	tg->slice_start[rw] = jiffies;
-	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
+	tg->slice_end[rw] = jiffies + DFL_THROTL_SLICE;
 
 	throtl_log(&tg->service_queue,
 		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
@@ -540,7 +538,7 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 					unsigned long jiffy_end)
 {
-	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
+	tg->slice_end[rw] = roundup(jiffy_end, DFL_THROTL_SLICE);
 }
 
 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
@@ -671,12 +669,12 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 * sooner, then we need to reduce slice_end. A high bogus slice_end
 	 * is bad because it does not allow new slice to start.
 	 */
-	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
+	throtl_set_slice_end(tg, rw, jiffies + DFL_THROTL_SLICE);
 
 	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
-				 tg->td->throtl_slice);
+				 DFL_THROTL_SLICE);
 	/* Don't trim slice until at least 2 slices are used */
-	if (time_elapsed < tg->td->throtl_slice * 2)
+	if (time_elapsed < DFL_THROTL_SLICE * 2)
 		return;
 
 	/*
@@ -687,7 +685,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 * lower rate than expected. Therefore, other than the above rounddown,
 	 * one extra slice is preserved for deviation.
 	 */
-	time_elapsed -= tg->td->throtl_slice;
+	time_elapsed -= DFL_THROTL_SLICE;
 	bytes_trim = throtl_trim_bps(tg, rw, time_elapsed);
 	io_trim = throtl_trim_iops(tg, rw, time_elapsed);
 	if (!bytes_trim && !io_trim)
@@ -697,7 +695,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 
 	throtl_log(&tg->service_queue,
 		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
-		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
+		   rw == READ ? 'R' : 'W', time_elapsed / DFL_THROTL_SLICE,
 		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
 		   jiffies);
 }
@@ -768,7 +766,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
 	jiffy_elapsed = jiffies - tg->slice_start[rw];
 
 	/* Round up to the next throttle slice, wait time must be nonzero */
-	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
+	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, DFL_THROTL_SLICE);
 	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
 	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
 		return 0;
@@ -794,9 +792,9 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 
 	/* Slice has just started. Consider one slice interval */
 	if (!jiffy_elapsed)
-		jiffy_elapsed_rnd = tg->td->throtl_slice;
+		jiffy_elapsed_rnd = DFL_THROTL_SLICE;
 
-	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
+	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, DFL_THROTL_SLICE);
 	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
 	/* Need to consider the case of bytes_allowed overflow. */
 	if ((bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
@@ -848,7 +846,7 @@ static void tg_update_slice(struct throtl_grp *tg, bool rw)
 	    sq_queued(&tg->service_queue, rw) == 0)
 		throtl_start_new_slice(tg, rw, true);
 	else
-		throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);
+		throtl_extend_slice(tg, rw, jiffies + DFL_THROTL_SLICE);
 }
 
 static unsigned long tg_dispatch_bps_time(struct throtl_grp *tg, struct bio *bio)
@@ -1333,12 +1331,8 @@ static int blk_throtl_init(struct gendisk *disk)
 	if (ret) {
 		q->td = NULL;
 		kfree(td);
-		goto out;
 	}
 
-	td->throtl_slice = DFL_THROTL_SLICE;
-
-out:
 	blk_mq_unquiesce_queue(disk->queue);
 	blk_mq_unfreeze_queue(disk->queue, memflags);
 