vdso/vsyscall: Avoid slow division loop in auxiliary clock update
The call to __iter_div_u64_rem() in vdso_time_update_aux() is a wrapper
around subtraction. It cannot be used to divide large numbers, as that
introduces long, computationally expensive delays. A regular u64 division
is not an option in the timekeeper update path either, as it can be too
slow.

Instead of splitting the ktime_t offset into second and subsecond
components during the timekeeper update fast-path, do it together with the
adjustment of tk->offs_aux in the slow-path. This is equivalent to the
handling of offs_boot and monotonic_to_boot.

Reuse the storage of monotonic_to_boot for the new field, as it is not
used by auxiliary timekeepers.
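
To make the cost concrete, here is a minimal userspace sketch (plain C,
not kernel code) of a divide-by-repeated-subtraction helper in the spirit
of __iter_div_u64_rem(); the one-year offset is a made-up example:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* Simplified stand-in for a subtraction-loop division: the number
	 * of loop iterations equals the quotient. */
	static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
					 uint64_t *remainder)
	{
		uint32_t quot = 0;

		while (dividend >= divisor) {
			dividend -= divisor;
			quot++;
		}
		*remainder = dividend;
		return quot;
	}

	int main(void)
	{
		/* Hypothetical CLOCK_AUX offset roughly one year ahead of
		 * CLOCK_MONOTONIC, expressed in nanoseconds. */
		uint64_t offs_aux_ns = 365ULL * 24 * 3600 * NSEC_PER_SEC;
		uint64_t nsec = 123456789 + offs_aux_ns;
		uint64_t rem;

		/* ~31.5 million iterations: one per second of offset. */
		uint32_t secs = iter_div_u64_rem(nsec, NSEC_PER_SEC, &rem);

		printf("carried %u seconds, %llu ns remain\n",
		       secs, (unsigned long long)rem);
		return 0;
	}

With the change below, the ktime_t offset is split into seconds and
nanoseconds once in the slow-path (tk_update_aux_offs()), so the vDSO
fast-path only carries the sub-second remainder and the division loop is
bounded to at most a couple of iterations.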
Fixes: 380b84e168 ("vdso/vsyscall: Update auxiliary clock data in the datapage")
Reported-by: Miroslav Lichvar <mlichvar@redhat.com>
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250825-vdso-auxclock-division-v1-1-a1d32a16a313@linutronix.de
Closes: https://lore.kernel.org/lkml/aKwsNNWsHJg8IKzj@localhost/
commit 762af5a2aa
parent b320789d68
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -76,6 +76,7 @@ struct tk_read_base {
  * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @clock_valid:	Indicator for valid clock
  * @monotonic_to_boot:	CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
+ * @monotonic_to_aux:	CLOCK_MONOTONIC to CLOCK_AUX offset
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
@@ -117,6 +118,9 @@ struct tk_read_base {
  * @offs_aux is used by the auxiliary timekeepers which do not utilize any
  * of the regular timekeeper offset fields.
  *
+ * @monotonic_to_aux is a timespec64 representation of @offs_aux to
+ * accelerate the VDSO update for CLOCK_AUX.
+ *
  * The cacheline ordering of the structure is optimized for in kernel usage of
  * the ktime_get() and ktime_get_ts64() family of time accessors. Struct
  * timekeeper is prepended in the core timekeeping code with a sequence count,
@@ -159,7 +163,10 @@ struct timekeeper {
 	u8			cs_was_changed_seq;
 	u8			clock_valid;
 
-	struct timespec64	monotonic_to_boot;
+	union {
+		struct timespec64	monotonic_to_boot;
+		struct timespec64	monotonic_to_aux;
+	};
 
 	u64			cycle_interval;
 	u64			xtime_interval;
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -83,6 +83,12 @@ static inline bool tk_is_aux(const struct timekeeper *tk)
 }
 #endif
 
+static inline void tk_update_aux_offs(struct timekeeper *tk, ktime_t offs)
+{
+	tk->offs_aux = offs;
+	tk->monotonic_to_aux = ktime_to_timespec64(offs);
+}
+
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -1506,7 +1512,7 @@ static int __timekeeping_inject_offset(struct tk_data *tkd, const struct timespe
 			timekeeping_restore_shadow(tkd);
 			return -EINVAL;
 		}
-		tks->offs_aux = offs;
+		tk_update_aux_offs(tks, offs);
 	}
 
 	timekeeping_update_from_shadow(tkd, TK_UPDATE_ALL);
@@ -2937,7 +2943,7 @@ static int aux_clock_set(const clockid_t id, const struct timespec64 *tnew)
 	 * xtime ("realtime") is not applicable for auxiliary clocks and
 	 * kept in sync with "monotonic".
 	 */
-	aux_tks->offs_aux = ktime_sub(timespec64_to_ktime(*tnew), tnow);
+	tk_update_aux_offs(aux_tks, ktime_sub(timespec64_to_ktime(*tnew), tnow));
 
 	timekeeping_update_from_shadow(aux_tkd, TK_UPDATE_ALL);
 	return 0;
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -159,10 +159,10 @@ void vdso_time_update_aux(struct timekeeper *tk)
 	if (clock_mode != VDSO_CLOCKMODE_NONE) {
 		fill_clock_configuration(vc, &tk->tkr_mono);
 
-		vdso_ts->sec = tk->xtime_sec;
+		vdso_ts->sec = tk->xtime_sec + tk->monotonic_to_aux.tv_sec;
 
 		nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
-		nsec += tk->offs_aux;
+		nsec += tk->monotonic_to_aux.tv_nsec;
 		vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
 		nsec = nsec << tk->tkr_mono.shift;
 		vdso_ts->nsec = nsec;