From 2cb0b5fbf9da575dc9d129a3047808f1cb3ab9cc Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Sun, 16 Jul 2023 09:46:52 +0000
Subject: [PATCH] kern: Make time_second and time_uptime macros that work
 atomically.

These use atomic load on platforms with atomic 64-bit load, and
seqlocks on platforms without.

This has the unfortunate side effect of slightly reducing the real
times available on 32-bit platforms, from ending some time in the year
584942417355 AD, available on 64-bit platforms, to ending some time in
the year 584942417218 AD.  But during that slightly shorter time,
32-bit platforms can avoid bugs arising from non-atomic access to
time_uptime and time_second.

Note: All platforms still have non-atomic access problems for bintime,
binuptime, nanotime, nanouptime, &c.  This can be addressed by putting
a seqlock around timebasebin and possibly some other variable -- to be
done in a later change.

XXX kernel ABI change -- deleting symbols
---
 share/man/man9/time_second.9 |  32 +++++++---
 sys/kern/kern_tc.c           | 116 +++++++++++++++++++++++++++++++++--
 sys/sys/timevar.h            |  48 +++++++++++++--
 3 files changed, 179 insertions(+), 17 deletions(-)

diff --git a/share/man/man9/time_second.9 b/share/man/man9/time_second.9
index 6ad289b11e39..b3f581eb1f3b 100644
--- a/share/man/man9/time_second.9
+++ b/share/man/man9/time_second.9
@@ -32,23 +32,27 @@
 .\"
 .\" <>
 .\"
-.Dd April 17, 2020
+.Dd July 16, 2023
 .Dt TIME_SECOND 9
 .Os
 .Sh NAME
 .Nm time_second ,
-.Nm time_uptime
+.Nm time_uptime ,
+.Nm time_uptime32
 .Nd system time variables
 .Sh SYNOPSIS
 .In sys/time.h
-.Vt extern time_t time_second;
-.Vt extern time_t time_uptime;
+.Vt extern const time_t time_second;
+.Vt extern const time_t time_uptime;
+.Vt extern const uint32_t time_uptime32;
 .Sh DESCRIPTION
 The
 .Va time_second
 variable is the system's
-.Dq wall time
-clock.
+.Dq wall clock ,
+giving the number of seconds since midnight (0 hour),
+January 1, 1970, (proleptic) UTC,
+minus the number of leap seconds.
 It is set at boot by
 .Xr inittodr 9 ,
 and is updated periodically via
@@ -64,9 +68,19 @@ It is set at boot, and is updated periodically.
 (It is not updated by
 .Xr settimeofday 2 . )
 .Pp
-All of these variables contain times
-expressed in seconds since midnight (0 hour),
-January 1, 1970, UTC.
+The
+.Va time_uptime32
+variable is the low-order 32 bits of
+.Va time_uptime ,
+which is cheaper to read on 32-bit platforms.
+.Pp
+You must only read the variables
+.Va time_second ,
+.Va time_uptime ,
+and
+.Va time_uptime32 ;
+you may not write to them or take their addresses.
+They may be implemented as macros.
 .Pp
 The
 .Xr bintime 9 ,
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
index 53a22fc6a6f6..55ed3263fd50 100644
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -47,10 +47,12 @@ __KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.62 2021/06/02 21:34:58 riastradh Exp $
 #endif
 
 #include <sys/param.h>
+
 #include <sys/atomic.h>
 #include <sys/evcnt.h>
 #include <sys/kauth.h>
 #include <sys/kernel.h>
+#include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/reboot.h>	/* XXX just to get AB_VERBOSE */
 #include <sys/sysctl.h>
@@ -131,8 +133,18 @@ static struct timehands *volatile timehands = &th0;
 struct timecounter *timecounter = &dummy_timecounter;
 static struct timecounter *timecounters = &dummy_timecounter;
 
-volatile time_t time_second __cacheline_aligned = 1;
-volatile time_t time_uptime __cacheline_aligned = 1;
+volatile time_t time__second __cacheline_aligned = 1;
+volatile time_t time__uptime __cacheline_aligned = 1;
+
+#ifndef __HAVE_ATOMIC64_LOADSTORE
+static volatile struct {
+	uint32_t lo, hi;
+} time__uptime32 __cacheline_aligned = {
+	.lo = 1,
+}, time__second32 __cacheline_aligned = {
+	.lo = 1,
+};
+#endif
 
 static struct bintime timebasebin;
 
@@ -143,6 +155,103 @@ static u_int timecounter_mods;
 static volatile int timecounter_removals = 1;
 static u_int timecounter_bad;
 
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+
+static inline void
+setrealuptime(time_t second, time_t uptime)
+{
+
+	atomic_store_relaxed(&time__second, second);
+	atomic_store_relaxed(&time__uptime, uptime);
+}
+
+#else
+
+static inline void
+setrealuptime(time_t second, time_t uptime)
+{
+	uint32_t seclo = second & 0xffffffff, sechi = second >> 32;
+	uint32_t uplo = uptime & 0xffffffff, uphi = uptime >> 32;
+
+	KDASSERT(mutex_owned(&timecounter_lock));
+
+	/*
+	 * Fast path -- no wraparound, just updating the low bits, so
+	 * no need for seqlocked access.
+	 */
+	if (__predict_true(sechi == time__second32.hi) &&
+	    __predict_true(uphi == time__uptime32.hi)) {
+		atomic_store_relaxed(&time__second32.lo, seclo);
+		atomic_store_relaxed(&time__uptime32.lo, uplo);
+		return;
+	}
+
+	atomic_store_relaxed(&time__second32.hi, 0xffffffff);
+	atomic_store_relaxed(&time__uptime32.hi, 0xffffffff);
+	membar_producer();
+	atomic_store_relaxed(&time__second32.lo, seclo);
+	atomic_store_relaxed(&time__uptime32.lo, uplo);
+	membar_producer();
+	atomic_store_relaxed(&time__second32.hi, sechi);
+	atomic_store_relaxed(&time__uptime32.hi, uphi);
+}
+
+time_t
+getrealtime(void)
+{
+	uint32_t lo, hi;
+
+	do {
+		for (;;) {
+			hi = atomic_load_relaxed(&time__second32.hi);
+			if (__predict_true(hi != 0xffffffff))
+				break;
+			SPINLOCK_BACKOFF_HOOK;
+		}
+		membar_consumer();
+		lo = atomic_load_relaxed(&time__second32.lo);
+		membar_consumer();
+	} while (hi != atomic_load_relaxed(&time__second32.hi));
+
+	return ((time_t)hi << 32) | lo;
+}
+
+time_t
+getuptime(void)
+{
+	uint32_t lo, hi;
+
+	do {
+		for (;;) {
+			hi = atomic_load_relaxed(&time__uptime32.hi);
+			if (__predict_true(hi != 0xffffffff))
+				break;
+			SPINLOCK_BACKOFF_HOOK;
+		}
+		membar_consumer();
+		lo = atomic_load_relaxed(&time__uptime32.lo);
+		membar_consumer();
+	} while (hi != atomic_load_relaxed(&time__uptime32.hi));
+
+	return ((time_t)hi << 32) | lo;
+}
+
+time_t
+getboottime(void)
+{
+
+	return getrealtime() - getuptime();
+}
+
+uint32_t
+getuptime32(void)
+{
+
+	return atomic_load_relaxed(&time__uptime32.lo);
+}
+
+#endif	/* !defined(__HAVE_ATOMIC64_LOADSTORE) */
+
 /*
  * sysctl helper routine for kern.timercounter.hardware
  */
@@ -878,8 +987,7 @@ tc_windup(void)
 	 * Go live with the new struct timehands.  Ensure changes are
 	 * globally visible before changing.
 	 */
-	time_second = th->th_microtime.tv_sec;
-	time_uptime = th->th_offset.sec;
+	setrealuptime(th->th_microtime.tv_sec, th->th_offset.sec);
 	membar_producer();
 	timehands = th;
 
diff --git a/sys/sys/timevar.h b/sys/sys/timevar.h
index 5daed09cdfb0..1baf682a8c44 100644
--- a/sys/sys/timevar.h
+++ b/sys/sys/timevar.h
@@ -60,6 +60,7 @@
 #ifndef _SYS_TIMEVAR_H_
 #define	_SYS_TIMEVAR_H_
 
+#include <sys/atomic.h>
 #include
 #include
 #include
@@ -234,8 +235,47 @@ void	itimer_gettime(const struct itimer *, struct itimerspec *);
 void	ptimer_tick(struct lwp *, bool);
 void	ptimers_free(struct proc *, int);
 
-extern volatile time_t time_second;	/* current second in the epoch */
-extern volatile time_t time_uptime;	/* system uptime in seconds */
+extern volatile time_t time__second;	/* current second in the epoch */
+extern volatile time_t time__uptime;	/* system uptime in seconds */
+
+#define	time_second	getrealtime()
+#define	time_uptime	getuptime()
+#define	time_uptime32	getuptime32()
+
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+
+static inline time_t
+getrealtime(void)
+{
+	return atomic_load_relaxed(&time__second);
+}
+
+static inline time_t
+getuptime(void)
+{
+	return atomic_load_relaxed(&time__uptime);
+}
+
+static inline time_t
+getboottime(void)
+{
+	return getrealtime() - getuptime();
+}
+
+static inline uint32_t
+getuptime32(void)
+{
+	return getuptime() & 0xffffffff;
+}
+
+#else
+
+time_t	getrealtime(void);
+time_t	getuptime(void);
+time_t	getboottime(void);
+uint32_t getuptime32(void);
+
+#endif
 
 extern int time_adjusted;
 
@@ -248,13 +288,13 @@ extern int time_adjusted;
 static __inline time_t
 time_mono_to_wall(time_t t)
 {
-	return t - time_uptime + time_second;
+	return t + getboottime();
 }
 
 static __inline time_t
 time_wall_to_mono(time_t t)
 {
-	return t - time_second + time_uptime;
+	return t - getboottime();
 }
 
 #endif /* !_SYS_TIMEVAR_H_ */
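For readers unfamiliar with the seqlock-style scheme used in the
!__HAVE_ATOMIC64_LOADSTORE path above, here is a minimal user-space sketch of
the same single-writer protocol, with C11 release/acquire atomics standing in
for the kernel's atomic_*_relaxed() plus membar_producer()/membar_consumer().
The split64_* names are illustrative only and do not appear in the patch; this
is a simplified model of the idea, not kernel code.

/*
 * Hypothetical user-space model of the patch's split 64-bit counter.
 * One writer publishes a 64-bit value as two 32-bit halves; readers
 * retry until they observe a consistent pair.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static struct {
	_Atomic uint32_t lo, hi;
} split64;

/* Writer (single writer assumed, as with timecounter_lock). */
static void
split64_store(uint64_t v)
{
	uint32_t lo = v & 0xffffffff, hi = v >> 32;

	/* Fast path: high half unchanged, one 32-bit store is enough. */
	if (hi == atomic_load_explicit(&split64.hi, memory_order_relaxed)) {
		atomic_store_explicit(&split64.lo, lo, memory_order_relaxed);
		return;
	}

	/* Slow path: mark the high half invalid while both halves change. */
	atomic_store_explicit(&split64.hi, UINT32_MAX, memory_order_relaxed);
	atomic_store_explicit(&split64.lo, lo, memory_order_release);
	atomic_store_explicit(&split64.hi, hi, memory_order_release);
}

/* Reader: spin while an update is in progress, retry if hi moved. */
static uint64_t
split64_load(void)
{
	uint32_t lo, hi;

	do {
		while ((hi = atomic_load_explicit(&split64.hi,
			    memory_order_acquire)) == UINT32_MAX)
			;	/* writer is mid-update */
		lo = atomic_load_explicit(&split64.lo, memory_order_acquire);
	} while (hi != atomic_load_explicit(&split64.hi,
		    memory_order_relaxed));

	return ((uint64_t)hi << 32) | lo;
}

int
main(void)
{
	split64_store(0xfffffffeULL);		/* low half only: fast path */
	split64_store(0x100000001ULL);		/* crosses 2^32: slow path */
	printf("0x%llx\n", (unsigned long long)split64_load());
	return 0;
}

The writer reserves hi == UINT32_MAX as an update-in-progress marker, which is
why the 32-bit path gives up the last 2^32 seconds of representable time --
the roughly 137-year reduction noted in the commit message.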