From 13f046c8d3900d717784308b28bccd26a120986e Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Tue, 1 Jun 2021 00:26:08 +0000
Subject: [PATCH] x86: Count suspend/resume cycles and avoid TSC warnings
 across them.

- New variable acpi_md_sleep_epoch counts the number of suspend/resume
  cycles.  It is incremented while all non-primary CPUs are quiescent
  and interrupts are blocked by splhigh, so nothing can read it
  concurrently between when we resume and when we update it.

- When we cache the last TSC reading, cache the sleep epoch too.  Only
  warn about a backwards TSC if, after the sequence

	prevtsc = rdtsc();
	prevepoch = acpi_md_sleep_epoch;
	epoch = acpi_md_sleep_epoch;
	tsc = rdtsc();

  we observe tsc < prevtsc even though prevepoch == epoch.
---
 sys/arch/amd64/include/proc.h       |  1 +
 sys/arch/x86/acpi/acpi_wakeup.c     |  3 +++
 sys/arch/x86/include/acpi_machdep.h |  4 ++++
 sys/arch/x86/x86/tsc.c              | 27 ++++++++++++++++++++++++++-
 4 files changed, 34 insertions(+), 1 deletion(-)
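Aside for reviewers (below the "---" marker, so git am ignores it): the
warning-suppression logic boils down to the following minimal
standalone C sketch.  The names sim_epoch, sim_counter, cached_tsc, and
cached_epoch are hypothetical stand-ins for acpi_md_sleep_epoch, the
TSC, and the per-LWP md_tsc/md_tsc_epoch cache; this illustrates the
idea and is not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	static volatile unsigned sim_epoch;	/* bumped on simulated resume */
	static volatile uint64_t sim_counter;	/* resets across resume */

	static uint64_t cached_tsc;		/* stand-in for md_tsc */
	static unsigned cached_epoch;		/* stand-in for md_tsc_epoch */

	static uint64_t
	read_counter(void)
	{
		uint64_t prev, cur;
		unsigned prevepoch, epoch;

		prev = cached_tsc;	/* read the cache first... */
		prevepoch = cached_epoch;
		epoch = sim_epoch;	/* sample epoch before the counter */
		cur = sim_counter;
		if (epoch != sim_epoch)	/* resumed mid-read: value is */
			return cur;	/* usable, skip the stale check */
		if (cur < prev && epoch == prevepoch)
			printf("counter went backwards: %llu -> %llu\n",
			    (unsigned long long)prev,
			    (unsigned long long)cur);
		cached_tsc = cur;	/* ...store the cache after */
		cached_epoch = epoch;
		return cur;
	}

	int
	main(void)
	{
		sim_counter = 1000;
		read_counter();		/* caches 1000 at epoch 0 */
		sim_epoch++;		/* simulated suspend/resume */
		sim_counter = 10;	/* counter reset across sleep */
		read_counter();		/* 10 < 1000, epochs differ: quiet */
		return 0;
	}

Note the early return when the epoch changes between its two samples:
the fresh counter value is still returned as a valid timecount, and
only the comparison against the now-stale cache is skipped (the cache
is also left unchanged, exactly as in tsc_get_timecount below).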
diff --git a/sys/arch/amd64/include/proc.h b/sys/arch/amd64/include/proc.h
index 4ac7ad89784a..9d551d84e529 100644
--- a/sys/arch/amd64/include/proc.h
+++ b/sys/arch/amd64/include/proc.h
@@ -47,6 +47,7 @@ struct vm_page;
 
 struct mdlwp {
 	volatile uint64_t md_tsc;	/* last TSC reading */
+	volatile unsigned md_tsc_epoch;	/* epoch when we last read TSC */
 	struct trapframe *md_regs;	/* registers on current frame */
 	int	md_flags;		/* machine-dependent flags */
 	volatile int md_astpending;
diff --git a/sys/arch/x86/acpi/acpi_wakeup.c b/sys/arch/x86/acpi/acpi_wakeup.c
index 271587a88a48..a17fa60246aa 100644
--- a/sys/arch/x86/acpi/acpi_wakeup.c
+++ b/sys/arch/x86/acpi/acpi_wakeup.c
@@ -111,6 +111,8 @@ int acpi_md_vbios_reset = 0; /* Referenced by dev/pci/vga_pci.c */
 int acpi_md_vesa_modenum = 0; /* Referenced by arch/x86/x86/genfb_machdep.c */
 static int acpi_md_beep_on_reset = 0;
 
+unsigned acpi_md_sleep_epoch = 0;
+
 static int acpi_md_s4bios(void);
 static int sysctl_md_acpi_vbios_reset(SYSCTLFN_ARGS);
 static int sysctl_md_acpi_beep_on_reset(SYSCTLFN_ARGS);
@@ -334,6 +336,7 @@ acpi_md_sleep(int state)
 	/* Go get some sleep */
 	if (acpi_md_sleep_prepare(state))
 		goto out;
+	acpi_md_sleep_epoch++;
 
 	/*
 	 * Sleeping and having bad nightmares about what could go wrong
diff --git a/sys/arch/x86/include/acpi_machdep.h b/sys/arch/x86/include/acpi_machdep.h
index ea39810f7f94..94fc5966ba17 100644
--- a/sys/arch/x86/include/acpi_machdep.h
+++ b/sys/arch/x86/include/acpi_machdep.h
@@ -41,6 +41,8 @@
 /*
  * Machine-dependent code for ACPI.
  */
+#include
+
 #include
 #include
 #include
@@ -84,4 +86,6 @@ uint32_t acpi_md_ncpus(void);
 struct acpi_softc;
 void acpi_md_callback(struct acpi_softc *);
 
+extern unsigned acpi_md_sleep_epoch;
+
 #endif /* !_X86_ACPI_MACHDEP_H_ */
diff --git a/sys/arch/x86/x86/tsc.c b/sys/arch/x86/x86/tsc.c
index f940a1a2820e..b538eb74618a 100644
--- a/sys/arch/x86/x86/tsc.c
+++ b/sys/arch/x86/x86/tsc.c
@@ -29,6 +29,8 @@
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.54 2021/02/19 02:15:58 christos Exp $");
 
+#include "acpica.h"
+
 #include
 #include
 #include
@@ -46,6 +48,10 @@ __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.54 2021/02/19 02:15:58 christos Exp $");
 #include
 #include
 
+#if NACPICA > 0
+#include <machine/acpi_machdep.h>
+#endif
+
 #include "tsc.h"
 
 #define	TSC_SYNC_ROUNDS		1000
@@ -411,18 +417,34 @@ tsc_get_timecount(struct timecounter *tc)
 	static int lastwarn;
 	uint64_t cur, prev;
 	lwp_t *l = curlwp;
+	unsigned epoch, prevepoch;
 	int ticks;
 
 	/*
 	 * Previous value must be read before the counter and stored to
 	 * after, because this routine can be called from interrupt context
 	 * and may run over the top of an existing invocation.  Ordering is
-	 * guaranteed by "volatile" on md_tsc.
+	 * guaranteed by "volatile" on md_tsc.  On suspend/resume, the tsc
+	 * is reset, so we suppress the warning if our cache is from before
+	 * the last suspend/resume.
 	 */
 	prev = l->l_md.md_tsc;
+
+#if NACPICA > 0
+	prevepoch = l->l_md.md_tsc_epoch;
+	epoch = acpi_md_sleep_epoch;
 	cur = cpu_counter();
+	if (__predict_false(epoch != acpi_md_sleep_epoch))
+		return (uint32_t)cur;
+#else
+	prevepoch = 0;
+	epoch = 0;
+	cur = cpu_counter();
+#endif
+
 	if (__predict_false(cur < prev)) {
 		if ((cur >> 63) == (prev >> 63) &&
+		    epoch == prevepoch &&
 		    __cpu_simple_lock_try(&lock)) {
 			ticks = getticks();
 			if (ticks - lastwarn >= hz) {
@@ -435,6 +457,9 @@ tsc_get_timecount(struct timecounter *tc)
 		}
 	}
 	l->l_md.md_tsc = cur;
+#if NACPICA > 0
+	l->l_md.md_tsc_epoch = epoch;
+#endif
 	return (uint32_t)cur;
 #else
 	return cpu_counter32();
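
One remark on the pre-existing guard retained in the hunk above: the
warning is only attempted when both samples have the same top bit,
presumably so that an apparent decrease coinciding with a wrap of the
counter's high bit is treated as rollover rather than reported as a
backwards TSC.  A standalone illustration, again not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint64_t prev = UINT64_MAX - 5;	/* just before rollover */
		uint64_t cur = 10;		/* just after rollover */

		/*
		 * cur < prev, but the top bits differ, so the guard
		 * rejects the comparison and no warning is attempted.
		 * Prints "warn: 0".
		 */
		printf("warn: %d\n",
		    cur < prev && (cur >> 63) == (prev >> 63));
		return 0;
	}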