From 4609a40ee46abf9b87df78e0cbce99ddfead051e Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 24 Feb 2023 16:42:22 +0000
Subject: [PATCH] aarch64: curcpu() audit.

Sprinkle KASSERT (or KDASSERT in hot paths) for kpreempt_disabled()
when we use curcpu() and it's not immediately obvious that the caller
has preemption disabled.

Note unsafe curcpu() uses for syscall event counting.  Not sure this
is worth changing.

Possible bugs fixed:

- cpu_irq and cpu_fiq could be preempted while trying to run softints
  on this CPU.

- data_abort_handler might incorrectly think it was invoked in
  interrupt context when it was only preempted and migrated to
  another CPU.

- pmap_fault_fixup might report the wrong CPU in its UVMHIST logs.

- aarch64_dcache_{inv,wb,wbinv}_all might use the wrong CPU's cache
  parameters.  XXX Not sure it is sensible to call these with
  kpreemption enabled at all.  Should maybe turn the kpreempt_disable
  into a KASSERT and fix the callers.

(However, we don't currently run with kpreemption on aarch64, so these
are not real bug fixes yet unless you patch the kernel to build with
__HAVE_PREEMPTION.)
---
 sys/arch/aarch64/aarch64/aarch32_syscall.c |  2 +-
 sys/arch/aarch64/aarch64/cpu_machdep.c     |  4 ++
 sys/arch/aarch64/aarch64/cpufunc.c         | 45 +++++++++++++++++++---
 sys/arch/aarch64/aarch64/fault.c           |  5 ++-
 sys/arch/aarch64/aarch64/pmap_machdep.c    |  8 +++-
 sys/arch/aarch64/aarch64/syscall.c         |  2 +-
 sys/arch/aarch64/aarch64/trap.c            | 37 ++++++++++++++++++
 sys/arch/aarch64/include/cpu.h             |  3 ++
 8 files changed, 95 insertions(+), 11 deletions(-)

diff --git a/sys/arch/aarch64/aarch64/aarch32_syscall.c b/sys/arch/aarch64/aarch64/aarch32_syscall.c
index 066a21b66ab3..77f8c8164bfb 100644
--- a/sys/arch/aarch64/aarch64/aarch32_syscall.c
+++ b/sys/arch/aarch64/aarch64/aarch32_syscall.c
@@ -69,7 +69,7 @@ EMULNAME(syscall)(struct trapframe *tf)
 
 	LWP_CACHE_CREDS(l, p);
 
-	curcpu()->ci_data.cpu_nsyscall++;
+	curcpu()->ci_data.cpu_nsyscall++;	/* XXX unsafe curcpu() */
 
 	thumbmode = (tf->tf_spsr & SPSR_A32_T) ? true : false;
 #ifdef SYSCALL_CODE_REG
diff --git a/sys/arch/aarch64/aarch64/cpu_machdep.c b/sys/arch/aarch64/aarch64/cpu_machdep.c
index 057e773adf75..d1b476dd1447 100644
--- a/sys/arch/aarch64/aarch64/cpu_machdep.c
+++ b/sys/arch/aarch64/aarch64/cpu_machdep.c
@@ -116,6 +116,10 @@ dosoftints(void)
 	const uint32_t softiplmask = SOFTIPLMASK(opl);
 	int s;
 
+#ifdef __HAVE_PREEMPTION
+	KDASSERT(kpreempt_disabled());
+#endif
+
 	s = splhigh();
 	KASSERT(s == opl);
 	for (;;) {
diff --git a/sys/arch/aarch64/aarch64/cpufunc.c b/sys/arch/aarch64/aarch64/cpufunc.c
index 6305ad25cec3..c9d9a9ce981a 100644
--- a/sys/arch/aarch64/aarch64/cpufunc.c
+++ b/sys/arch/aarch64/aarch64/cpufunc.c
@@ -365,10 +365,17 @@ ln_dcache_inv_all(int level, struct aarch64_cache_unit *cunit)
 void
 aarch64_dcache_wbinv_all(void)
 {
-	struct cpu_info * const ci = curcpu();
-	struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
+	struct cpu_info *ci;
+	struct aarch64_cache_info *cinfo;
 	int level;
 
+#ifdef __HAVE_PREEMPTION
+	kpreempt_disable();
+#endif
+
+	ci = curcpu();
+	cinfo = ci->ci_cacheinfo;
+
 	for (level = 0; level < MAX_CACHE_LEVEL; level++) {
 		if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
 			break;
@@ -377,15 +384,26 @@ aarch64_dcache_wbinv_all(void)
 		ln_dcache_wbinv_all(level, &cinfo[level].dcache);
 	}
 	dsb(ish);
+
+#ifdef __HAVE_PREEMPTION
+	kpreempt_enable();
+#endif
 }
 
 void
 aarch64_dcache_inv_all(void)
 {
-	struct cpu_info * const ci = curcpu();
-	struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
+	struct cpu_info *ci;
+	struct aarch64_cache_info *cinfo;
 	int level;
 
+#ifdef __HAVE_PREEMPTION
+	kpreempt_disable();
+#endif
+
+	ci = curcpu();
+	cinfo = ci->ci_cacheinfo;
+
 	for (level = 0; level < MAX_CACHE_LEVEL; level++) {
 		if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
 			break;
@@ -394,15 +412,26 @@ aarch64_dcache_inv_all(void)
 		ln_dcache_inv_all(level, &cinfo[level].dcache);
 	}
 	dsb(ish);
+
+#ifdef __HAVE_PREEMPTION
+	kpreempt_enable();
+#endif
 }
 
 void
 aarch64_dcache_wb_all(void)
 {
-	struct cpu_info * const ci = curcpu();
-	struct aarch64_cache_info * const cinfo = ci->ci_cacheinfo;
+	struct cpu_info *ci;
+	struct aarch64_cache_info *cinfo;
 	int level;
 
+#ifdef __HAVE_PREEMPTION
+	kpreempt_disable();
+#endif
+
+	ci = curcpu();
+	cinfo = ci->ci_cacheinfo;
+
 	for (level = 0; level < MAX_CACHE_LEVEL; level++) {
 		if (cinfo[level].cacheable == CACHE_CACHEABLE_NONE)
 			break;
@@ -411,6 +440,10 @@ aarch64_dcache_wb_all(void)
 		ln_dcache_wb_all(level, &cinfo[level].dcache);
 	}
 	dsb(ish);
+
+#ifdef __HAVE_PREEMPTION
+	kpreempt_enable();
+#endif
 }
 
 int
diff --git a/sys/arch/aarch64/aarch64/fault.c b/sys/arch/aarch64/aarch64/fault.c
index 373da1a7e5cf..9b72a02f55db 100644
--- a/sys/arch/aarch64/aarch64/fault.c
+++ b/sys/arch/aarch64/aarch64/fault.c
@@ -226,7 +226,10 @@ data_abort_handler(struct trapframe *tf, uint32_t eclass)
 
 do_fault:
 	/* faultbail path? */
-	if (curcpu()->ci_intr_depth == 0) {
+	kpreempt_disable();
+	const bool intrdepthzero = (curcpu()->ci_intr_depth == 0);
+	kpreempt_enable();
+	if (intrdepthzero) {
 		fb = cpu_disable_onfault();
 		if (fb != NULL) {
 			cpu_jump_onfault(tf, fb, error);
diff --git a/sys/arch/aarch64/aarch64/pmap_machdep.c b/sys/arch/aarch64/aarch64/pmap_machdep.c
index 2067cbb3bae7..dfcbaf0e34ca 100644
--- a/sys/arch/aarch64/aarch64/pmap_machdep.c
+++ b/sys/arch/aarch64/aarch64/pmap_machdep.c
@@ -138,6 +138,8 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, bool user)
 
 	KASSERT(!user || (pm != pmap_kernel()));
 
+	kpreempt_disable();
+
 	UVMHIST_LOG(pmaphist, " pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
 	    (uintptr_t)pm, va, ftype, user);
 	UVMHIST_LOG(pmaphist, " ti=%#jx pai=%#jx asid=%#jx",
@@ -145,8 +147,6 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, bool user)
 	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
 	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
 
-	kpreempt_disable();
-
 	bool fixed = false;
 	pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
 	if (ptep == NULL) {
@@ -525,6 +525,8 @@ pmap_md_xtab_activate(pmap_t pm, struct lwp *l)
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLARGS(pmaphist, " (pm=%#jx l=%#jx)", (uintptr_t)pm, (uintptr_t)l, 0, 0);
 
+	KASSERT(kpreempt_disabled());
+
 	/*
 	 * Assume that TTBR1 has only global mappings and TTBR0 only
 	 * has non-global mappings.  To prevent speculation from doing
@@ -565,6 +567,8 @@ pmap_md_xtab_deactivate(pmap_t pm)
 {
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
+	KASSERT(kpreempt_disabled());
+
 	struct cpu_info * const ci = curcpu();
 	/*
 	 * Disable translation table walks from TTBR0 while no pmap has been
diff --git a/sys/arch/aarch64/aarch64/syscall.c b/sys/arch/aarch64/aarch64/syscall.c
index 64dc383998a6..03e3b7513902 100644
--- a/sys/arch/aarch64/aarch64/syscall.c
+++ b/sys/arch/aarch64/aarch64/syscall.c
@@ -94,7 +94,7 @@ EMULNAME(syscall)(struct trapframe *tf)
 
 	LWP_CACHE_CREDS(l, p);
 
-	curcpu()->ci_data.cpu_nsyscall++;
+	curcpu()->ci_data.cpu_nsyscall++;	/* XXX unsafe curcpu() */
 
 	register_t *params = (void *)tf->tf_reg;
 	size_t nargs = NARGREG;
diff --git a/sys/arch/aarch64/aarch64/trap.c b/sys/arch/aarch64/aarch64/trap.c
index 1a93f580a163..e56e8a1e4518 100644
--- a/sys/arch/aarch64/aarch64/trap.c
+++ b/sys/arch/aarch64/aarch64/trap.c
@@ -535,14 +535,33 @@ cpu_irq(struct trapframe *tf)
 	/* disable trace */
 	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
 #endif
+
+	/*
+	 * Prevent preemption once we enable traps, until we have
+	 * finished running hard and soft interrupt handlers.  This
+	 * guarantees ci = curcpu() remains stable and we don't
+	 * accidentally try to run its pending soft interrupts on
+	 * another CPU.
+	 */
+#ifdef __HAVE_PREEMPTION
+	kpreempt_disable();
+#endif
+
 	/* enable traps */
 	daif_enable(DAIF_D|DAIF_A);
+
 	/* run hard interrupt handlers */
 	ci->ci_intr_depth++;
 	ARM_IRQ_HANDLER(tf);
 	ci->ci_intr_depth--;
+
 	/* run soft interrupt handlers */
 	cpu_dosoftints();
+
+	/* all done, preempt as you please */
+#ifdef __HAVE_PREEMPTION
+	kpreempt_enable();
+#endif
 }
 
 void
@@ -562,14 +581,32 @@ cpu_fiq(struct trapframe *tf)
 	/* disable trace */
 	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
 
+	/*
+	 * Prevent preemption once we enable traps, until we have
+	 * finished running hard and soft interrupt handlers.  This
+	 * guarantees ci = curcpu() remains stable and we don't
+	 * accidentally try to run its pending soft interrupts on
+	 * another CPU.
+	 */
+#ifdef __HAVE_PREEMPTION
+	kpreempt_disable();
+#endif
+
 	/* enable traps */
 	daif_enable(DAIF_D|DAIF_A);
+
 	/* run hard interrupt handlers */
 	ci->ci_intr_depth++;
	ARM_FIQ_HANDLER(tf);
 	ci->ci_intr_depth--;
+
 	/* run soft interrupt handlers */
 	cpu_dosoftints();
+
+	/* all done, preempt as you please */
+#ifdef __HAVE_PREEMPTION
+	kpreempt_enable();
+#endif
 }
 
 #ifdef COMPAT_NETBSD32
diff --git a/sys/arch/aarch64/include/cpu.h b/sys/arch/aarch64/include/cpu.h
index 1e55ae1fe681..aa047610b193 100644
--- a/sys/arch/aarch64/include/cpu.h
+++ b/sys/arch/aarch64/include/cpu.h
@@ -242,6 +242,9 @@ static inline void
 cpu_dosoftints(void)
 {
 #if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
+#ifdef __HAVE_PREEMPTION
+	KDASSERT(kpreempt_disabled());
+#endif
 	cpu_dosoftints_ci(curcpu());
 #endif
 }
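
A note on the idiom, separate from the patch itself: every curcpu() touched
above follows one of two shapes.  Either the caller is already required to
run with kernel preemption disabled, and the change only asserts that with
KASSERT/KDASSERT(kpreempt_disabled()) (pmap_md_xtab_activate,
cpu_dosoftints), or the caller may be preemptible, and the curcpu() access
is bracketed with kpreempt_disable()/kpreempt_enable() so the lwp cannot
migrate while the per-CPU data is in use (aarch64_dcache_*_all,
data_abort_handler).  Below is a minimal sketch of those two shapes using
the same NetBSD KPIs as the diff; the two function names are invented for
illustration only.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/cpu.h>

	/* Callers must have preemption disabled, as pmap_md_xtab_activate expects. */
	static void
	example_percpu_update(void)
	{
		KASSERT(kpreempt_disabled());

		/* curcpu() is stable here: this lwp cannot migrate. */
		struct cpu_info * const ci = curcpu();
		ci->ci_data.cpu_nsyscall++;	/* illustrative per-CPU counter bump */
	}

	/* Preemptible callers bracket the access, as aarch64_dcache_*_all now does. */
	static void
	example_preemptible_caller(void)
	{
		kpreempt_disable();
		example_percpu_update();
		kpreempt_enable();
	}

The patch wraps its kpreempt_disable()/kpreempt_enable() pairs and the new
assertions in #ifdef __HAVE_PREEMPTION because, as the commit message notes,
aarch64 kernels do not currently build with kernel preemption; KDASSERT is
used on the hot softint paths so the check is compiled in only for DEBUG
kernels.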