From d59344a1a6ba38e5fc96e9e26fabe70ab0aeca02 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 10 Mar 2022 00:34:39 +0000
Subject: [PATCH] kern: Fix synchronization of clearing LP_RUNNING and
 lwp_free.

1. membar_sync is not necessary here -- only a store-release is
   required.

2. membar_consumer _before_ loading l->l_pflag is not enough; a
   load-acquire is required.

Actually it's not really clear to me why any barriers are needed, since
the store-release and load-acquire should be implied by releasing and
acquiring the lwp lock (and maybe we could spin with the lock instead of
reading l->l_pflag unlocked).  But maybe there's something subtle about
access to l->l_mutex that's not obvious here.
---
 sys/kern/kern_lwp.c   | 10 ++++++----
 sys/kern/kern_synch.c |  6 ++++--
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/sys/kern/kern_lwp.c b/sys/kern/kern_lwp.c
index b7aa4e6ea921..9e5aa66cc2c9 100644
--- a/sys/kern/kern_lwp.c
+++ b/sys/kern/kern_lwp.c
@@ -1024,9 +1024,11 @@ lwp_startup(struct lwp *prev, struct lwp *new_lwp)
 	 */
 	lock = prev->l_mutex;
 	if (__predict_false(prev->l_stat == LSZOMB)) {
-		membar_sync();
+		atomic_store_release(&prev->l_pflag,
+		    prev->l_pflag & ~LP_RUNNING);
+	} else {
+		prev->l_pflag &= ~LP_RUNNING;
 	}
-	prev->l_pflag &= ~LP_RUNNING;
 	mutex_spin_exit(lock);
 
 	/* Correct spin mutex count after mi_switch(). */
@@ -1247,8 +1249,8 @@ lwp_free(struct lwp *l, bool recycle, bool last)
 	 * In the unlikely event that the LWP is still on the CPU,
 	 * then spin until it has switched away.
 	 */
-	membar_consumer();
-	while (__predict_false((l->l_pflag & LP_RUNNING) != 0)) {
+	while (__predict_false((atomic_load_acquire(&l->l_pflag) & LP_RUNNING)
+	    != 0)) {
 		SPINLOCK_BACKOFF_HOOK;
 	}
 
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 7a6ca60016a2..4b2546d0a6c9 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -792,9 +792,11 @@ mi_switch(lwp_t *l)
 		KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0);
 		lock = prevlwp->l_mutex;
 		if (__predict_false(prevlwp->l_stat == LSZOMB)) {
-			membar_sync();
+			atomic_store_release(&prevlwp->l_pflag,
+			    prevlwp->l_pflag & ~LP_RUNNING);
+		} else {
+			prevlwp->l_pflag &= ~LP_RUNNING;
 		}
-		prevlwp->l_pflag &= ~LP_RUNNING;
 		mutex_spin_exit(lock);
 
 		/*
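
Illustrative note (not part of the patch): the change relies on a plain
release/acquire handoff -- the switching CPU clears LP_RUNNING with a
release store, and lwp_free spins with an acquire load before it may
free the LWP.  Below is a minimal userland sketch of that pairing,
using C11 <stdatomic.h> and pthreads in place of the kernel's
atomic_loadstore(9) macros; struct fake_lwp, switcher(), and reaper()
are made-up stand-ins for the LWP and the mi_switch/lwp_free paths,
not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define	LP_RUNNING	0x01u

struct fake_lwp {
	_Atomic unsigned int pflag;	/* stands in for l->l_pflag */
	int exit_data;			/* only safe to read after the handoff */
};

/* Switching side: publish exit_data, then clear LP_RUNNING with release. */
static void *
switcher(void *arg)
{
	struct fake_lwp *l = arg;

	l->exit_data = 42;
	/*
	 * Release store: everything written above is visible to whoever
	 * observes LP_RUNNING clear via an acquire load (cf.
	 * atomic_store_release in the patch; a full membar_sync() is
	 * stronger than needed for this).
	 */
	atomic_fetch_and_explicit(&l->pflag, ~LP_RUNNING, memory_order_release);
	return NULL;
}

/* Reaping side: spin with acquire until LP_RUNNING is clear. */
static void *
reaper(void *arg)
{
	struct fake_lwp *l = arg;

	/*
	 * Acquire load pairs with the release store above (cf.
	 * atomic_load_acquire in lwp_free); a barrier placed before this
	 * load, as membar_consumer() was, orders the wrong pair of accesses.
	 */
	while (atomic_load_explicit(&l->pflag, memory_order_acquire) & LP_RUNNING)
		continue;	/* stands in for SPINLOCK_BACKOFF_HOOK */
	printf("exit_data = %d\n", l->exit_data);	/* guaranteed to print 42 */
	return NULL;
}

int
main(void)
{
	static struct fake_lwp l = { .pflag = LP_RUNNING, .exit_data = 0 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, reaper, &l);
	pthread_create(&t2, NULL, switcher, &l);
	pthread_join(t2, NULL);
	pthread_join(t1, NULL);
	return 0;
}

Build with e.g. cc -pthread.  The point of the sketch is only that the
acquire load which observes LP_RUNNING clear also makes the switcher's
earlier writes visible, which the old membar_sync() provided more
heavy-handedly and the old membar_consumer() placement did not provide
at all.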