diff --git a/sys/external/bsd/drm2/include/linux/ww_mutex.h b/sys/external/bsd/drm2/include/linux/ww_mutex.h
index e87ded9dd20a..08e62376d629 100644
--- a/sys/external/bsd/drm2/include/linux/ww_mutex.h
+++ b/sys/external/bsd/drm2/include/linux/ww_mutex.h
@@ -40,13 +40,17 @@
 #include
 #include
 
+#define	WW_CLASS_MAGIC	0x5ffea5f47eeb5a28
+
 struct ww_class {
 	atomic64_t	wwc_ticket;
+	uint64_t	wwc_magic;
 };
 
 #define	DEFINE_WW_CLASS(CLASS)						\
 	struct ww_class CLASS = {					\
 		.wwc_ticket = ATOMIC64_INIT(0),				\
+		.wwc_magic = WW_CLASS_MAGIC,				\
 	}
 
 struct ww_acquire_ctx {
@@ -56,6 +60,7 @@ struct ww_acquire_ctx {
 	unsigned		wwx_acquired;
 	bool			wwx_acquire_done;
 	struct rb_node		wwx_rb_node;
+	uint64_t		wwx_magic;
 };
 
 enum ww_mutex_state {
@@ -80,6 +85,7 @@ struct ww_mutex {
 	struct ww_class	*wwm_class;
 	struct rb_tree	wwm_waiters;
 	kcondvar_t	wwm_cv;
+	uint64_t	wwm_magic;
 };
 
 /* XXX Make the nm output a little more greppable... */
diff --git a/sys/external/bsd/drm2/linux/linux_ww_mutex.c b/sys/external/bsd/drm2/linux/linux_ww_mutex.c
index f403570120b5..e6817d587256 100644
--- a/sys/external/bsd/drm2/linux/linux_ww_mutex.c
+++ b/sys/external/bsd/drm2/linux/linux_ww_mutex.c
@@ -53,6 +53,33 @@ __KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.14 2022/03/18 23:33:41 riastrad
 	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			\
 	    (uintptr_t)__builtin_return_address(0), 0)
 
+#define	WW_CTX_MAGIC(ctx, class)					\
+	(((uintptr_t)(ctx) * (uint64_t)0x543c189312a56787) +		\
+	    (uintptr_t)(class))
+
+#define	WW_MUTEX_MAGIC(mutex, class)					\
+	(((uintptr_t)(mutex) * (uint64_t)0xf03ab6d76dbf25d5) +		\
+	    (uintptr_t)(class))
+
+#define	CHECK_CLASS(class)						\
+	KDASSERTMSG((class)->wwc_magic == WW_CLASS_MAGIC, "class=%p", (class))
+
+#define	CHECK_CTX(ctx) do						\
+{									\
+	KDASSERTMSG(((ctx)->wwx_magic ==				\
+		WW_CTX_MAGIC((ctx), (ctx)->wwx_class)),			\
+	    "ctx=%p", (ctx));						\
+	CHECK_CLASS((ctx)->wwx_class);					\
+} while (0)
+
+#define	CHECK_MUTEX(mutex) do						\
+{									\
+	KDASSERTMSG(((mutex)->wwm_magic ==				\
+		WW_MUTEX_MAGIC((mutex), (mutex)->wwm_class)),		\
+	    "mutex=%p", (mutex));					\
+	CHECK_CLASS((mutex)->wwm_class);				\
+} while (0)
+
 static int
 ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
 {
@@ -62,7 +89,7 @@ ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
 
 	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
 		return -1;
 	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
-		return -1;
+		return +1;
 	return 0;
 }
@@ -76,7 +103,7 @@ ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
 
 	if (ctx->wwx_ticket < ticket)
 		return -1;
 	if (ctx->wwx_ticket > ticket)
-		return -1;
+		return +1;
 	return 0;
 }
@@ -91,17 +118,26 @@ void
 ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
 {
 
+	CHECK_CLASS(class);
+
 	ctx->wwx_class = class;
 	ctx->wwx_owner = curlwp;
 	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
 	ctx->wwx_acquired = 0;
 	ctx->wwx_acquire_done = false;
+#ifdef DEBUG
+	ctx->wwx_magic = WW_CTX_MAGIC(ctx, class);
+#endif
+
+	CHECK_CTX(ctx);
 }
 
 void
 ww_acquire_done(struct ww_acquire_ctx *ctx)
 {
 
+	CHECK_CTX(ctx);
+
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
@@ -112,6 +148,8 @@ static void
 ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	CHECK_CTX(ctx);
+
 	/*
 	 * If caller has invoked ww_acquire_done, we must already hold
 	 * this mutex.
@@ -127,11 +165,17 @@ void
 ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
 
+	CHECK_CTX(ctx);
+
 	KASSERTMSG((ctx->wwx_owner == curlwp),
 	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
 	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
 	    ctx, ctx->wwx_acquired);
+#ifdef DEBUG
+	explicit_memset(&ctx->wwx_magic, 0xe9, sizeof(ctx->wwx_magic));
+#endif
+
 	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
 	ctx->wwx_owner = NULL;
 }
@@ -194,6 +238,8 @@ void
 ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
 {
 
+	CHECK_CLASS(class);
+
 	/*
 	 * XXX Apparently Linux takes these with spin locks held.  That
 	 * strikes me as a bad idea, but so it is...
@@ -207,6 +253,11 @@ ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
 	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
 	    (uintptr_t)__builtin_return_address(0));
 #endif
+#ifdef DEBUG
+	mutex->wwm_magic = WW_MUTEX_MAGIC(mutex, class);
+#endif
+
+	CHECK_MUTEX(mutex);
 }
 
 /*
@@ -220,8 +271,12 @@ void
 ww_mutex_destroy(struct ww_mutex *mutex)
 {
 
+	CHECK_MUTEX(mutex);
 	KASSERT(mutex->wwm_state == WW_UNLOCKED);
 
+#ifdef DEBUG
+	explicit_memset(&mutex->wwm_magic, 0xc2, sizeof(mutex->wwm_magic));
+#endif
 #ifdef LOCKDEBUG
 	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
 #endif
@@ -249,6 +304,8 @@ ww_mutex_is_locked(struct ww_mutex *mutex)
 {
 	int locked;
 
+	CHECK_MUTEX(mutex);
+
 	mutex_enter(&mutex->wwm_lock);
 	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
@@ -286,8 +343,10 @@ ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
 	KASSERT(mutex_owned(&mutex->wwm_lock));
 	KASSERT(mutex->wwm_state == state);
 
-	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
-	while (mutex->wwm_state == state);
+	do {
+		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
+		CHECK_MUTEX(mutex);
+	} while (mutex->wwm_state == state);
 }
 
 /*
@@ -313,6 +372,7 @@ ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
 	do {
 		/* XXX errno NetBSD->Linux */
 		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
+		CHECK_MUTEX(mutex);
 		if (ret) {
 			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
 			    "ret=%d", ret);
@@ -350,6 +410,7 @@ ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx != ctx);
+	CHECK_CTX(mutex->wwm_u.ctx);
 	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
 	    "ww mutex class mismatch: %p != %p",
 	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
@@ -363,8 +424,10 @@ ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
 	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);
 
-	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
-	while (!(((mutex->wwm_state == WW_CTX) ||
+	do {
+		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
+		CHECK_MUTEX(mutex);
+	} while (!(((mutex->wwm_state == WW_CTX) ||
 		    (mutex->wwm_state == WW_WANTOWN)) &&
 		   (mutex->wwm_u.ctx == ctx)));
@@ -398,6 +461,7 @@ ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx != ctx);
+	CHECK_CTX(mutex->wwm_u.ctx);
 	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
 	    "ww mutex class mismatch: %p != %p",
 	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
@@ -414,6 +478,7 @@ ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 	do {
 		/* XXX errno NetBSD->Linux */
 		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
+		CHECK_MUTEX(mutex);
 		if (ret) {
 			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
 			    "ret=%d", ret);
@@ -459,6 +524,7 @@ retry:	switch (mutex->wwm_state) {
 		mutex->wwm_state = WW_WANTOWN;
 		/* FALLTHROUGH */
 	case WW_WANTOWN:
+		CHECK_CTX(mutex->wwm_u.ctx);
 		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ww_mutex_state_wait(mutex, WW_WANTOWN);
@@ -510,6 +576,7 @@ retry:	switch (mutex->wwm_state) {
 		mutex->wwm_state = WW_WANTOWN;
 		/* FALLTHROUGH */
 	case WW_WANTOWN:
+		CHECK_CTX(mutex->wwm_u.ctx);
 		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 		    "locking %p against myself: %p", mutex, curlwp);
 		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
@@ -551,6 +618,10 @@ ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 	int ret;
 
+	CHECK_MUTEX(mutex);
+	if (ctx)
+		CHECK_CTX(ctx);
+
 	/*
 	 * We do not WW_WANTLOCK at the beginning because we may
 	 * correctly already hold it, if we have a context, in which
@@ -601,6 +672,7 @@ retry:	switch (mutex->wwm_state) {
 		KASSERT(mutex->wwm_u.ctx != NULL);
 		KASSERT((mutex->wwm_u.ctx == ctx) ||
 		    (mutex->wwm_u.ctx->wwx_owner != curlwp));
+		CHECK_CTX(mutex->wwm_u.ctx);
 
 		if (mutex->wwm_u.ctx == ctx) {
 			/*
@@ -671,6 +743,10 @@ ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 	int ret;
 
+	CHECK_MUTEX(mutex);
+	if (ctx)
+		CHECK_CTX(ctx);
+
 	/*
 	 * We do not WW_WANTLOCK at the beginning because we may
 	 * correctly already hold it, if we have a context, in which
@@ -729,6 +805,7 @@ retry:	switch (mutex->wwm_state) {
 		KASSERT(mutex->wwm_u.ctx != NULL);
 		KASSERT((mutex->wwm_u.ctx == ctx) ||
 		    (mutex->wwm_u.ctx->wwx_owner != curlwp));
+		CHECK_CTX(mutex->wwm_u.ctx);
 
 		if (mutex->wwm_u.ctx == ctx) {
 			/*
@@ -797,6 +874,10 @@ void
 ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	CHECK_MUTEX(mutex);
+	if (ctx)
+		CHECK_CTX(ctx);
+
 	/* Caller must not try to lock against self here. */
 	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
@@ -843,6 +924,7 @@ retry:	switch (mutex->wwm_state) {
 	KASSERT(mutex->wwm_u.ctx != NULL);
 	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
 	    "locking %p against myself: %p", mutex, curlwp);
+	CHECK_CTX(mutex->wwm_u.ctx);
 
 	/*
 	 * Owned by another party, of any priority.  Ask that party to
@@ -875,6 +957,10 @@ ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
 {
 	int ret;
 
+	CHECK_MUTEX(mutex);
+	if (ctx)
+		CHECK_CTX(ctx);
+
 	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
@@ -963,6 +1049,8 @@ ww_mutex_trylock(struct ww_mutex *mutex)
 {
 	int ret;
 
+	CHECK_MUTEX(mutex);
+
 	mutex_enter(&mutex->wwm_lock);
 	if (mutex->wwm_state == WW_UNLOCKED) {
 		mutex->wwm_state = WW_OWNED;
@@ -1021,6 +1109,7 @@ ww_mutex_unlock_release(struct ww_mutex *mutex)
 	KASSERT((mutex->wwm_state == WW_CTX) ||
 	    (mutex->wwm_state == WW_WANTOWN));
 	KASSERT(mutex->wwm_u.ctx != NULL);
+	CHECK_CTX(mutex->wwm_u.ctx);
 	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
 	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
 	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
@@ -1040,6 +1129,8 @@ ww_mutex_unlock(struct ww_mutex *mutex)
 {
 	struct ww_acquire_ctx *ctx;
 
+	CHECK_MUTEX(mutex);
+
 	mutex_enter(&mutex->wwm_lock);
 	WW_UNLOCKED(mutex);
 	KASSERTMSG(mutex->wwm_state != WW_UNLOCKED, "mutex %p", mutex);
@@ -1059,6 +1150,7 @@ ww_mutex_unlock(struct ww_mutex *mutex)
 	 * unlock it.
 	 */
 	if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
+		CHECK_CTX(ctx);
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
 	} else {
@@ -1086,6 +1178,8 @@ ww_mutex_locking_ctx(struct ww_mutex *mutex)
 {
 	struct ww_acquire_ctx *ctx;
 
+	CHECK_MUTEX(mutex);
+
 	mutex_enter(&mutex->wwm_lock);
 	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
@@ -1095,6 +1189,7 @@ ww_mutex_locking_ctx(struct ww_mutex *mutex)
 	case WW_CTX:
 	case WW_WANTOWN:
 		ctx = mutex->wwm_u.ctx;
+		CHECK_CTX(ctx);
 		break;
 	default:
 		panic("wait/wound mutex %p in bad state: %d",
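
Note (illustrative, not part of the patch): the checks above key each magic
value to the object's own address (plus its class pointer), so a stale or
dangling pointer, a structure copied byte-for-byte to a new address, and a
use-after-destroy all trip KDASSERTMSG on DEBUG kernels.  Below is a minimal
userland sketch of the same pattern; the names and the multiplier constant are
made up, and assert(3) stands in for KDASSERTMSG.

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct widget {
	int		w_value;
	uint64_t	w_magic;	/* keyed to this widget's address */
};

/* Arbitrary large odd multiplier, in the spirit of the patch's constants. */
#define	WIDGET_MAGIC(w)	((uintptr_t)(w) * (uint64_t)0x9e3779b97f4a7c15)

static void
widget_init(struct widget *w, int value)
{

	w->w_value = value;
	w->w_magic = WIDGET_MAGIC(w);	/* valid only at this address */
}

static void
widget_check(const struct widget *w)
{

	/* Fails for uninitialized, relocated, or destroyed widgets. */
	assert(w->w_magic == WIDGET_MAGIC(w));
}

static void
widget_destroy(struct widget *w)
{

	widget_check(w);
	/*
	 * Scribble the magic so use-after-destroy trips the check.  (The
	 * patch uses explicit_memset so the compiler cannot elide the
	 * store; plain memset suffices for this demonstration.)
	 */
	memset(&w->w_magic, 0xc2, sizeof(w->w_magic));
}

int
main(void)
{
	struct widget w;

	widget_init(&w, 42);
	widget_check(&w);		/* passes */
	widget_destroy(&w);
	/* widget_check(&w) here would fire the assertion. */
	return 0;
}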