Index: sys/external/bsd/drm2/include/linux/ww_mutex.h
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/include/linux/ww_mutex.h,v
retrieving revision 1.10
diff -p -u -r1.10 ww_mutex.h
--- sys/external/bsd/drm2/include/linux/ww_mutex.h	8 Jan 2015 23:35:47 -0000	1.10
+++ sys/external/bsd/drm2/include/linux/ww_mutex.h	20 May 2015 12:37:36 -0000
@@ -70,6 +70,9 @@ struct ww_mutex {
 	struct ww_class *wwm_class;
 	struct rb_tree wwm_waiters;
 	kcondvar_t wwm_cv;
+#ifdef LOCKDEBUG
+	bool wwm_debug;
+#endif
 };
 
 /* XXX Make the nm output a little more greppable...  */
Index: sys/external/bsd/drm2/linux/linux_ww_mutex.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/linux/linux_ww_mutex.c,v
retrieving revision 1.1
diff -p -u -r1.1 linux_ww_mutex.c
--- sys/external/bsd/drm2/linux/linux_ww_mutex.c	8 Jan 2015 23:35:47 -0000	1.1
+++ sys/external/bsd/drm2/linux/linux_ww_mutex.c	20 May 2015 12:37:36 -0000
@@ -35,12 +35,23 @@ __KERNEL_RCSID(0, "$NetBSD: linux_ww_mut
 #include <sys/types.h>
 #include <sys/atomic.h>
 #include <sys/condvar.h>
+#include <sys/lockdebug.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/rbtree.h>
 
 #include <linux/ww_mutex.h>
 
+#define	WW_WANTLOCK(WW) \
+	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW), \
+	    (uintptr_t)__builtin_return_address(0), 0)
+#define	WW_LOCKED(WW) \
+	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL, \
+	    (uintptr_t)__builtin_return_address(0), 0)
+#define	WW_UNLOCKED(WW) \
+	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), \
+	    (uintptr_t)__builtin_return_address(0), 0)
+
 static int
 ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
 {
@@ -109,6 +120,51 @@ ww_acquire_fini(struct ww_acquire_ctx *c
 	ctx->wwx_owner = NULL;
 }
 
+static void
+ww_dump(volatile void *cookie)
+{
+	volatile struct ww_mutex *mutex = cookie;
+
+	printf_nolog("%-13s: ", "state");
+	switch (mutex->wwm_state) {
+	case WW_UNLOCKED:
+		printf_nolog("unlocked\n");
+		break;
+	case WW_OWNED:
+		printf_nolog("owned by lwp\n");
+		printf_nolog("%-13s: %p\n", "owner", mutex->wwm_u.owner);
+		printf_nolog("%-13s: %s\n", "waiters",
+		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+		    ? "yes" : "no");
+		break;
+	case WW_CTX:
+		printf_nolog("owned via ctx\n");
+		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+		printf_nolog("%-13s: %p\n", "lwp",
+		    mutex->wwm_u.ctx->wwx_owner);
+		printf_nolog("%-13s: %s\n", "waiters",
+		    cv_has_waiters(__UNVOLATILE(&mutex->wwm_cv))
+		    ? "yes" : "no");
+		break;
+	case WW_WANTOWN:
+		printf_nolog("owned via ctx\n");
+		printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
+		printf_nolog("%-13s: %p\n", "lwp",
+		    mutex->wwm_u.ctx->wwx_owner);
+		printf_nolog("%-13s: %s\n", "waiters", "yes (noctx)");
+		break;
+	default:
+		printf_nolog("unknown\n");
+		break;
+	}
+}
+
+static const lockops_t ww_lockops = {
+	.lo_name = "Wait/wound mutex",
+	.lo_type = LOCKOPS_SLEEP,
+	.lo_dump = ww_dump,
+};
+
 void
 ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
 {
@@ -122,17 +178,25 @@ ww_mutex_init(struct ww_mutex *mutex, st
 	mutex->wwm_class = class;
 	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
 	cv_init(&mutex->wwm_cv, "linuxwwm");
+#ifdef LOCKDEBUG
+	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
+	    (uintptr_t)__builtin_return_address(0));
+#endif
 }
 
 void
 ww_mutex_destroy(struct ww_mutex *mutex)
 {
 
+	KASSERT(mutex->wwm_state == WW_UNLOCKED);
+
+#ifdef LOCKDEBUG
+	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
+#endif
 	cv_destroy(&mutex->wwm_cv);
 #if 0
 	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
 #endif
-	KASSERT(mutex->wwm_state == WW_UNLOCKED);
 	mutex_destroy(&mutex->wwm_lock);
 }
 
@@ -267,6 +331,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_LOCKED(mutex);
 		break;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -301,6 +366,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_LOCKED(mutex);
 		break;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -335,6 +401,7 @@ int
 ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
@@ -357,6 +424,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -415,6 +483,7 @@ ww_mutex_lock_interruptible(struct ww_mu
 {
 	int ret;
 
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL)
@@ -435,6 +504,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -499,6 +569,7 @@ void
 ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
 {
 
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL) {
@@ -524,6 +595,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -561,6 +633,7 @@ ww_mutex_lock_slow_interruptible(struct
 {
 	int ret;
 
+	WW_WANTLOCK(mutex);
 	ASSERT_SLEEPABLE();
 
 	if (ctx == NULL)
@@ -584,6 +657,7 @@ retry:	switch (mutex->wwm_state) {
 	case WW_UNLOCKED:
 		mutex->wwm_state = WW_CTX;
 		mutex->wwm_u.ctx = ctx;
+		WW_LOCKED(mutex);
 		goto locked;
 	case WW_OWNED:
 		KASSERTMSG((mutex->wwm_u.owner != curlwp),
@@ -632,6 +706,7 @@ ww_mutex_trylock(struct ww_mutex *mutex)
 	if (mutex->wwm_state == WW_UNLOCKED) {
 		mutex->wwm_state = WW_OWNED;
 		mutex->wwm_u.owner = curlwp;
+		WW_LOCKED(mutex);
 		ret = 1;
 	} else {
 		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
@@ -702,6 +777,7 @@ ww_mutex_unlock(struct ww_mutex *mutex)
 		mutex->wwm_state = WW_UNLOCKED;
 		break;
 	}
+	WW_UNLOCKED(mutex);
 	cv_broadcast(&mutex->wwm_cv);
 	mutex_exit(&mutex->wwm_lock);
 }