From: dlg Date: Sat, 2 May 2015 10:59:47 +0000 (+0000) Subject: rework hppa mutexes. X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=423042ddb9827fbafb9ca22cfdde3830cb8da11e;p=openbsd rework hppa mutexes.

this is largely based on src/sys/arch/alpha/alpha/mutex.c r1.14 and
src/sys/arch/sgi/sgi/mutex.c r1.15

always and explicitly record which cpu owns the lock (or NULL if
no one owns it).

improve the mutex diagnostics/asserts so they operate on the mtx_owner
field rather than mtx_lock. previously the asserts would assume the
lock cpu owns the lock if any of them own the lock, which blows up
badly.

hppa hasn't got good atomic cpu opcodes, so this still relies on ldcws
to serialise access to the lock. while I'm here I also shuffled the
code.

on MULTIPROCESSOR systems instead of duplicating code between
mtx_enter and mtx_enter_try, mtx_enter simply loops on mtx_enter_try
until it succeeds.

this also provides an alternative implementation of mutexes on
!MULTIPROCESSOR systems that avoids interlocking opcodes. mutexes won't
contend on UP boxes, they're basically wrappers around spls. we can
just do the splraise, stash the owner as a guard value for DIAGNOSTIC
and return. similarly, mtx_enter_try on UP will never fail, so we can
just call mtx_enter and return 1.

tested by and ok kettenis@ jsing@
---

diff --git a/sys/arch/hppa/hppa/mutex.c b/sys/arch/hppa/hppa/mutex.c
index ab1df14070e..81721865a1d 100644
--- a/sys/arch/hppa/hppa/mutex.c
+++ b/sys/arch/hppa/hppa/mutex.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.c,v 1.13 2014/06/17 15:43:27 guenther Exp $ */
+/* $OpenBSD: mutex.c,v 1.14 2015/05/02 10:59:47 dlg Exp $ */
 
 /*
  * Copyright (c) 2004 Artur Grabowski
@@ -34,82 +34,109 @@
 
 #include 
 
-static inline int
-try_lock(struct mutex *mtx)
-{
-	volatile int *lock = (int *)(((vaddr_t)mtx->mtx_lock + 0xf) & ~0xf);
-	volatile register_t ret = 0;
-
-	/* Note: lock must be 16-byte aligned. 
*/ - asm volatile ( - "ldcws 0(%2), %0" - : "=&r" (ret), "+m" (lock) - : "r" (lock) - ); +int __mtx_enter_try(struct mutex *); - return ret; -} +#ifdef MULTIPROCESSOR +/* Note: lock must be 16-byte aligned. */ +#define __mtx_lock(mtx) ((int *)(((vaddr_t)mtx->mtx_lock + 0xf) & ~0xf)) +#endif void __mtx_init(struct mutex *mtx, int wantipl) { +#ifdef MULTIPROCESSOR mtx->mtx_lock[0] = 1; mtx->mtx_lock[1] = 1; mtx->mtx_lock[2] = 1; mtx->mtx_lock[3] = 1; +#endif mtx->mtx_wantipl = wantipl; mtx->mtx_oldipl = IPL_NONE; + mtx->mtx_owner = NULL; } +#ifdef MULTIPROCESSOR void mtx_enter(struct mutex *mtx) { - int s; - - for (;;) { - if (mtx->mtx_wantipl != IPL_NONE) - s = splraise(mtx->mtx_wantipl); - if (try_lock(mtx)) { - membar_enter(); - if (mtx->mtx_wantipl != IPL_NONE) - mtx->mtx_oldipl = s; - mtx->mtx_owner = curcpu(); -#ifdef DIAGNOSTIC - curcpu()->ci_mutex_level++; -#endif - return; - } - if (mtx->mtx_wantipl != IPL_NONE) - splx(s); - } + while (mtx_enter_try(mtx) == 0) + ; } int mtx_enter_try(struct mutex *mtx) { + struct cpu_info *ci = curcpu(); + volatile int *lock = __mtx_lock(mtx); + int ret; int s; - + if (mtx->mtx_wantipl != IPL_NONE) s = splraise(mtx->mtx_wantipl); - if (try_lock(mtx)) { - membar_enter(); + +#ifdef DIAGNOSTIC + if (__predict_false(mtx->mtx_owner == ci)) + panic("mtx %p: locking against myself", mtx); +#endif + + asm volatile ( + "ldcws 0(%2), %0" + : "=&r" (ret), "+m" (lock) + : "r" (lock) + ); + + if (ret) { + mtx->mtx_owner = ci; if (mtx->mtx_wantipl != IPL_NONE) mtx->mtx_oldipl = s; - mtx->mtx_owner = curcpu(); #ifdef DIAGNOSTIC - curcpu()->ci_mutex_level++; + ci->ci_mutex_level++; #endif - return 1; + membar_enter(); + + return (1); } + if (mtx->mtx_wantipl != IPL_NONE) splx(s); - return 0; + return (0); } +#else +void +mtx_enter(struct mutex *mtx) +{ + struct cpu_info *ci = curcpu(); + +#ifdef DIAGNOSTIC + if (__predict_false(mtx->mtx_owner == ci)) + panic("mtx %p: locking against myself", mtx); +#endif + + if (mtx->mtx_wantipl != 
IPL_NONE) + mtx->mtx_oldipl = splraise(mtx->mtx_wantipl); + + mtx->mtx_owner = ci; + +#ifdef DIAGNOSTIC + ci->ci_mutex_level++; +#endif +} + +int +mtx_enter_try(struct mutex *mtx) +{ + mtx_enter(mtx); + return (1); +} +#endif void mtx_leave(struct mutex *mtx) { +#ifdef MULTIPROCESSOR + volatile int *lock = __mtx_lock(mtx); +#endif int s; MUTEX_ASSERT_LOCKED(mtx); @@ -119,12 +146,10 @@ mtx_leave(struct mutex *mtx) #endif s = mtx->mtx_oldipl; mtx->mtx_owner = NULL; +#ifdef MULTIPROCESSOR + *lock = 1; membar_exit(); - - mtx->mtx_lock[0] = 1; - mtx->mtx_lock[1] = 1; - mtx->mtx_lock[2] = 1; - mtx->mtx_lock[3] = 1; +#endif if (mtx->mtx_wantipl != IPL_NONE) splx(s); diff --git a/sys/arch/hppa/include/mutex.h b/sys/arch/hppa/include/mutex.h index d0d990af5df..36be0577813 100644 --- a/sys/arch/hppa/include/mutex.h +++ b/sys/arch/hppa/include/mutex.h @@ -1,4 +1,4 @@ -/* $OpenBSD: mutex.h,v 1.5 2014/01/30 15:18:51 kettenis Exp $ */ +/* $OpenBSD: mutex.h,v 1.6 2015/05/02 10:59:47 dlg Exp $ */ /* * Copyright (c) 2004 Artur Grabowski @@ -28,12 +28,13 @@ #ifndef _MACHINE_MUTEX_H_ #define _MACHINE_MUTEX_H_ -#define MUTEX_LOCKED { 0, 0, 0, 0 } #define MUTEX_UNLOCKED { 1, 1, 1, 1 } /* Note: mtx_lock must be 16-byte aligned. */ struct mutex { +#ifdef MULTIPROCESSOR volatile int mtx_lock[4]; +#endif int mtx_wantipl; int mtx_oldipl; void *mtx_owner; @@ -49,25 +50,23 @@ struct mutex { #ifdef MULTIPROCESSOR #define __MUTEX_IPL(ipl) \ (((ipl) > IPL_NONE && (ipl) < IPL_AUDIO) ? 
IPL_AUDIO : (ipl)) +#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL } #else #define __MUTEX_IPL(ipl) (ipl) +#define MUTEX_INITIALIZER(ipl) { __MUTEX_IPL((ipl)), 0, NULL } #endif -#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL } - void __mtx_init(struct mutex *, int); #define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl))) #ifdef DIAGNOSTIC #define MUTEX_ASSERT_LOCKED(mtx) do { \ - if ((mtx)->mtx_lock[0] == 1 && (mtx)->mtx_lock[1] == 1 && \ - (mtx)->mtx_lock[2] == 1 && (mtx)->mtx_lock[3] == 1) \ + if ((mtx)->mtx_owner != curcpu()) \ panic("mutex %p not held in %s", (mtx), __func__); \ } while (0) #define MUTEX_ASSERT_UNLOCKED(mtx) do { \ - if ((mtx)->mtx_lock[0] != 1 && (mtx)->mtx_lock[1] != 1 && \ - (mtx)->mtx_lock[2] != 1 && (mtx)->mtx_lock[3] != 1) \ + if ((mtx)->mtx_owner == curcpu()) \ panic("mutex %p held in %s", (mtx), __func__); \ } while (0) #else