From: dlg
Date: Tue, 21 Apr 2015 01:31:51 +0000 (+0000)
Subject: rework sgi mutexes to use the owner pointer as the lock.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=d06b71a3a1220c705cf63f9c674be4d2114e07cc;p=openbsd

rework sgi mutexes to use the owner pointer as the lock.

this is like src/sys/arch/alpha/alpha/mutex.c r1.14.

this changes sgi mutexes so they record which cpu owns the lock
rather than just whether the lock is held or not. the diagnostics
compare the owner to the current cpu's curcpu() address, so they can
tell whether the current cpu actually holds the lock rather than
whether some cpu holds it.

instead of using custom asm to implement a cas, this uses
atomic_cas_ptr.

while i'm here i also shuffled the code. on MULTIPROCESSOR systems,
instead of duplicating code between mtx_enter and mtx_enter_try,
mtx_enter simply loops on mtx_enter_try until it succeeds.

this also provides an alternative implementation of mutexes on
!MULTIPROCESSOR systems that avoids interlocking opcodes. mutexes
won't contend on UP boxes; they're basically wrappers around spls.
we can just do the splraise, stash the owner as a guard value for
DIAGNOSTIC, and return. similarly, mtx_enter_try on UP can never
fail, so we can just call mtx_enter and return 1.

tested by and ok miod@
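the key idea is that the owner pointer doubles as the lock word: a
NULL owner means unlocked, and whichever cpu swings it from NULL to
its own cpu_info pointer with a cas wins. as a rough standalone
illustration of that shape (this is not the kernel code: C11 atomics
stand in for atomic_cas_ptr()/membar_enter()/membar_exit(), the spl
handling is omitted, and struct fake_cpu/mycpu/the fake_ names are
invented stand-ins for struct cpu_info and curcpu()):

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <assert.h>

	struct fake_cpu { int id; };
	static struct fake_cpu cpu0 = { 0 };
	#define mycpu (&cpu0)	/* hypothetical curcpu() stand-in */

	struct fake_mtx {
		_Atomic(struct fake_cpu *) mtx_owner;
	};

	static int
	fake_mtx_enter_try(struct fake_mtx *mtx)
	{
		struct fake_cpu *expected = NULL;

		/*
		 * the cas is the lock: free iff mtx_owner is NULL.
		 * acquire ordering plays the role of membar_enter().
		 */
		if (atomic_compare_exchange_strong_explicit(&mtx->mtx_owner,
		    &expected, mycpu, memory_order_acquire,
		    memory_order_relaxed))
			return 1;

		/* on failure, expected holds the observed owner */
		assert(expected != mycpu);	/* "locking against myself" */
		return 0;
	}

	static void
	fake_mtx_enter(struct fake_mtx *mtx)
	{
		/* as in the MP path below: spin on the try function */
		while (fake_mtx_enter_try(mtx) == 0)
			;
	}

	static void
	fake_mtx_leave(struct fake_mtx *mtx)
	{
		/* release ordering plays the role of membar_exit() */
		atomic_store_explicit(&mtx->mtx_owner, NULL,
		    memory_order_release);
	}

	int
	main(void)
	{
		struct fake_mtx mtx = { NULL };

		fake_mtx_enter(&mtx);
		printf("owner: cpu%d\n", mycpu->id);
		fake_mtx_leave(&mtx);
		return 0;
	}

on UP none of the atomics are needed, which is exactly why the
!MULTIPROCESSOR path in the diff gets away with plain stores.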
---

diff --git a/sys/arch/sgi/include/mutex.h b/sys/arch/sgi/include/mutex.h
index 1727d60f032..8d44c879270 100644
--- a/sys/arch/sgi/include/mutex.h
+++ b/sys/arch/sgi/include/mutex.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mutex.h,v 1.7 2014/02/10 20:30:05 kettenis Exp $	*/
+/*	$OpenBSD: mutex.h,v 1.8 2015/04/21 01:31:51 dlg Exp $	*/
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -29,10 +29,9 @@
 #define _MACHINE_MUTEX_H_
 
 struct mutex {
-	int mtx_lock;
+	void *mtx_owner;
 	int mtx_wantipl;
 	int mtx_oldipl;
-	void *mtx_owner;
 };
 
 /*
@@ -49,19 +48,19 @@ struct mutex {
 #define __MUTEX_IPL(ipl) (ipl)
 #endif
 
-#define MUTEX_INITIALIZER(ipl) { 0, __MUTEX_IPL((ipl)), IPL_NONE }
+#define MUTEX_INITIALIZER(ipl) { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
 
 void __mtx_init(struct mutex *, int);
 #define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock == 0)					\
+	if ((mtx)->mtx_owner != curcpu())				\
 		panic("mutex %p not held in %s", (mtx), __func__);	\
 } while (0)
 
 #define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock != 0)					\
+	if ((mtx)->mtx_owner == curcpu())				\
 		panic("mutex %p held in %s", (mtx), __func__);		\
 } while (0)
 #else
diff --git a/sys/arch/sgi/sgi/mutex.c b/sys/arch/sgi/sgi/mutex.c
index b7fb7b16074..c5764e82600 100644
--- a/sys/arch/sgi/sgi/mutex.c
+++ b/sys/arch/sgi/sgi/mutex.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mutex.c,v 1.14 2014/02/10 20:30:05 kettenis Exp $	*/
+/*	$OpenBSD: mutex.c,v 1.15 2015/04/21 01:31:51 dlg Exp $	*/
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -28,87 +28,84 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/mutex.h>
+#include <sys/atomic.h>
 
 #include <machine/intr.h>
 
-static inline int
-try_lock(struct mutex *mtx)
-{
-#ifdef MULTIPROCESSOR
-	int tmp, ret = 0;
-
-	asm volatile (
-		".set noreorder\n"
-		"1:\n"
-		"ll	%0, %2\n"	/* tmp = mtx->mtx_lock */
-		"bnez	%0, 2f\n"
-		" li	%1, 0\n"	/* ret = 0 */
-		"li	%1, 1\n"	/* ret = 1 */
-		"sc	%1, %2\n"	/* mtx->mtx_lock = 1 */
-		"beqz	%1, 1b\n"	/* update failed */
-		" nop\n"
-		"2:\n"
-		".set reorder\n"
-		: "+r"(tmp), "+r"(ret)
-		: "m"(mtx->mtx_lock));
-
-	return ret;
-#else	/* MULTIPROCESSOR */
-	mtx->mtx_lock = 1;
-	return 1;
-#endif	/* MULTIPROCESSOR */
-}
 
 void
 __mtx_init(struct mutex *mtx, int wantipl)
 {
-	mtx->mtx_lock = 0;
+	mtx->mtx_owner = NULL;
 	mtx->mtx_wantipl = wantipl;
 	mtx->mtx_oldipl = IPL_NONE;
 }
 
+#ifdef MULTIPROCESSOR
 void
 mtx_enter(struct mutex *mtx)
 {
-	int s;
-
-	for (;;) {
-		if (mtx->mtx_wantipl != IPL_NONE)
-			s = splraise(mtx->mtx_wantipl);
-		if (try_lock(mtx)) {
-			if (mtx->mtx_wantipl != IPL_NONE)
-				mtx->mtx_oldipl = s;
-			mtx->mtx_owner = curcpu();
-#ifdef DIAGNOSTIC
-			curcpu()->ci_mutex_level++;
-#endif
-			return;
-		}
-		if (mtx->mtx_wantipl != IPL_NONE)
-			splx(s);
-	}
+	while (mtx_enter_try(mtx) == 0)
+		;
 }
 
 int
 mtx_enter_try(struct mutex *mtx)
 {
+	struct cpu_info *owner, *ci = curcpu();
 	int s;
 
 	if (mtx->mtx_wantipl != IPL_NONE)
 		s = splraise(mtx->mtx_wantipl);
-	if (try_lock(mtx)) {
+
+	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
+#ifdef DIAGNOSTIC
+	if (__predict_false(owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+	if (owner == NULL) {
 		if (mtx->mtx_wantipl != IPL_NONE)
 			mtx->mtx_oldipl = s;
-		mtx->mtx_owner = curcpu();
 #ifdef DIAGNOSTIC
-		curcpu()->ci_mutex_level++;
+		ci->ci_mutex_level++;
 #endif
-		return 1;
+		membar_enter();
+		return (1);
 	}
+
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
-	return 0;
+
+	return (0);
 }
+#else
+void
+mtx_enter(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(mtx->mtx_owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+	mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+	ci->ci_mutex_level++;
+#endif
+}
+
+int
+mtx_enter_try(struct mutex *mtx)
+{
+	mtx_enter(mtx);
+	return (1);
+}
+#endif
 
 void
 mtx_leave(struct mutex *mtx)
@@ -116,12 +113,16 @@ mtx_leave(struct mutex *mtx)
 	int s;
 
 	MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef MULTIPROCESSOR
+	membar_exit();
+#endif
 #ifdef DIAGNOSTIC
 	curcpu()->ci_mutex_level--;
 #endif
+
 	s = mtx->mtx_oldipl;
 	mtx->mtx_owner = NULL;
-	mtx->mtx_lock = 0;
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
 }
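for context, a caller of this API looks something like the following
hypothetical driver fragment (sc_mtx, sc_count and sc_bump are
invented names, not code from the tree):

	/* a mutex protecting a counter touched from IPL_NET */
	struct mutex sc_mtx = MUTEX_INITIALIZER(IPL_NET);
	int sc_count;

	void
	sc_bump(void)
	{
		mtx_enter(&sc_mtx);	/* raise to IPL_NET, take ownership */
		/* after this commit: checks owner == curcpu(), not just "held" */
		MUTEX_ASSERT_LOCKED(&sc_mtx);
		sc_count++;
		mtx_leave(&sc_mtx);	/* clear mtx_owner, restore saved ipl */
	}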