From: mpi
Date: Thu, 25 Jan 2018 15:06:29 +0000 (+0000)
Subject: Move common mutex implementations to a MI place.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=710c2d98ef6b48aed52cf1712826535b248055bf;p=openbsd

Move common mutex implementations to a MI place.

Archs not yet converted can make the jump by defining __USE_MI_MUTEX.

ok visa@
---

diff --git a/sys/arch/alpha/conf/files.alpha b/sys/arch/alpha/conf/files.alpha
index 5a5d118c4b5..f6f455c854b 100644
--- a/sys/arch/alpha/conf/files.alpha
+++ b/sys/arch/alpha/conf/files.alpha
@@ -1,4 +1,4 @@
-# $OpenBSD: files.alpha,v 1.105 2017/11/02 14:04:24 mpi Exp $
+# $OpenBSD: files.alpha,v 1.106 2018/01/25 15:06:29 mpi Exp $
 # $NetBSD: files.alpha,v 1.32 1996/11/25 04:03:21 cgd Exp $
 #
 # alpha-specific configuration info
@@ -293,7 +293,6 @@ file arch/alpha/alpha/fp_complete.c !no_ieee
 file arch/alpha/alpha/vm_machdep.c
 file arch/alpha/alpha/disksubr.c
 file arch/alpha/dev/bus_dma.c
-file arch/alpha/alpha/mutex.c
 
 #
 # Network protocol checksum routines
diff --git a/sys/arch/alpha/include/mutex.h b/sys/arch/alpha/include/mutex.h
index 20078d4b7f7..b5b66f12fb0 100644
--- a/sys/arch/alpha/include/mutex.h
+++ b/sys/arch/alpha/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.11 2018/01/25 15:06:29 mpi Exp $ */
 
-/*
- * Copyright (c) 2004 Artur Grabowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? 
IPL_MPFLOOR : (ipl)) -#else -#define __MUTEX_IPL(ipl) (ipl) -#endif - -#ifdef WITNESS -#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ - { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) } -#else -#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ - { NULL, __MUTEX_IPL((ipl)), IPL_NONE } -#endif - -void __mtx_init(struct mutex *, int); -#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl))) - -#ifdef DIAGNOSTIC -#define MUTEX_ASSERT_LOCKED(mtx) do { \ - if ((mtx)->mtx_owner != curcpu()) \ - panic("mutex %p not held in %s", (mtx), __func__); \ -} while (0) - -#define MUTEX_ASSERT_UNLOCKED(mtx) do { \ - if ((mtx)->mtx_owner == curcpu()) \ - panic("mutex %p held in %s", (mtx), __func__); \ -} while (0) -#else -#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0) -#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0) -#endif - -#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj) -#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl - -#endif /* _MACHINE_MUTEX_H_ */ +#define __USE_MI_MUTEX diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64 index 2ac3517699d..b28df143bb5 100644 --- a/sys/arch/amd64/conf/files.amd64 +++ b/sys/arch/amd64/conf/files.amd64 @@ -1,4 +1,4 @@ -# $OpenBSD: files.amd64,v 1.94 2018/01/12 20:14:21 deraadt Exp $ +# $OpenBSD: files.amd64,v 1.95 2018/01/25 15:06:29 mpi Exp $ maxpartitions 16 maxusers 2 16 128 @@ -29,7 +29,6 @@ file arch/amd64/amd64/fpu.c file arch/amd64/amd64/softintr.c file arch/amd64/amd64/i8259.c file arch/amd64/amd64/cacheinfo.c -file arch/amd64/amd64/mutex.c file arch/amd64/amd64/vector.S file arch/amd64/amd64/copy.S file arch/amd64/amd64/spl.S diff --git a/sys/arch/amd64/include/mutex.h b/sys/arch/amd64/include/mutex.h index 20078d4b7f7..b5b66f12fb0 100644 --- a/sys/arch/amd64/include/mutex.h +++ b/sys/arch/amd64/include/mutex.h @@ -1,85 +1,3 @@ -/* $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $ */ +/* $OpenBSD: mutex.h,v 1.11 2018/01/25 15:06:29 mpi Exp $ */ -/* - * Copyright (c) 2004 Artur Grabowski - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/arm64/conf/files.arm64 b/sys/arch/arm64/conf/files.arm64
index e2134f24851..b58bb1f6c5c 100644
--- a/sys/arch/arm64/conf/files.arm64
+++ b/sys/arch/arm64/conf/files.arm64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.arm64,v 1.17 2018/01/10 23:27:18 kettenis Exp $
+# $OpenBSD: files.arm64,v 1.18 2018/01/25 15:06:29 mpi Exp $
 
 maxpartitions 16
 maxusers 2 8 64
@@ -32,7 +32,6 @@ file arch/arm64/arm64/exception.S
 file arch/arm64/arm64/trampoline.S
 file arch/arm64/arm64/trap.c
 file arch/arm64/arm64/ast.c
-file arch/arm64/arm64/arm64_mutex.c
 
 file arch/arm64/arm64/cpufunc_asm.S
 file arch/arm64/arm64/support.S
diff --git a/sys/arch/arm64/include/mutex.h b/sys/arch/arm64/include/mutex.h
index 5247385e106..212d863cd44 100644
--- a/sys/arch/arm64/include/mutex.h
+++ b/sys/arch/arm64/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.4 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.5 2018/01/25 15:06:29 mpi Exp $ */
 
-/*
- * Copyright (c) 2004 Artur Grabowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/i386/conf/files.i386 b/sys/arch/i386/conf/files.i386
index 76651889efe..7962e8d33dc 100644
--- a/sys/arch/i386/conf/files.i386
+++ b/sys/arch/i386/conf/files.i386
@@ -1,4 +1,4 @@
-# $OpenBSD: files.i386,v 1.236 2017/12/20 11:08:44 mpi Exp $
+# $OpenBSD: files.i386,v 1.237 2018/01/25 15:06:29 mpi Exp $
 #
 # new style config file for i386 architecture
 #
@@ -21,7 +21,6 @@ file arch/i386/i386/est.c !small_kernel
 file arch/i386/i386/gdt.c
 file arch/i386/i386/in_cksum.s
 file arch/i386/i386/machdep.c
-file arch/i386/i386/mutex.c
 file arch/i386/i386/hibernate_machdep.c hibernate
 file arch/i386/i386/via.c
 file arch/i386/i386/locore.s
diff --git a/sys/arch/i386/include/mutex.h b/sys/arch/i386/include/mutex.h
index d3525fc2eff..db1ff31a572 100644
--- a/sys/arch/i386/include/mutex.h
+++ b/sys/arch/i386/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.12 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.13 2018/01/25 15:06:29 mpi Exp $ */
 
-/*
- * Copyright (c) 2004 Artur Grabowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
index 441c9f683e8..8460a512a28 100644
--- a/sys/arch/mips64/conf/files.mips64
+++ b/sys/arch/mips64/conf/files.mips64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mips64,v 1.28 2017/10/21 06:11:22 visa Exp $
+# $OpenBSD: files.mips64,v 1.29 2018/01/25 15:06:29 mpi Exp $
 
 file arch/mips64/mips64/arcbios.c arcbios
 file arch/mips64/mips64/clock.c clock
@@ -13,7 +13,6 @@ file arch/mips64/mips64/softintr.c
 file arch/mips64/mips64/sys_machdep.c
 file arch/mips64/mips64/trap.c
 file arch/mips64/mips64/vm_machdep.c
-file arch/mips64/mips64/mutex.c
 
 file arch/mips64/mips64/cache_loongson2.c cpu_loongson2
 file arch/mips64/mips64/cache_loongson3.c cpu_loongson3
diff --git a/sys/arch/mips64/include/mutex.h b/sys/arch/mips64/include/mutex.h
index a77a60eadc3..212d863cd44 100644
--- a/sys/arch/mips64/include/mutex.h
+++ b/sys/arch/mips64/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.4 2018/01/12 09:19:33 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.5 2018/01/25 15:06:29 mpi Exp $ */
 
-/*
- * Copyright (c) 2004 Artur Grabowski
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? 
IPL_MPFLOOR : (ipl)) -#else -#define __MUTEX_IPL(ipl) (ipl) -#endif - -#ifdef WITNESS -#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ - { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) } -#else -#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ - { NULL, __MUTEX_IPL((ipl)), IPL_NONE } -#endif - -void __mtx_init(struct mutex *, int); -#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl))) - -#ifdef DIAGNOSTIC -#define MUTEX_ASSERT_LOCKED(mtx) do { \ - if ((mtx)->mtx_owner != curcpu()) \ - panic("mutex %p not held in %s", (mtx), __func__); \ -} while (0) - -#define MUTEX_ASSERT_UNLOCKED(mtx) do { \ - if ((mtx)->mtx_owner == curcpu()) \ - panic("mutex %p held in %s", (mtx), __func__); \ -} while (0) -#else -#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0) -#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0) -#endif - -#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj) -#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl - -#endif /* _MACHINE_MUTEX_H_ */ +#define __USE_MI_MUTEX diff --git a/sys/arch/powerpc/conf/files.powerpc b/sys/arch/powerpc/conf/files.powerpc index 3762f7997a9..a05878c88e2 100644 --- a/sys/arch/powerpc/conf/files.powerpc +++ b/sys/arch/powerpc/conf/files.powerpc @@ -1,4 +1,4 @@ -# $OpenBSD: files.powerpc,v 1.54 2016/03/05 17:41:55 mpi Exp $ +# $OpenBSD: files.powerpc,v 1.55 2018/01/25 15:06:29 mpi Exp $ # file arch/powerpc/powerpc/setjmp.S ddb @@ -13,7 +13,6 @@ file arch/powerpc/powerpc/process_machdep.c file arch/powerpc/powerpc/sys_machdep.c file arch/powerpc/powerpc/trap.c file arch/powerpc/powerpc/vm_machdep.c -file arch/powerpc/powerpc/mutex.c file arch/powerpc/powerpc/lock_machdep.c multiprocessor file arch/powerpc/powerpc/intr.c file arch/powerpc/powerpc/softintr.c diff --git a/sys/arch/powerpc/include/mutex.h b/sys/arch/powerpc/include/mutex.h index 2aeefb1a7b4..b32e5a751ff 100644 --- a/sys/arch/powerpc/include/mutex.h +++ b/sys/arch/powerpc/include/mutex.h @@ -1,85 +1,3 @@ -/* $OpenBSD: mutex.h,v 1.8 2018/01/13 15:18:11 mpi Exp $ */ +/* $OpenBSD: mutex.h,v 1.9 2018/01/25 15:06:29 mpi Exp $ */ -/* - * Copyright (c) 2004 Artur Grabowski - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index f067a508bc7..fe670a73e8d 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,6 +1,30 @@
-/* $OpenBSD: kern_lock.c,v 1.52 2017/12/04 09:51:03 mpi Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.53 2018/01/25 15:06:29 mpi Exp $ */
 
-/*
+/*
+ * Copyright (c) 2004 Artur Grabowski
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
  * Copyright (c) 1995
  *	The Regents of the University of California.  All rights reserved.
 *
@@ -273,3 +297,113 @@ __mp_lock_held(struct __mp_lock *mpl, struct cpu_info *ci)
 #endif /* __USE_MI_MPLOCK */
 
 #endif /* MULTIPROCESSOR */
+
+
+#ifdef __USE_MI_MUTEX
+void
+__mtx_init(struct mutex *mtx, int wantipl)
+{
+	mtx->mtx_owner = NULL;
+	mtx->mtx_wantipl = wantipl;
+	mtx->mtx_oldipl = IPL_NONE;
+}
+
+#ifdef MULTIPROCESSOR
+void
+__mtx_enter(struct mutex *mtx)
+{
+#ifdef MP_LOCKDEBUG
+	int nticks = __mp_lock_spinout;
+#endif
+
+	while (__mtx_enter_try(mtx) == 0) {
+		CPU_BUSY_CYCLE();
+
+#ifdef MP_LOCKDEBUG
+		if (--nticks == 0) {
+			db_printf("%s: %p lock spun out", __func__, mtx);
+			db_enter();
+			nticks = __mp_lock_spinout;
+		}
+#endif
+	}
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+	struct cpu_info *owner, *ci = curcpu();
+	int s;
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		s = splraise(mtx->mtx_wantipl);
+
+	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
+#ifdef DIAGNOSTIC
+	if (__predict_false(owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+	if (owner == NULL) {
+		membar_enter_after_atomic();
+		if (mtx->mtx_wantipl != IPL_NONE)
+			mtx->mtx_oldipl = s;
+#ifdef DIAGNOSTIC
+		ci->ci_mutex_level++;
+#endif
+		return (1);
+	}
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		splx(s);
+
+	return (0);
+}
+#else
+void
+__mtx_enter(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(mtx->mtx_owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+	mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+	ci->ci_mutex_level++;
+#endif
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+	__mtx_enter(mtx);
+	return (1);
+}
+#endif
+
+void
+__mtx_leave(struct mutex *mtx)
+{
+	int s;
+
+	MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef DIAGNOSTIC
+	curcpu()->ci_mutex_level--;
+#endif
+
+	s = mtx->mtx_oldipl;
+#ifdef MULTIPROCESSOR
+	membar_exit_before_atomic();
+#endif
+	mtx->mtx_owner = NULL;
+	if (mtx->mtx_wantipl != IPL_NONE)
+		splx(s);
+}
+#endif /* __USE_MI_MUTEX */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index c217957784a..3de2940b5f5 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.h,v 1.11 2017/11/29 15:12:52 visa Exp $ */
+/* $OpenBSD: mutex.h,v 1.12 2018/01/25 15:06:29 mpi Exp $ */
 
 /*
  * Copyright (c) 2004 Artur Grabowski
@@ -44,6 +44,65 @@
 #include <machine/mutex.h>
 
+#ifdef __USE_MI_MUTEX
+
+#include <sys/_lock.h>
+
+struct mutex {
+	volatile void *mtx_owner;
+	int mtx_wantipl;
+	int mtx_oldipl;
+#ifdef WITNESS
+	struct lock_object mtx_lock_obj;
+#endif
+};
+
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? 
IPL_MPFLOOR : (ipl)) +#else +#define __MUTEX_IPL(ipl) (ipl) +#endif + +#ifdef WITNESS +#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ + { NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) } +#else +#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \ + { NULL, __MUTEX_IPL((ipl)), IPL_NONE } +#endif + +void __mtx_init(struct mutex *, int); +#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl))) + +#ifdef DIAGNOSTIC +#define MUTEX_ASSERT_LOCKED(mtx) do { \ + if ((mtx)->mtx_owner != curcpu()) \ + panic("mutex %p not held in %s", (mtx), __func__); \ +} while (0) + +#define MUTEX_ASSERT_UNLOCKED(mtx) do { \ + if ((mtx)->mtx_owner == curcpu()) \ + panic("mutex %p held in %s", (mtx), __func__); \ +} while (0) +#else +#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0) +#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0) +#endif + +#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj) +#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl + +#endif /* __USE_MI_MUTEX */ + + #define MTX_LO_FLAGS(flags) \ ((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \ ((flags) & MTX_DUPOK ? LO_DUPOK : 0) | \
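
Editor's note: the commit moves the implementation, not the interface. struct mutex, __MUTEX_IPL() and the assertion macros now live in <sys/mutex.h> under __USE_MI_MUTEX, and the enter/leave code lives in kern_lock.c, but callers keep using the same mutex(9) API. As a minimal usage sketch (not part of the commit; it assumes the existing MUTEX_INITIALIZER(), mtx_enter() and mtx_leave() wrappers from <sys/mutex.h>, and the example_* names are invented):

/*
 * Illustrative only.  Initializing at IPL_BIO means __MUTEX_IPL()
 * floors the effective IPL to IPL_MPFLOOR on a MULTIPROCESSOR
 * kernel, so no interrupt that can grab the kernel lock can fire
 * on this CPU while the mutex is held.
 */
#include <sys/mutex.h>

struct mutex example_mtx = MUTEX_INITIALIZER(IPL_BIO);
unsigned int example_count;

void
example_increment(void)
{
	mtx_enter(&example_mtx);	/* splraise() + CAS spin */
	example_count++;
	mtx_leave(&example_mtx);	/* clear owner, then splx() */
}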
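On MULTIPROCESSOR kernels, the fast path added to kern_lock.c above claims the lock with a compare-and-swap on mtx_owner (atomic_cas_ptr()) and pairs it with membar_enter_after_atomic()/membar_exit_before_atomic(), so memory accesses in the critical section cannot be reordered outside the lock. The following is a self-contained sketch of the same acquire/release protocol in portable C11 atomics; it is a userland analogue only, with every name invented for the sketch, and the IPL juggling, MP_LOCKDEBUG spinout and DIAGNOSTIC checks of the kernel version omitted:

/*
 * Userland analogue of the MI mutex enter/leave protocol.
 * Not the kernel implementation -- the diff above is authoritative.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_mutex {
	_Atomic(void *) owner;		/* NULL when unlocked */
};

static int
toy_enter_try(struct toy_mutex *mtx, void *self)
{
	void *expected = NULL;

	/*
	 * Like atomic_cas_ptr(&mtx->mtx_owner, NULL, ci): succeed only
	 * if nobody owns the mutex.  Acquire ordering on success plays
	 * the role of membar_enter_after_atomic().
	 */
	return atomic_compare_exchange_strong_explicit(&mtx->owner,
	    &expected, self, memory_order_acquire, memory_order_relaxed);
}

static void
toy_enter(struct toy_mutex *mtx, void *self)
{
	/* Spin until the CAS succeeds, as __mtx_enter() does. */
	while (!toy_enter_try(mtx, self))
		;
}

static void
toy_leave(struct toy_mutex *mtx)
{
	/* Release ordering plays the role of membar_exit_before_atomic(). */
	atomic_store_explicit(&mtx->owner, NULL, memory_order_release);
}

int
main(void)
{
	struct toy_mutex mtx = { NULL };
	int me;

	toy_enter(&mtx, &me);
	printf("locked\n");
	toy_leave(&mtx);
	printf("unlocked\n");
	return 0;
}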