Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/alpha/conf/files.alpha     |  3
-rw-r--r-- | sys/arch/alpha/include/mutex.h      | 86
-rw-r--r-- | sys/arch/amd64/conf/files.amd64     |  3
-rw-r--r-- | sys/arch/amd64/include/mutex.h      | 86
-rw-r--r-- | sys/arch/arm64/conf/files.arm64     |  3
-rw-r--r-- | sys/arch/arm64/include/mutex.h      | 86
-rw-r--r-- | sys/arch/i386/conf/files.i386       |  3
-rw-r--r-- | sys/arch/i386/include/mutex.h       | 86
-rw-r--r-- | sys/arch/mips64/conf/files.mips64   |  3
-rw-r--r-- | sys/arch/mips64/include/mutex.h     | 86
-rw-r--r-- | sys/arch/powerpc/conf/files.powerpc |  3
-rw-r--r-- | sys/arch/powerpc/include/mutex.h    | 86
12 files changed, 18 insertions, 516 deletions
diff --git a/sys/arch/alpha/conf/files.alpha b/sys/arch/alpha/conf/files.alpha
index 5a5d118c4b5..f6f455c854b 100644
--- a/sys/arch/alpha/conf/files.alpha
+++ b/sys/arch/alpha/conf/files.alpha
@@ -1,4 +1,4 @@
-# $OpenBSD: files.alpha,v 1.105 2017/11/02 14:04:24 mpi Exp $
+# $OpenBSD: files.alpha,v 1.106 2018/01/25 15:06:29 mpi Exp $
 # $NetBSD: files.alpha,v 1.32 1996/11/25 04:03:21 cgd Exp $
 #
 # alpha-specific configuration info
@@ -293,7 +293,6 @@ file arch/alpha/alpha/fp_complete.c !no_ieee
 file arch/alpha/alpha/vm_machdep.c
 file arch/alpha/alpha/disksubr.c
 file arch/alpha/dev/bus_dma.c
-file arch/alpha/alpha/mutex.c

 #
 # Network protocol checksum routines
diff --git a/sys/arch/alpha/include/mutex.h b/sys/arch/alpha/include/mutex.h
index 20078d4b7f7..b5b66f12fb0 100644
--- a/sys/arch/alpha/include/mutex.h
+++ b/sys/arch/alpha/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.11 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64
index 2ac3517699d..b28df143bb5 100644
--- a/sys/arch/amd64/conf/files.amd64
+++ b/sys/arch/amd64/conf/files.amd64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.amd64,v 1.94 2018/01/12 20:14:21 deraadt Exp $
+# $OpenBSD: files.amd64,v 1.95 2018/01/25 15:06:29 mpi Exp $

 maxpartitions 16
 maxusers 2 16 128
@@ -29,7 +29,6 @@ file arch/amd64/amd64/fpu.c
 file arch/amd64/amd64/softintr.c
 file arch/amd64/amd64/i8259.c
 file arch/amd64/amd64/cacheinfo.c
-file arch/amd64/amd64/mutex.c
 file arch/amd64/amd64/vector.S
 file arch/amd64/amd64/copy.S
 file arch/amd64/amd64/spl.S
diff --git a/sys/arch/amd64/include/mutex.h b/sys/arch/amd64/include/mutex.h
index 20078d4b7f7..b5b66f12fb0 100644
--- a/sys/arch/amd64/include/mutex.h
+++ b/sys/arch/amd64/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.10 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.11 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/arm64/conf/files.arm64 b/sys/arch/arm64/conf/files.arm64
index e2134f24851..b58bb1f6c5c 100644
--- a/sys/arch/arm64/conf/files.arm64
+++ b/sys/arch/arm64/conf/files.arm64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.arm64,v 1.17 2018/01/10 23:27:18 kettenis Exp $
+# $OpenBSD: files.arm64,v 1.18 2018/01/25 15:06:29 mpi Exp $

 maxpartitions 16
 maxusers 2 8 64
@@ -32,7 +32,6 @@ file arch/arm64/arm64/exception.S
 file arch/arm64/arm64/trampoline.S
 file arch/arm64/arm64/trap.c
 file arch/arm64/arm64/ast.c
-file arch/arm64/arm64/arm64_mutex.c
 file arch/arm64/arm64/cpufunc_asm.S
 file arch/arm64/arm64/support.S

diff --git a/sys/arch/arm64/include/mutex.h b/sys/arch/arm64/include/mutex.h
index 5247385e106..212d863cd44 100644
--- a/sys/arch/arm64/include/mutex.h
+++ b/sys/arch/arm64/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.4 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.5 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/i386/conf/files.i386 b/sys/arch/i386/conf/files.i386
index 76651889efe..7962e8d33dc 100644
--- a/sys/arch/i386/conf/files.i386
+++ b/sys/arch/i386/conf/files.i386
@@ -1,4 +1,4 @@
-# $OpenBSD: files.i386,v 1.236 2017/12/20 11:08:44 mpi Exp $
+# $OpenBSD: files.i386,v 1.237 2018/01/25 15:06:29 mpi Exp $
 #
 # new style config file for i386 architecture
 #
@@ -21,7+21,6 @@ file arch/i386/i386/est.c !small_kernel
 file arch/i386/i386/gdt.c
 file arch/i386/i386/in_cksum.s
 file arch/i386/i386/machdep.c
-file arch/i386/i386/mutex.c
 file arch/i386/i386/hibernate_machdep.c hibernate
 file arch/i386/i386/via.c
 file arch/i386/i386/locore.s
diff --git a/sys/arch/i386/include/mutex.h b/sys/arch/i386/include/mutex.h
index d3525fc2eff..db1ff31a572 100644
--- a/sys/arch/i386/include/mutex.h
+++ b/sys/arch/i386/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.12 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.13 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
index 441c9f683e8..8460a512a28 100644
--- a/sys/arch/mips64/conf/files.mips64
+++ b/sys/arch/mips64/conf/files.mips64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.mips64,v 1.28 2017/10/21 06:11:22 visa Exp $
+# $OpenBSD: files.mips64,v 1.29 2018/01/25 15:06:29 mpi Exp $

 file arch/mips64/mips64/arcbios.c arcbios
 file arch/mips64/mips64/clock.c clock
@@ -13,7 +13,6 @@ file arch/mips64/mips64/softintr.c
 file arch/mips64/mips64/sys_machdep.c
 file arch/mips64/mips64/trap.c
 file arch/mips64/mips64/vm_machdep.c
-file arch/mips64/mips64/mutex.c
 file arch/mips64/mips64/cache_loongson2.c cpu_loongson2
 file arch/mips64/mips64/cache_loongson3.c cpu_loongson3

diff --git a/sys/arch/mips64/include/mutex.h b/sys/arch/mips64/include/mutex.h
index a77a60eadc3..212d863cd44 100644
--- a/sys/arch/mips64/include/mutex.h
+++ b/sys/arch/mips64/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.4 2018/01/12 09:19:33 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.5 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX
diff --git a/sys/arch/powerpc/conf/files.powerpc b/sys/arch/powerpc/conf/files.powerpc
index 3762f7997a9..a05878c88e2 100644
--- a/sys/arch/powerpc/conf/files.powerpc
+++ b/sys/arch/powerpc/conf/files.powerpc
@@ -1,4 +1,4 @@
-# $OpenBSD: files.powerpc,v 1.54 2016/03/05 17:41:55 mpi Exp $
+# $OpenBSD: files.powerpc,v 1.55 2018/01/25 15:06:29 mpi Exp $
 #

 file arch/powerpc/powerpc/setjmp.S ddb
@@ -13,7 +13,6 @@ file arch/powerpc/powerpc/process_machdep.c
 file arch/powerpc/powerpc/sys_machdep.c
 file arch/powerpc/powerpc/trap.c
 file arch/powerpc/powerpc/vm_machdep.c
-file arch/powerpc/powerpc/mutex.c
 file arch/powerpc/powerpc/lock_machdep.c multiprocessor
 file arch/powerpc/powerpc/intr.c
 file arch/powerpc/powerpc/softintr.c
diff --git a/sys/arch/powerpc/include/mutex.h b/sys/arch/powerpc/include/mutex.h
index 2aeefb1a7b4..b32e5a751ff 100644
--- a/sys/arch/powerpc/include/mutex.h
+++ b/sys/arch/powerpc/include/mutex.h
@@ -1,85 +1,3 @@
-/* $OpenBSD: mutex.h,v 1.8 2018/01/13 15:18:11 mpi Exp $ */
+/* $OpenBSD: mutex.h,v 1.9 2018/01/25 15:06:29 mpi Exp $ */

-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MACHINE_MUTEX_H_
-#define _MACHINE_MUTEX_H_
-
-#include <sys/_lock.h>
-
-struct mutex {
-	volatile void *mtx_owner;
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
-/*
- * To prevent lock ordering problems with the kernel lock, we need to
- * make sure we block all interrupts that can grab the kernel lock.
- * The simplest way to achieve this is to make sure mutexes always
- * raise the interrupt priority level to the highest level that has
- * interrupts that grab the kernel lock.
- */
-#ifdef MULTIPROCESSOR
-#define __MUTEX_IPL(ipl) \
-	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
-#else
-#define __MUTEX_IPL(ipl) (ipl)
-#endif
-
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do { \
-	if ((mtx)->mtx_owner != curcpu()) \
-		panic("mutex %p not held in %s", (mtx), __func__); \
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { \
-	if ((mtx)->mtx_owner == curcpu()) \
-		panic("mutex %p held in %s", (mtx), __func__); \
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx) (&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl
-
-#endif /* _MACHINE_MUTEX_H_ */
+#define __USE_MI_MUTEX