Diffstat (limited to 'sys/arch/sparc64')
-rw-r--r--  sys/arch/sparc64/include/mutex.h  | 21 +++++++++++++++++++--
-rw-r--r--  sys/arch/sparc64/sparc64/mutex.S  |  4 ++--
2 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/sys/arch/sparc64/include/mutex.h b/sys/arch/sparc64/include/mutex.h
index 8b8654ee554..73a061419ad 100644
--- a/sys/arch/sparc64/include/mutex.h
+++ b/sys/arch/sparc64/include/mutex.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.h,v 1.2 2007/08/25 18:36:47 kettenis Exp $ */
+/* $OpenBSD: mutex.h,v 1.3 2013/07/14 21:22:08 kettenis Exp $ */
/*
* Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -34,7 +34,24 @@ struct mutex {
int mtx_oldipl;
};
-#define MUTEX_INITIALIZER(ipl) { NULL, ipl, 0 }
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+ (((ipl) > IPL_NONE && (ipl) < IPL_SERIAL) ? IPL_SERIAL : (ipl))
+#else
+#define __MUTEX_IPL(ipl) (ipl)
+#endif
+
+#define MUTEX_INITIALIZER(ipl) { NULL, __MUTEX_IPL((ipl)), 0 }
+
+void __mtx_init(struct mutex *, int);
+#define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
#ifdef DIAGNOSTIC
#define MUTEX_ASSERT_LOCKED(mtx) do { \
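
The effect of __MUTEX_IPL() on MULTIPROCESSOR kernels is easiest to see with
concrete values. Below is a minimal userland sketch; the IPL constants are
illustrative placeholders standing in for the real sparc64 definitions in
machine/intr.h, not the actual kernel values:

	/*
	 * Sketch of how __MUTEX_IPL() clamps interrupt priority levels
	 * on MULTIPROCESSOR kernels.  The numeric IPL values below are
	 * assumptions for illustration only.
	 */
	#include <stdio.h>

	#define IPL_NONE	0	/* placeholder value */
	#define IPL_BIO		5	/* placeholder value */
	#define IPL_NET		6	/* placeholder value */
	#define IPL_SERIAL	12	/* placeholder: highest IPL with
					   handlers that grab the kernel lock */
	#define IPL_HIGH	15	/* placeholder value */

	#define __MUTEX_IPL(ipl) \
		(((ipl) > IPL_NONE && (ipl) < IPL_SERIAL) ? IPL_SERIAL : (ipl))

	int
	main(void)
	{
		/* IPL_NONE is left alone: such mutexes block no interrupts. */
		printf("IPL_NONE -> %d\n", __MUTEX_IPL(IPL_NONE));	/* 0 */

		/*
		 * Levels strictly between IPL_NONE and IPL_SERIAL are raised
		 * to IPL_SERIAL, so no interrupt handler that grabs the
		 * kernel lock can fire while the mutex is held.
		 */
		printf("IPL_BIO  -> %d\n", __MUTEX_IPL(IPL_BIO));	/* 12 */
		printf("IPL_NET  -> %d\n", __MUTEX_IPL(IPL_NET));	/* 12 */

		/* Levels at or above IPL_SERIAL already block those. */
		printf("IPL_HIGH -> %d\n", __MUTEX_IPL(IPL_HIGH));	/* 15 */
		return 0;
	}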
diff --git a/sys/arch/sparc64/sparc64/mutex.S b/sys/arch/sparc64/sparc64/mutex.S
index 987d801e554..bfdfdf4e5c5 100644
--- a/sys/arch/sparc64/sparc64/mutex.S
+++ b/sys/arch/sparc64/sparc64/mutex.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.S,v 1.7 2010/09/28 20:27:55 miod Exp $ */
+/* $OpenBSD: mutex.S,v 1.8 2013/07/14 21:22:09 kettenis Exp $ */
/*
* Copyright (c) 2007 Mark Kettenis
@@ -38,7 +38,7 @@
#endif
-ENTRY(mtx_init)
+ENTRY(__mtx_init)
stx %g0, [%o0 + MTX_OWNER]
stw %o1, [%o0 + MTX_WANTIPL]
retl
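
Because mtx_init() is now a macro that expands to __mtx_init() with the
already-clamped IPL, existing callers pick up the new behaviour with no
source changes. A hypothetical caller (the softc layout and IPL choice are
illustrative, not taken from any real driver):

	/*
	 * Hypothetical caller: the source is unchanged, but on an MP
	 * kernel the macro now hands IPL_SERIAL rather than IPL_BIO
	 * to __mtx_init().
	 */
	struct example_softc {
		struct mutex	sc_mtx;
	};

	void
	example_attach(struct example_softc *sc)
	{
		/* Expands to __mtx_init(&sc->sc_mtx, __MUTEX_IPL(IPL_BIO)). */
		mtx_init(&sc->sc_mtx, IPL_BIO);
	}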