Diffstat (limited to 'sys')
-rw-r--r--	sys/arch/sgi/include/mutex.h	21
-rw-r--r--	sys/arch/sgi/sgi/mutex.c	4
2 files changed, 20 insertions, 5 deletions
diff --git a/sys/arch/sgi/include/mutex.h b/sys/arch/sgi/include/mutex.h
index 490d62b4f5c..1727d60f032 100644
--- a/sys/arch/sgi/include/mutex.h
+++ b/sys/arch/sgi/include/mutex.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.h,v 1.6 2009/12/28 06:55:27 syuu Exp $ */
+/* $OpenBSD: mutex.h,v 1.7 2014/02/10 20:30:05 kettenis Exp $ */
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -35,9 +35,24 @@ struct mutex {
 	void *mtx_owner;
 };
 
-void mtx_init(struct mutex *, int);
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+	(((ipl) > IPL_NONE && (ipl) < IPL_TTY) ? IPL_TTY : (ipl))
+#else
+#define __MUTEX_IPL(ipl) (ipl)
+#endif
 
-#define MUTEX_INITIALIZER(ipl) { 0, (ipl), IPL_NONE }
+#define MUTEX_INITIALIZER(ipl) { 0, __MUTEX_IPL((ipl)), IPL_NONE }
+
+void __mtx_init(struct mutex *, int);
+#define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do { \
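
The effect of the __MUTEX_IPL() clamp on a MULTIPROCESSOR kernel can be illustrated outside the tree. The sketch below is a minimal userland program, not kernel code; the numeric IPL values are made-up stand-ins chosen only to preserve the ordering IPL_NONE < IPL_BIO < IPL_NET < IPL_TTY < IPL_SCHED that the macro relies on (the real values live in the machine headers).

/*
 * Hypothetical stand-in IPL values; only their relative order matters.
 * In the real sgi kernel these come from the machine interrupt headers.
 */
#include <stdio.h>

#define IPL_NONE	0
#define IPL_BIO		3
#define IPL_NET		4
#define IPL_TTY		5
#define IPL_SCHED	7

#define MULTIPROCESSOR

/*
 * Same clamp as the mutex.h change: anything strictly between IPL_NONE
 * and IPL_TTY is raised to IPL_TTY, so the mutex blocks every interrupt
 * that may try to grab the kernel lock.
 */
#ifdef MULTIPROCESSOR
#define __MUTEX_IPL(ipl) \
	(((ipl) > IPL_NONE && (ipl) < IPL_TTY) ? IPL_TTY : (ipl))
#else
#define __MUTEX_IPL(ipl) (ipl)
#endif

int
main(void)
{
	printf("IPL_NONE  -> %d\n", __MUTEX_IPL(IPL_NONE));	/* stays 0 */
	printf("IPL_BIO   -> %d\n", __MUTEX_IPL(IPL_BIO));	/* raised to IPL_TTY */
	printf("IPL_NET   -> %d\n", __MUTEX_IPL(IPL_NET));	/* raised to IPL_TTY */
	printf("IPL_TTY   -> %d\n", __MUTEX_IPL(IPL_TTY));	/* unchanged */
	printf("IPL_SCHED -> %d\n", __MUTEX_IPL(IPL_SCHED));	/* already above IPL_TTY */
	return 0;
}

Because the clamp is applied where the mutex is initialized (MUTEX_INITIALIZER and the mtx_init wrapper), the stored mtx_wantipl already carries the raised level, so the lock/unlock fast path needs no change.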
diff --git a/sys/arch/sgi/sgi/mutex.c b/sys/arch/sgi/sgi/mutex.c
index 3fa43dac71c..b7fb7b16074 100644
--- a/sys/arch/sgi/sgi/mutex.c
+++ b/sys/arch/sgi/sgi/mutex.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.c,v 1.13 2013/12/26 21:02:37 miod Exp $ */
+/* $OpenBSD: mutex.c,v 1.14 2014/02/10 20:30:05 kettenis Exp $ */
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -60,7 +60,7 @@ try_lock(struct mutex *mtx)
 }
 
 void
-mtx_init(struct mutex *mtx, int wantipl)
+__mtx_init(struct mutex *mtx, int wantipl)
 {
 	mtx->mtx_lock = 0;
 	mtx->mtx_wantipl = wantipl;
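
For existing callers the rename is transparent, since mtx_init() is now a macro that routes to __mtx_init() with the clamped IPL. The sketch below shows this from a hypothetical driver's point of view; foo_softc, foo_attach, and the IPL choices are illustrative only and not part of this commit.

/* Hypothetical driver-side usage; softc layout and IPLs are made up. */
#include <sys/mutex.h>

struct foo_softc {
	struct mutex	sc_mtx;
};

/*
 * Statically initialized mutex: the initializer macro applies the
 * __MUTEX_IPL() clamp at compile time.
 */
struct mutex foo_global_mtx = MUTEX_INITIALIZER(IPL_NET);

void
foo_attach(struct foo_softc *sc)
{
	/*
	 * The call site is unchanged: mtx_init() expands to
	 * __mtx_init(&sc->sc_mtx, __MUTEX_IPL(IPL_BIO)), so on a
	 * MULTIPROCESSOR kernel this mutex actually runs at IPL_TTY.
	 */
	mtx_init(&sc->sc_mtx, IPL_BIO);
}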