Diffstat (limited to 'sys/arch/sgi/include/mutex.h')
 sys/arch/sgi/include/mutex.h | 21 +++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/sys/arch/sgi/include/mutex.h b/sys/arch/sgi/include/mutex.h
index 490d62b4f5c..1727d60f032 100644
--- a/sys/arch/sgi/include/mutex.h
+++ b/sys/arch/sgi/include/mutex.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mutex.h,v 1.6 2009/12/28 06:55:27 syuu Exp $ */
+/* $OpenBSD: mutex.h,v 1.7 2014/02/10 20:30:05 kettenis Exp $ */
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -35,9 +35,24 @@ struct mutex {
 	void *mtx_owner;
 };
 
-void mtx_init(struct mutex *, int);
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+ (((ipl) > IPL_NONE && (ipl) < IPL_TTY) ? IPL_TTY : (ipl))
+#else
+#define __MUTEX_IPL(ipl) (ipl)
+#endif
+
+#define MUTEX_INITIALIZER(ipl) { 0, __MUTEX_IPL((ipl)), IPL_NONE }
 
-#define MUTEX_INITIALIZER(ipl) { 0, (ipl), IPL_NONE }
+void __mtx_init(struct mutex *, int);
+#define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do { \
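
Taken together, the new macros mean that the IPL a caller passes to mtx_init() or MUTEX_INITIALIZER() may be silently raised on MULTIPROCESSOR kernels, per the comment added in this diff. The sketch below is illustrative only and not part of the commit: the example_softc structure, example_attach() function, and the choice of IPL_BIO are hypothetical, and it assumes the usual OpenBSD ordering in which IPL_BIO sits below IPL_TTY.

#include <sys/param.h>
#include <sys/mutex.h>
#include <machine/intr.h>

/* Static initializer: on an MP kernel, __MUTEX_IPL() promotes IPL_BIO to IPL_TTY. */
struct mutex example_mtx = MUTEX_INITIALIZER(IPL_BIO);

struct example_softc {
	struct mutex	sc_mtx;
};

void
example_attach(struct example_softc *sc)
{
	/* Expands to __mtx_init(&sc->sc_mtx, __MUTEX_IPL(IPL_BIO)). */
	mtx_init(&sc->sc_mtx, IPL_BIO);

	mtx_enter(&sc->sc_mtx);	/* raises IPL to IPL_TTY on an MP kernel */
	/* ... critical section: interrupts that grab the kernel lock are blocked ... */
	mtx_leave(&sc->sc_mtx);
}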