summary refs log tree commit diff
path: root/sys/kern/kern_synch.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r-- | sys/kern/kern_synch.c | 39
1 files changed, 38 insertions, 1 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 8cd4a4285e1..209883c59b4 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.81 2007/10/10 15:53:53 art Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.82 2007/11/28 20:07:36 oga Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*
@@ -136,6 +136,43 @@ tsleep(void *ident, int priority, const char *wmesg, int timo)
return (error);
}
+/*
+ * Same as tsleep, but if we have a mutex provided, then once we've
+ * entered the sleep queue we drop the mutex. After sleeping we re-lock.
+ */
+int
+msleep(void *ident, struct mutex *mtx, int priority, const char *wmesg, int timo)
+{
+ struct sleep_state sls;
+ int error, error1;
+
+ sleep_setup(&sls, ident, priority, wmesg);
+ sleep_setup_timeout(&sls, timo);
+ sleep_setup_signal(&sls, priority);
+
+ if (mtx) {
+ /* XXX - We need to make sure that the mutex doesn't
+ * unblock splsched. This can be made a bit more
+ * correct when the sched_lock is a mutex.
+ */
+ MUTEX_OLDIPL(mtx) = splsched();
+ mtx_leave(mtx);
+ }
+
+ sleep_finish(&sls, 1);
+ error1 = sleep_finish_timeout(&sls);
+ error = sleep_finish_signal(&sls);
+
+ if (mtx && (priority & PNORELOCK) == 0)
+ mtx_enter(mtx);
+
+ /* Signal errors are higher priority than timeouts. */
+ if (error == 0 && error1 != 0)
+ error = error1;
+
+ return (error);
+}
+
void
sleep_setup(struct sleep_state *sls, void *ident, int prio, const char *wmesg)
{