summaryrefslogtreecommitdiff
path: root/sys/kern/kern_synch.c
diff options
context:
space:
mode:
authorNiklas Hallqvist <niklas@cvs.openbsd.org>2005-05-25 23:17:48 +0000
committerNiklas Hallqvist <niklas@cvs.openbsd.org>2005-05-25 23:17:48 +0000
commitd6ec0bc1862a4fed11c7f4ac537413b2c7e89de4 (patch)
tree6c1ed544e4a8e11ea3e107d10a95bad1273dc5a4 /sys/kern/kern_synch.c
parentfee642f79221488ebcacbd0ca219a563c8607281 (diff)
This patch is mostly art's work and was done *a year* ago. Art wants to thank
everyone for the prompt review and ok of this work ;-) Yeah, that includes me too, or maybe especially me. I am sorry. Change the sched_lock to a mutex. This fixes, among other things, the infamous "telnet localhost &" problem. The real bug in that case was that the sched_lock which is by design a non-recursive lock, was recursively acquired, and not enough releases made us hold the lock in the idle loop, blocking scheduling on the other processors. Some of the other processors would hold the biglock though, which made it impossible for cpu 0 to enter the kernel... A nice deadlock. Let me just say debugging this for days just to realize that it was all fixed in an old diff no one ever ok'd was somewhat of an anti-climax. This diff also changes splsched to be correct for all our architectures.
Diffstat (limited to 'sys/kern/kern_synch.c')
-rw-r--r--sys/kern/kern_synch.c75
1 files changed, 37 insertions, 38 deletions
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index fa0ff867b3c..9d887083455 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.61 2004/07/29 06:25:45 tedu Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.62 2005/05/25 23:17:47 niklas Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -145,8 +145,12 @@ ltsleep(ident, priority, wmesg, timo, interlock)
else
*qp->sq_tailp = p;
*(qp->sq_tailp = &p->p_forw) = 0;
+
+ p->p_stat = SSLEEP;
+
if (timo)
timeout_add(&p->p_sleep_to, timo);
+
/*
* We can now release the interlock; the scheduler_slock
* is held, so a thread can't get in to do wakeup() before
@@ -170,13 +174,16 @@ ltsleep(ident, priority, wmesg, timo, interlock)
*/
if (catch) {
p->p_flag |= P_SINTR;
+ SCHED_UNLOCK(s); /* XXX - must unlock for CURSIG */
if ((sig = CURSIG(p)) != 0) {
+ SCHED_LOCK(s);
if (p->p_wchan)
unsleep(p);
p->p_stat = SONPROC;
SCHED_UNLOCK(s);
goto resume;
}
+ SCHED_LOCK(s);
if (p->p_wchan == 0) {
catch = 0;
SCHED_UNLOCK(s);
@@ -184,22 +191,14 @@ ltsleep(ident, priority, wmesg, timo, interlock)
}
} else
sig = 0;
- p->p_stat = SSLEEP;
p->p_stats->p_ru.ru_nvcsw++;
SCHED_ASSERT_LOCKED();
- mi_switch();
+ mi_switch(s);
#ifdef DDB
/* handy breakpoint location after process "wakes" */
__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
- SCHED_ASSERT_UNLOCKED();
- /*
- * Note! this splx belongs to the SCHED_LOCK(s) above, mi_switch
- * releases the scheduler lock, but does not lower the spl.
- */
- splx(s);
-
resume:
#ifdef __HAVE_CPUINFO
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
@@ -270,20 +269,13 @@ endtsleep(arg)
* Remove a process from its wait queue
*/
void
-unsleep(p)
- register struct proc *p;
+unsleep(struct proc *p)
{
- register struct slpque *qp;
- register struct proc **hp;
-#if 0
- int s;
+ struct slpque *qp;
+ struct proc **hp;
+
+ SCHED_ASSERT_LOCKED();
- /*
- * XXX we cannot do recursive SCHED_LOCKing yet. All callers lock
- * anyhow.
- */
- SCHED_LOCK(s);
-#endif
if (p->p_wchan) {
hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
while (*hp != p)
@@ -293,24 +285,39 @@ unsleep(p)
qp->sq_tailp = hp;
p->p_wchan = 0;
}
-#if 0
+}
+
+void
+wakeup(void *ident)
+{
+ int s;
+
+ SCHED_LOCK(s);
+ sched_wakeup(ident);
+ SCHED_UNLOCK(s);
+}
+
+void
+wakeup_n(void *ident, int n)
+{
+ int s;
+
+ SCHED_LOCK(s);
+ sched_wakeup_n(ident, n);
SCHED_UNLOCK(s);
-#endif
}
/*
* Make all processes sleeping on the specified identifier runnable.
*/
void
-wakeup_n(ident, n)
- void *ident;
- int n;
+sched_wakeup_n(void *ident, int n)
{
struct slpque *qp;
struct proc *p, **q;
- int s;
- SCHED_LOCK(s);
+ SCHED_ASSERT_LOCKED();
+
qp = &slpque[LOOKUP(ident)];
restart:
for (q = &qp->sq_head; (p = *q) != NULL; ) {
@@ -349,7 +356,7 @@ restart:
need_resched(0);
#endif
} else {
- wakeup((caddr_t)&proc0);
+ sched_wakeup((caddr_t)&proc0);
}
/* END INLINE EXPANSION */
@@ -361,12 +368,4 @@ restart:
} else
q = &p->p_forw;
}
- SCHED_UNLOCK(s);
-}
-
-void
-wakeup(chan)
- void *chan;
-{
- wakeup_n(chan, -1);
}