path: root/sys/kern
author     Theo de Raadt <deraadt@cvs.openbsd.org>  2005-05-29 03:20:44 +0000
committer  Theo de Raadt <deraadt@cvs.openbsd.org>  2005-05-29 03:20:44 +0000
commit     93c77ad51e4fcc28584bb93f63dd48a4a6c771ec (patch)
tree       0198ee58bab354860cf7d00f461d33143ff1aa7f /sys/kern
parent     171f684dab1e3ccc2da4ee86219e9396e8e38e55 (diff)
sched work by niklas and art backed out; causes panics
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_fork.c       4
-rw-r--r--  sys/kern/kern_lock.c      70
-rw-r--r--  sys/kern/kern_resource.c   4
-rw-r--r--  sys/kern/kern_sig.c       63
-rw-r--r--  sys/kern/kern_synch.c     75
-rw-r--r--  sys/kern/kern_time.c       6
-rw-r--r--  sys/kern/sched_bsd.c      81
-rw-r--r--  sys/kern/vfs_sync.c        7
8 files changed, 199 insertions, 111 deletions
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 05ba79f3399..5e0d1273538 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.75 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.76 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@@ -341,9 +341,9 @@ fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
/*
* Make child runnable, set start time, and add to run queue.
*/
+ SCHED_LOCK(s);
getmicrotime(&p2->p_stats->p_start);
p2->p_acflag = AFORK;
- SCHED_LOCK(s);
p2->p_stat = SRUN;
setrunqueue(p2);
SCHED_UNLOCK(s);
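
The net effect of this first hunk is to widen the scheduler-locked region in fork1() so the child's start time and accounting flag are set under the lock as well. The resulting tail of fork1() reads roughly as follows (a sketch assembled from the hunk above, not the complete function):

    /*
     * Make child runnable, set start time, and add to run queue.
     */
    SCHED_LOCK(s);                          /* raise spl and take the scheduler lock */
    getmicrotime(&p2->p_stats->p_start);    /* start time now set under the lock */
    p2->p_acflag = AFORK;
    p2->p_stat = SRUN;                      /* mark the child runnable */
    setrunqueue(p2);                        /* and queue it for the scheduler */
    SCHED_UNLOCK(s);
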
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d36c6d14ec6..3bcb5928023 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_lock.c,v 1.18 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.19 2005/05/29 03:20:41 deraadt Exp $ */
/*
* Copyright (c) 1995
@@ -1121,6 +1121,18 @@ simple_lock_freecheck(void *start, void *end)
splx(s);
}
+/*
+ * We must be holding exactly one lock: the sched_lock.
+ */
+
+#ifdef notyet
+void
+simple_lock_switchcheck(void)
+{
+
+ simple_lock_only_held(&sched_lock, "switching");
+}
+#endif
void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
@@ -1160,6 +1172,60 @@ simple_lock_only_held(volatile struct simplelock *lp, const char *where)
* so that they show up in profiles.
*/
+/*
+ * XXX Instead of using struct lock for the kernel lock and thus requiring us
+ * XXX to implement simplelocks, causing all sorts of fine-grained locks all
+ * XXX over our tree getting activated consuming both time and potentially
+ * XXX introducing locking protocol bugs.
+ */
+#ifdef notyet
+
+struct lock kernel_lock;
+
+void
+_kernel_lock_init(void)
+{
+ spinlockinit(&kernel_lock, "klock", 0);
+}
+
+/*
+ * Acquire/release the kernel lock. Intended for use in the scheduler
+ * and the lower half of the kernel.
+ */
+void
+_kernel_lock(int flag)
+{
+ SCHED_ASSERT_UNLOCKED();
+ spinlockmgr(&kernel_lock, flag, 0);
+}
+
+void
+_kernel_unlock(void)
+{
+ spinlockmgr(&kernel_lock, LK_RELEASE, 0);
+}
+
+/*
+ * Acquire/release the kernel_lock on behalf of a process. Intended for
+ * use in the top half of the kernel.
+ */
+void
+_kernel_proc_lock(struct proc *p)
+{
+ SCHED_ASSERT_UNLOCKED();
+ spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
+ p->p_flag |= P_BIGLOCK;
+}
+
+void
+_kernel_proc_unlock(struct proc *p)
+{
+ p->p_flag &= ~P_BIGLOCK;
+ spinlockmgr(&kernel_lock, LK_RELEASE, 0);
+}
+
+#else
+
struct __mp_lock kernel_lock;
void
@@ -1206,6 +1272,8 @@ _kernel_proc_unlock(struct proc *p)
__mp_unlock(&kernel_lock);
}
+#endif
+
#ifdef MP_LOCKDEBUG
/* CPU-dependent timing, needs this to be settable from ddb. */
int __mp_lock_spinout = 200000000;
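
The _kernel_proc_lock()/_kernel_proc_unlock() pair above cooperates with mi_switch() in sched_bsd.c further down: a process marks itself P_BIGLOCK while holding the kernel lock, and the scheduler releases and reacquires the lock around a context switch on its behalf. A minimal sketch of the intended top-half call pattern, assuming a generic syscall/trap entry point (the framing is illustrative; only the two functions come from this diff):

    /* Illustrative use in the top half of the kernel. */
    _kernel_proc_lock(p);      /* take kernel_lock, set P_BIGLOCK on p */
    /* ... run the syscall or trap handler ... */
    _kernel_proc_unlock(p);    /* clear P_BIGLOCK, release kernel_lock */
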
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index d770a3858af..5f33e7b55a1 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_resource.c,v 1.29 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.30 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
@@ -199,7 +199,7 @@ donice(curp, chgp, n)
return (EACCES);
chgp->p_nice = n;
SCHED_LOCK(s);
- resetpriority(chgp);
+ (void)resetpriority(chgp);
SCHED_UNLOCK(s);
return (0);
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index d228b218db6..a8092d7bcb1 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.74 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.75 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -805,19 +805,29 @@ trapsignal(p, signum, code, type, sigval)
* regardless of the signal action (eg, blocked or ignored).
*
* Other ignored signals are discarded immediately.
+ *
+ * XXXSMP: Invoked as psignal() or sched_psignal().
*/
void
-psignal(struct proc *p, int signum)
+psignal1(p, signum, dolock)
+ register struct proc *p;
+ register int signum;
+ int dolock; /* XXXSMP: works, but icky */
{
- int s, prop;
- sig_t action;
+ register int s, prop;
+ register sig_t action;
int mask;
#ifdef DIAGNOSTIC
if ((u_int)signum >= NSIG || signum == 0)
panic("psignal signal number");
+
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_ASSERT_UNLOCKED();
+ else
+ SCHED_ASSERT_LOCKED();
#endif
- SCHED_ASSERT_UNLOCKED();
/* Ignore signal if we are exiting */
if (p->p_flag & P_WEXIT)
@@ -880,8 +890,10 @@ psignal(struct proc *p, int signum)
*/
if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
return;
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_LOCK(s);
- SCHED_LOCK(s);
switch (p->p_stat) {
case SSLEEP:
@@ -922,11 +934,12 @@ psignal(struct proc *p, int signum)
goto out;
p->p_siglist &= ~mask;
p->p_xstat = signum;
- if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) {
- SCHED_UNLOCK(s);
- psignal(p->p_pptr, SIGCHLD);
- SCHED_LOCK(s);
- }
+ if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
+ /*
+ * XXXSMP: recursive call; don't lock
+ * the second time around.
+ */
+ sched_psignal(p->p_pptr, SIGCHLD);
proc_stop(p);
goto out;
}
@@ -963,7 +976,7 @@ psignal(struct proc *p, int signum)
* Otherwise, process goes back to sleep state.
*/
p->p_flag |= P_CONTINUED;
- sched_wakeup(p->p_pptr);
+ wakeup(p->p_pptr);
if (action == SIG_DFL)
p->p_siglist &= ~mask;
if (action == SIG_CATCH)
@@ -1014,7 +1027,9 @@ runfast:
run:
setrunnable(p);
out:
- SCHED_UNLOCK(s);
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_UNLOCK(s);
}
/*
@@ -1059,23 +1074,24 @@ issignal(struct proc *p)
*/
p->p_xstat = signum;
+ SCHED_LOCK(s); /* protect mi_switch */
if (p->p_flag & P_FSTRACE) {
#ifdef PROCFS
- SCHED_LOCK(s);
/* procfs debugging */
p->p_stat = SSTOP;
- sched_wakeup(p);
- mi_switch(s);
+ wakeup(p);
+ mi_switch();
#else
panic("procfs debugging");
#endif
} else {
/* ptrace debugging */
psignal(p->p_pptr, SIGCHLD);
- SCHED_LOCK(s);
proc_stop(p);
- mi_switch(s);
+ mi_switch();
}
+ SCHED_ASSERT_UNLOCKED();
+ splx(s);
/*
* If we are no longer being traced, or the parent
@@ -1136,7 +1152,9 @@ issignal(struct proc *p)
psignal(p->p_pptr, SIGCHLD);
SCHED_LOCK(s);
proc_stop(p);
- mi_switch(s);
+ mi_switch();
+ SCHED_ASSERT_UNLOCKED();
+ splx(s);
break;
} else if (prop & SA_IGNORE) {
/*
@@ -1180,13 +1198,16 @@ keep:
* on the run queue.
*/
void
-proc_stop(struct proc *p)
+proc_stop(p)
+ struct proc *p;
{
+#ifdef MULTIPROCESSOR
SCHED_ASSERT_LOCKED();
+#endif
p->p_stat = SSTOP;
p->p_flag &= ~P_WAITED;
- sched_wakeup(p->p_pptr);
+ wakeup(p->p_pptr);
}
/*
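
The psignal1() comment above says it is invoked as psignal() or sched_psignal(), with the dolock argument selecting whether the scheduler lock is taken or assumed already held. Those wrappers are not part of this diff; in the NetBSD code this scheme derives from they are thin macros along these lines (a presumed sketch, not confirmed by the hunks here):

    /* Presumed wrappers; the real definitions live in a header. */
    #define psignal(p, sig)        psignal1((p), (sig), 1)  /* takes sched_lock itself */
    #define sched_psignal(p, sig)  psignal1((p), (sig), 0)  /* caller already holds it */
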
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 9d887083455..16fc2d6f8ae 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.62 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.63 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -145,12 +145,8 @@ ltsleep(ident, priority, wmesg, timo, interlock)
else
*qp->sq_tailp = p;
*(qp->sq_tailp = &p->p_forw) = 0;
-
- p->p_stat = SSLEEP;
-
if (timo)
timeout_add(&p->p_sleep_to, timo);
-
/*
* We can now release the interlock; the scheduler_slock
* is held, so a thread can't get in to do wakeup() before
@@ -174,16 +170,13 @@ ltsleep(ident, priority, wmesg, timo, interlock)
*/
if (catch) {
p->p_flag |= P_SINTR;
- SCHED_UNLOCK(s); /* XXX - must unlock for CURSIG */
if ((sig = CURSIG(p)) != 0) {
- SCHED_LOCK(s);
if (p->p_wchan)
unsleep(p);
p->p_stat = SONPROC;
SCHED_UNLOCK(s);
goto resume;
}
- SCHED_LOCK(s);
if (p->p_wchan == 0) {
catch = 0;
SCHED_UNLOCK(s);
@@ -191,14 +184,22 @@ ltsleep(ident, priority, wmesg, timo, interlock)
}
} else
sig = 0;
+ p->p_stat = SSLEEP;
p->p_stats->p_ru.ru_nvcsw++;
SCHED_ASSERT_LOCKED();
- mi_switch(s);
+ mi_switch();
#ifdef DDB
/* handy breakpoint location after process "wakes" */
__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
+ SCHED_ASSERT_UNLOCKED();
+ /*
+ * Note! this splx belongs to the SCHED_LOCK(s) above, mi_switch
+ * releases the scheduler lock, but does not lower the spl.
+ */
+ splx(s);
+
resume:
#ifdef __HAVE_CPUINFO
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
@@ -269,13 +270,20 @@ endtsleep(arg)
* Remove a process from its wait queue
*/
void
-unsleep(struct proc *p)
+unsleep(p)
+ register struct proc *p;
{
- struct slpque *qp;
- struct proc **hp;
-
- SCHED_ASSERT_LOCKED();
+ register struct slpque *qp;
+ register struct proc **hp;
+#if 0
+ int s;
+ /*
+ * XXX we cannot do recursive SCHED_LOCKing yet. All callers lock
+ * anyhow.
+ */
+ SCHED_LOCK(s);
+#endif
if (p->p_wchan) {
hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
while (*hp != p)
@@ -285,39 +293,24 @@ unsleep(struct proc *p)
qp->sq_tailp = hp;
p->p_wchan = 0;
}
-}
-
-void
-wakeup(void *ident)
-{
- int s;
-
- SCHED_LOCK(s);
- sched_wakeup(ident);
- SCHED_UNLOCK(s);
-}
-
-void
-wakeup_n(void *ident, int n)
-{
- int s;
-
- SCHED_LOCK(s);
- sched_wakeup_n(ident, n);
+#if 0
SCHED_UNLOCK(s);
+#endif
}
/*
* Make all processes sleeping on the specified identifier runnable.
*/
void
-sched_wakeup_n(void *ident, int n)
+wakeup_n(ident, n)
+ void *ident;
+ int n;
{
struct slpque *qp;
struct proc *p, **q;
+ int s;
- SCHED_ASSERT_LOCKED();
-
+ SCHED_LOCK(s);
qp = &slpque[LOOKUP(ident)];
restart:
for (q = &qp->sq_head; (p = *q) != NULL; ) {
@@ -356,7 +349,7 @@ restart:
need_resched(0);
#endif
} else {
- sched_wakeup((caddr_t)&proc0);
+ wakeup((caddr_t)&proc0);
}
/* END INLINE EXPANSION */
@@ -368,4 +361,12 @@ restart:
} else
q = &p->p_forw;
}
+ SCHED_UNLOCK(s);
+}
+
+void
+wakeup(chan)
+ void *chan;
+{
+ wakeup_n(chan, -1);
}
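
A recurring protocol runs through these hunks: after the backout, mi_switch() takes no spl argument, releases the scheduler lock internally, but leaves the spl raised, so every caller lowers it explicitly afterward. Assembled from the ltsleep(), yield(), and preempt() hunks, the caller-side sequence is:

    SCHED_LOCK(s);             /* raises spl and takes sched_lock */
    /* ... adjust p->p_stat, run queues, sleep queues ... */
    mi_switch();               /* returns with sched_lock released... */
    SCHED_ASSERT_UNLOCKED();
    splx(s);                   /* ...but the spl still raised: lower it here */
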
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index d5e29b55362..237b20a2f09 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_time.c,v 1.46 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: kern_time.c,v 1.47 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -560,12 +560,8 @@ sys_setitimer(p, v, retval)
}
p->p_realtimer = aitv;
} else {
- int s;
-
itimerround(&aitv.it_interval);
- s = splclock();
p->p_stats->p_timer[SCARG(uap, which)] = aitv;
- splx(s);
}
return (0);
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 3bfe54b9bcd..854ff074573 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.3 2005/05/26 18:10:40 art Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.4 2005/05/29 03:20:41 deraadt Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -65,9 +65,7 @@ int rrticks_init; /* # of hardclock ticks per roundrobin() */
int whichqs; /* Bit mask summary of non-empty Q's. */
struct prochd qs[NQS];
-#ifdef MULTIPROCESSOR
-struct mutex sched_mutex = MUTEX_INITIALIZER(IPL_SCHED);
-#endif
+struct SIMPLELOCK sched_lock;
void scheduler_start(void);
@@ -262,7 +260,7 @@ schedcpu(arg)
struct timeout *to = (struct timeout *)arg;
fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
struct proc *p;
- int s, t;
+ int s;
unsigned int newcpu;
int phz;
@@ -276,7 +274,6 @@ schedcpu(arg)
KASSERT(phz);
for (p = LIST_FIRST(&allproc); p != 0; p = LIST_NEXT(p, p_list)) {
- SCHED_LOCK(s);
/*
* Increment time in/out of memory and sleep time
* (if sleeping). We ignore overflow; with 16-bit int's
@@ -290,11 +287,9 @@ schedcpu(arg)
* If the process has slept the entire second,
* stop recalculating its priority until it wakes up.
*/
- if (p->p_slptime > 1) {
- SCHED_UNLOCK(s);
+ if (p->p_slptime > 1)
continue;
- }
- t = splstatclock();
+ s = splstatclock(); /* prevent state changes */
/*
* p_pctcpu is only for ps.
*/
@@ -310,7 +305,8 @@ schedcpu(arg)
p->p_cpticks = 0;
newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
p->p_estcpu = newcpu;
- splx(t);
+ splx(s);
+ SCHED_LOCK(s);
resetpriority(p);
if (p->p_priority >= PUSER) {
if ((p != curproc) &&
@@ -359,13 +355,13 @@ updatepri(p)
void
sched_unlock_idle(void)
{
- mtx_leave(&sched_mutex);
+ SIMPLE_UNLOCK(&sched_lock);
}
void
sched_lock_idle(void)
{
- mtx_enter(&sched_mutex);
+ SIMPLE_LOCK(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */
@@ -384,7 +380,9 @@ yield()
p->p_stat = SRUN;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
- mi_switch(s);
+ mi_switch();
+ SCHED_ASSERT_UNLOCKED();
+ splx(s);
}
/*
@@ -411,17 +409,19 @@ preempt(newp)
p->p_stat = SRUN;
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
- mi_switch(s);
+ mi_switch();
+ SCHED_ASSERT_UNLOCKED();
+ splx(s);
}
/*
- * Must be called at splsched() or higher.
+ * Must be called at splstatclock() or higher.
*/
void
-mi_switch(int s)
+mi_switch()
{
- struct proc *p = curproc;
+ struct proc *p = curproc; /* XXX */
struct rlimit *rlim;
struct timeval tv;
#if defined(MULTIPROCESSOR)
@@ -433,6 +433,20 @@ mi_switch(int s)
SCHED_ASSERT_LOCKED();
+#if defined(MULTIPROCESSOR)
+ /*
+ * Release the kernel_lock, as we are about to yield the CPU.
+ * The scheduler lock is still held until cpu_switch()
+ * selects a new process and removes it from the run queue.
+ */
+ if (p->p_flag & P_BIGLOCK)
+#ifdef notyet
+ hold_count = spinlock_release_all(&kernel_lock);
+#else
+ hold_count = __mp_release_all(&kernel_lock);
+#endif
+#endif
+
/*
* Compute the amount of time during which the current
* process was running, and add that to its total so far.
@@ -470,7 +484,6 @@ mi_switch(int s)
*/
rlim = &p->p_rlimit[RLIMIT_CPU];
if ((rlim_t)p->p_rtime.tv_sec >= rlim->rlim_cur) {
- SCHED_UNLOCK(s);
if ((rlim_t)p->p_rtime.tv_sec >= rlim->rlim_max) {
psignal(p, SIGKILL);
} else {
@@ -478,23 +491,8 @@ mi_switch(int s)
if (rlim->rlim_cur < rlim->rlim_max)
rlim->rlim_cur += 5;
}
- SCHED_LOCK(s);
}
-#if defined(MULTIPROCESSOR)
- /*
- * Release the kernel_lock, as we are about to yield the CPU.
- * The scheduler lock is still held until cpu_switch()
- * selects a new process and removes it from the run queue.
- */
- if (p->p_flag & P_BIGLOCK)
-#ifdef notyet
- hold_count = spinlock_release_all(&kernel_lock);
-#else
- hold_count = __mp_release_all(&kernel_lock);
-#endif
-#endif
-
/*
* Process is about to yield the CPU; clear the appropriate
* scheduling flags.
@@ -537,9 +535,12 @@ mi_switch(int s)
* we reacquire the interlock.
*/
if (p->p_flag & P_BIGLOCK)
+#ifdef notyet
+ spinlock_acquire_count(&kernel_lock, hold_count);
+#else
__mp_acquire_count(&kernel_lock, hold_count);
#endif
- splx(s);
+#endif
}
/*
@@ -553,6 +554,7 @@ rqinit()
for (i = 0; i < NQS; i++)
qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
+ SIMPLE_LOCK_INIT(&sched_lock);
}
static __inline void
@@ -600,7 +602,8 @@ resched_proc(struct proc *p, u_char pri)
* and awakening the swapper if it isn't in memory.
*/
void
-setrunnable(struct proc *p)
+setrunnable(p)
+ register struct proc *p;
{
SCHED_ASSERT_LOCKED();
@@ -632,7 +635,7 @@ setrunnable(struct proc *p)
updatepri(p);
p->p_slptime = 0;
if ((p->p_flag & P_INMEM) == 0)
- sched_wakeup((caddr_t)&proc0);
+ wakeup((caddr_t)&proc0);
else
resched_proc(p, p->p_priority);
}
@@ -677,10 +680,10 @@ schedclock(p)
{
int s;
- SCHED_LOCK(s);
p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
+ SCHED_LOCK(s);
resetpriority(p);
+ SCHED_UNLOCK(s);
if (p->p_priority >= PUSER)
p->p_priority = p->p_usrpri;
- SCHED_UNLOCK(s);
}
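
With sched_mutex backed out, sched_lock reverts to a struct SIMPLELOCK initialized in rqinit() and manipulated through SIMPLE_LOCK()/SIMPLE_UNLOCK() in the idle hooks above. The SCHED_LOCK()/SCHED_UNLOCK() macros themselves are defined in sys/sched.h rather than here; on a plausible reading they expand to roughly the following (the macro bodies and the exact spl level are assumptions, not shown in this diff):

    /* Presumed macro shape; the real bodies are in sys/sched.h. */
    #define SCHED_LOCK(s)                                   \
    do {                                                    \
            (s) = splsched();       /* or splstatclock() */ \
            SIMPLE_LOCK(&sched_lock);                       \
    } while (0)

    #define SCHED_UNLOCK(s)                                 \
    do {                                                    \
            SIMPLE_UNLOCK(&sched_lock);                     \
            splx(s);                                        \
    } while (0)
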
diff --git a/sys/kern/vfs_sync.c b/sys/kern/vfs_sync.c
index 549962182f6..4d74da6a3b2 100644
--- a/sys/kern/vfs_sync.c
+++ b/sys/kern/vfs_sync.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_sync.c,v 1.30 2005/05/25 23:17:47 niklas Exp $ */
+/* $OpenBSD: vfs_sync.c,v 1.31 2005/05/29 03:20:42 deraadt Exp $ */
/*
* Portions of this code are:
@@ -50,7 +50,6 @@
#include <sys/malloc.h>
#include <sys/kernel.h>
-#include <sys/sched.h>
#ifdef FFS_SOFTUPDATES
int softdep_process_worklist(struct mount *);
@@ -245,10 +244,10 @@ speedup_syncer()
{
int s;
- SCHED_LOCK(s);
+ s = splhigh();
if (syncerproc && syncerproc->p_wchan == &lbolt)
setrunnable(syncerproc);
- SCHED_UNLOCK(s);
+ splx(s);
if (rushjob < syncdelay / 2) {
rushjob += 1;
stat_rush_requests += 1;
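
This final hunk drops vfs_sync.c's dependency on the scheduler lock entirely: with the fine-grained sched_lock gone, blocking interrupts via splhigh() is again enough to keep the syncer's wakeup check atomic. The resulting start of speedup_syncer() reads (a sketch limited to the lines shown above):

    s = splhigh();             /* spl now stands in for the scheduler lock */
    if (syncerproc && syncerproc->p_wchan == &lbolt)
            setrunnable(syncerproc);
    splx(s);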