author     Niklas Hallqvist <niklas@cvs.openbsd.org>   2004-06-13 21:49:30 +0000
committer  Niklas Hallqvist <niklas@cvs.openbsd.org>   2004-06-13 21:49:30 +0000
commit     2dd254afa61a7c0cc5ae920b463d3d4266852804 (patch)
tree       7adbebef3be24ba910fd83ee1ba09e1577ae21a8 /sys/kern
parent     4d62e331dcde739b4067d712dd602c0927ce11b3 (diff)
debranch SMP, have fun
Diffstat (limited to 'sys/kern')
-rw-r--r--   sys/kern/init_main.c     |   30
-rw-r--r--   sys/kern/kern_clock.c    |   19
-rw-r--r--   sys/kern/kern_exit.c     |   24
-rw-r--r--   sys/kern/kern_fork.c     |   25
-rw-r--r--   sys/kern/kern_ktrace.c   |    3
-rw-r--r--   sys/kern/kern_lock.c     | 1047
-rw-r--r--   sys/kern/kern_proc.c     |    6
-rw-r--r--   sys/kern/kern_resource.c |    6
-rw-r--r--   sys/kern/kern_sig.c      |   50
-rw-r--r--   sys/kern/kern_subr.c     |    3
-rw-r--r--   sys/kern/kern_synch.c    |  269
-rw-r--r--   sys/kern/kern_sysctl.c   |   21
-rw-r--r--   sys/kern/kern_time.c     |    4
-rw-r--r--   sys/kern/subr_pool.c     |   13
-rw-r--r--   sys/kern/subr_prf.c      |   53
-rw-r--r--   sys/kern/sys_generic.c   |    7
-rw-r--r--   sys/kern/sys_process.c   |    6
-rw-r--r--   sys/kern/tty.c           |    5
18 files changed, 1277 insertions, 314 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 55dad5457b4..a8d052d0a05 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: init_main.c,v 1.115 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: init_main.c,v 1.116 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $ */
/*
@@ -125,7 +125,6 @@ struct timeval boottime;
#ifndef __HAVE_CPUINFO
struct timeval runtime;
#endif
-
int ncpus = 1;
#if !defined(NO_PROPOLICE)
@@ -188,6 +187,7 @@ main(framep)
int s, i;
register_t rval[2];
extern struct pdevinit pdevinit[];
+ extern struct SIMPLELOCK kprintf_slock;
extern void scheduler_start(void);
extern void disk_init(void);
extern void endtsleep(void *);
@@ -213,8 +213,13 @@ main(framep)
*/
config_init(); /* init autoconfiguration data structures */
consinit();
+
+ SIMPLE_LOCK_INIT(&kprintf_slock);
+
printf("%s\n", copyright);
+ KERNEL_LOCK_INIT();
+
uvm_init();
disk_init(); /* must come before autoconfiguration */
tty_init(); /* initialise tty's */
@@ -270,7 +275,7 @@ main(framep)
session0.s_leader = p;
p->p_flag = P_INMEM | P_SYSTEM | P_NOCLDWAIT;
- p->p_stat = SRUN;
+ p->p_stat = SONPROC;
p->p_nice = NZERO;
p->p_emul = &emul_native;
bcopy("swapper", p->p_comm, sizeof ("swapper"));
@@ -344,6 +349,9 @@ main(framep)
/* Start real time and statistics clocks. */
initclocks();
+ /* Lock the kernel on behalf of proc0. */
+ KERNEL_PROC_LOCK(p);
+
#ifdef SYSVSHM
/* Initialize System V style shared memory. */
shminit();
@@ -413,8 +421,6 @@ main(framep)
VOP_UNLOCK(rootvnode, 0, p);
p->p_fd->fd_rdir = NULL;
- uvm_swap_init();
-
/*
* Now can look at time, having had a chance to verify the time
* from the file system. Reset p->p_rtime as it may have been
@@ -424,10 +430,12 @@ main(framep)
p->p_stats->p_start = mono_time = boottime = time;
p->p_cpu->ci_schedstate.spc_runtime = time;
#else
- p->p_stats->p_start = runtime = mono_time = boottime = time;
+ p->p_stats->p_start = runtime = mono_time = boottime = time;
#endif
p->p_rtime.tv_sec = p->p_rtime.tv_usec = 0;
+ uvm_swap_init();
+
/* Create process 1 (init(8)). */
if (fork1(p, SIGCHLD, FORK_FORK, NULL, 0, start_init, NULL, rval))
panic("fork init");
@@ -465,6 +473,12 @@ main(framep)
srandom((u_long)(rtv.tv_sec ^ rtv.tv_usec));
randompid = 1;
+
+#if defined(MULTIPROCESSOR)
+ /* Boot the secondary processors. */
+ cpu_boot_secondary_processors();
+#endif
+
/* The scheduler is an infinite loop. */
uvm_scheduler();
/* NOTREACHED */
@@ -623,8 +637,10 @@ start_init(arg)
* Now try to exec the program. If can't for any reason
* other than it doesn't exist, complain.
*/
- if ((error = sys_execve(p, &args, retval)) == 0)
+ if ((error = sys_execve(p, &args, retval)) == 0) {
+ KERNEL_PROC_UNLOCK(p);
return;
+ }
if (error != ENOENT)
printf("exec %s: error %d\n", path, error);
}
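
The init_main.c hunks above establish the "big lock" convention for process
context: a process acquires the kernel lock when it runs in the kernel and
releases it before returning to user mode (start_init() drops it once the
execve of init succeeds).  A minimal sketch of that bracketing, assuming only
the KERNEL_PROC_LOCK()/KERNEL_PROC_UNLOCK() macros this commit relies on; the
wrapper function and its name below are illustrative, not part of the tree:

/*
 * Illustrative only: how a machine-independent kernel-entry path
 * would bracket process-context work with the big lock.
 */
void
example_kernel_entry(struct proc *p)
{
	KERNEL_PROC_LOCK(p);		/* exclude other CPUs from the kernel */
	/* ... handle the trap or system call; sleeping is allowed ... */
	KERNEL_PROC_UNLOCK(p);		/* drop the lock before user mode */
}
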
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 8a34e63d16c..5a3df5ecec4 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clock.c,v 1.43 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.44 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -192,9 +192,16 @@ hardclock(struct clockframe *frame)
if (stathz == 0)
statclock(frame);
-#ifdef __HAVE_CPUINFO
+#if defined(__HAVE_CPUINFO)
if (--ci->ci_schedstate.spc_rrticks <= 0)
roundrobin(ci);
+
+ /*
+ * If we are not the primary CPU, we're not allowed to do
+ * any more work.
+ */
+ if (CPU_IS_PRIMARY(ci) == 0)
+ return;
#endif
/*
@@ -420,9 +427,10 @@ statclock(struct clockframe *frame)
if (psdiv == 1) {
setstatclockrate(stathz);
} else {
- setstatclockrate(profhz);
+ setstatclockrate(profhz);
}
}
+
/* XXX Kludgey */
#define pscnt spc->spc_pscnt
#define cp_time spc->spc_cp_time
@@ -483,7 +491,7 @@ statclock(struct clockframe *frame)
pscnt = psdiv;
#ifdef __HAVE_CPUINFO
-#undef pscnt
+#undef psdiv
#undef cp_time
#endif
@@ -495,7 +503,8 @@ statclock(struct clockframe *frame)
*/
if (schedhz == 0) {
#ifdef __HAVE_CPUINFO
- if ((++curcpu()->ci_schedstate.spc_schedticks & 3) == 0)
+ if ((++curcpu()->ci_schedstate.spc_schedticks & 3) ==
+ 0)
schedclock(p);
#else
if ((++schedclk & 3) == 0)
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 68d77771271..1587724bc08 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exit.c,v 1.50 2004/05/27 20:48:46 tedu Exp $ */
+/* $OpenBSD: kern_exit.c,v 1.51 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/*
@@ -285,6 +285,9 @@ exit1(p, rv)
limfree(p->p_limit);
p->p_limit = NULL;
+ /* This process no longer needs to hold the kernel lock. */
+ KERNEL_PROC_UNLOCK(p);
+
/*
* If emulation has process exit hook, call it now.
*/
@@ -319,12 +322,15 @@ void
exit2(p)
struct proc *p;
{
+ int s;
- simple_lock(&deadproc_slock);
+ SIMPLE_LOCK(&deadproc_slock);
LIST_INSERT_HEAD(&deadproc, p, p_hash);
- simple_unlock(&deadproc_slock);
+ SIMPLE_UNLOCK(&deadproc_slock);
wakeup(&deadproc);
+
+ SCHED_LOCK(s);
}
/*
@@ -337,19 +343,22 @@ reaper(void)
{
struct proc *p;
+ KERNEL_PROC_UNLOCK(curproc);
+
for (;;) {
- simple_lock(&deadproc_slock);
+ SIMPLE_LOCK(&deadproc_slock);
p = LIST_FIRST(&deadproc);
if (p == NULL) {
/* No work for us; go to sleep until someone exits. */
- simple_unlock(&deadproc_slock);
+ SIMPLE_UNLOCK(&deadproc_slock);
(void) tsleep(&deadproc, PVM, "reaper", 0);
continue;
}
/* Remove us from the deadproc list. */
LIST_REMOVE(p, p_hash);
- simple_unlock(&deadproc_slock);
+ SIMPLE_UNLOCK(&deadproc_slock);
+ KERNEL_PROC_LOCK(curproc);
/*
* Give machine-dependent code a chance to free any
@@ -377,6 +386,9 @@ reaper(void)
/* Noone will wait for us. Just zap the process now */
proc_zap(p);
}
+ /* XXXNJW where should this be with respect to
+ * the wakeup() above? */
+ KERNEL_PROC_UNLOCK(curproc);
}
}
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index a103c391634..e33ea08e005 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.68 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.69 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@@ -204,7 +204,7 @@ fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
timeout_set(&p2->p_sleep_to, endtsleep, p2);
timeout_set(&p2->p_realit_to, realitexpire, p2);
-#ifdef __HAVE_CPUINFO
+#if defined(__HAVE_CPUINFO)
p2->p_cpu = NULL;
#endif
@@ -339,12 +339,12 @@ fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
/*
* Make child runnable, set start time, and add to run queue.
*/
- s = splstatclock();
+ SCHED_LOCK(s);
p2->p_stats->p_start = time;
p2->p_acflag = AFORK;
p2->p_stat = SRUN;
setrunqueue(p2);
- splx(s);
+ SCHED_UNLOCK(s);
/*
* Now can be swapped.
@@ -399,3 +399,20 @@ pidtaken(pid_t pid)
return (1);
return (0);
}
+
+#if defined(MULTIPROCESSOR)
+/*
+ * XXX This is a slight hack to get newly-formed processes to
+ * XXX acquire the kernel lock as soon as they run.
+ */
+void
+proc_trampoline_mp(void)
+{
+ struct proc *p;
+
+ p = curproc;
+
+ SCHED_ASSERT_UNLOCKED();
+ KERNEL_PROC_LOCK(p);
+}
+#endif
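
proc_trampoline_mp() exists so that a newly forked process starts life holding
the kernel lock: fork1() builds the child while the parent holds it, but the
child first runs in machine-dependent trampoline code that does not.  A sketch
of where the call is intended to sit; the trampoline shown here and its
arguments are purely illustrative, the real MD trampolines differ per
architecture:

/*
 * Illustrative only: an MD trampoline for a freshly forked process
 * would call proc_trampoline_mp() before running the function that
 * cpu_fork() arranged for it (typically child_return()).
 */
void
example_proc_trampoline(void (*func)(void *), void *arg)
{
#if defined(MULTIPROCESSOR)
	proc_trampoline_mp();	/* take the kernel lock for curproc */
#endif
	(*func)(arg);		/* continue into the MI child path */
}
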
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index b811644d403..efb904c589a 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_ktrace.c,v 1.32 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_ktrace.c,v 1.33 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $ */
/*
@@ -37,6 +37,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d75d09acc94..8ea70f3a097 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_lock.c,v 1.15 2003/06/02 23:28:05 millert Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.16 2004/06/13 21:49:26 niklas Exp $ */
/*
* Copyright (c) 1995
@@ -39,9 +39,20 @@
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
+#include <sys/sched.h>
#include <machine/cpu.h>
+#ifndef spllock
+#define spllock() splhigh()
+#endif
+
+#ifdef MULTIPROCESSOR
+#define CPU_NUMBER() cpu_number()
+#else
+#define CPU_NUMBER() 0
+#endif
+
void record_stacktrace(int *, int);
void playback_stacktrace(int *, int);
@@ -50,67 +61,243 @@ void playback_stacktrace(int *, int);
* Locks provide shared/exclusive sychronization.
*/
-#if 0
-#ifdef DEBUG
-#define COUNT(p, x) if (p) (p)->p_locks += (x)
+/*
+ * Locking primitives implementation.
+ * Locks provide shared/exclusive synchronization.
+ */
+
+#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
+#if defined(MULTIPROCESSOR) /* { */
+#define COUNT_CPU(cpu_id, x) \
+ curcpu()->ci_spin_locks += (x)
#else
-#define COUNT(p, x)
+u_long spin_locks;
+#define COUNT_CPU(cpu_id, x) spin_locks += (x)
+#endif /* MULTIPROCESSOR */ /* } */
+
+#define COUNT(lkp, p, cpu_id, x) \
+do { \
+ if ((lkp)->lk_flags & LK_SPIN) \
+ COUNT_CPU((cpu_id), (x)); \
+ else \
+ (p)->p_locks += (x); \
+} while (/*CONSTCOND*/0)
+#else
+#define COUNT(lkp, p, cpu_id, x)
+#define COUNT_CPU(cpu_id, x)
+#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
+
+#ifndef SPINLOCK_SPIN_HOOK /* from <machine/lock.h> */
+#define SPINLOCK_SPIN_HOOK /* nothing */
#endif
+
+#define INTERLOCK_ACQUIRE(lkp, flags, s) \
+do { \
+ if ((flags) & LK_SPIN) \
+ s = spllock(); \
+ simple_lock(&(lkp)->lk_interlock); \
+} while (/*CONSTCOND*/ 0)
+
+#define INTERLOCK_RELEASE(lkp, flags, s) \
+do { \
+ simple_unlock(&(lkp)->lk_interlock); \
+ if ((flags) & LK_SPIN) \
+ splx(s); \
+} while (/*CONSTCOND*/ 0)
+
+#ifdef DDB /* { */
+#ifdef MULTIPROCESSOR
+int simple_lock_debugger = 1; /* more serious on MP */
+#else
+int simple_lock_debugger = 0;
#endif
+#define SLOCK_DEBUGGER() if (simple_lock_debugger) Debugger()
+#define SLOCK_TRACE() \
+ db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
+ TRUE, 65535, "", lock_printf);
+#else
+#define SLOCK_DEBUGGER() /* nothing */
+#define SLOCK_TRACE() /* nothing */
+#endif /* } */
-#define COUNT(p, x)
+#if defined(LOCKDEBUG)
+#if defined(DDB)
+#define SPINLOCK_SPINCHECK_DEBUGGER Debugger()
+#else
+#define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
+#endif
+
+#define SPINLOCK_SPINCHECK_DECL \
+ /* 32-bits of count -- wrap constitutes a "spinout" */ \
+ uint32_t __spinc = 0
-#if NCPUS > 1
+#define SPINLOCK_SPINCHECK \
+do { \
+ if (++__spinc == 0) { \
+ lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
+ lkp->lk_exclusivecount, lkp->lk_sharecount); \
+ if (lkp->lk_exclusivecount) \
+ lock_printf("held by CPU %lu\n", \
+ (u_long) lkp->lk_cpu); \
+ if (lkp->lk_lock_file) \
+ lock_printf("last locked at %s:%d\n", \
+ lkp->lk_lock_file, lkp->lk_lock_line); \
+ if (lkp->lk_unlock_file) \
+ lock_printf("last unlocked at %s:%d\n", \
+ lkp->lk_unlock_file, lkp->lk_unlock_line); \
+ SLOCK_TRACE(); \
+ SPINLOCK_SPINCHECK_DEBUGGER; \
+ } \
+} while (/*CONSTCOND*/ 0)
+#else
+#define SPINLOCK_SPINCHECK_DECL /* nothing */
+#define SPINLOCK_SPINCHECK /* nothing */
+#endif /* LOCKDEBUG && DDB */
/*
- * For multiprocessor system, try spin lock first.
- *
- * This should be inline expanded below, but we cannot have #if
- * inside a multiline define.
+ * Acquire a resource.
*/
-int lock_wait_time = 100;
-#define PAUSE(lkp, wanted) \
- if (lock_wait_time > 0) { \
- int i; \
+#define ACQUIRE(lkp, error, extflags, drain, wanted) \
+ if ((extflags) & LK_SPIN) { \
+ int interlocked; \
+ SPINLOCK_SPINCHECK_DECL; \
\
- simple_unlock(&lkp->lk_interlock); \
- for (i = lock_wait_time; i > 0; i--) \
- if (!(wanted)) \
- break; \
- simple_lock(&lkp->lk_interlock); \
+ if ((drain) == 0) \
+ (lkp)->lk_waitcount++; \
+ for (interlocked = 1;;) { \
+ SPINLOCK_SPINCHECK; \
+ if (wanted) { \
+ if (interlocked) { \
+ INTERLOCK_RELEASE((lkp), \
+ LK_SPIN, s); \
+ interlocked = 0; \
+ } \
+ SPINLOCK_SPIN_HOOK; \
+ } else if (interlocked) { \
+ break; \
+ } else { \
+ INTERLOCK_ACQUIRE((lkp), LK_SPIN, s); \
+ interlocked = 1; \
+ } \
} \
- if (!(wanted)) \
- break;
+ if ((drain) == 0) \
+ (lkp)->lk_waitcount--; \
+ KASSERT((wanted) == 0); \
+ error = 0; /* sanity */ \
+ } else { \
+ for (error = 0; wanted; ) { \
+ if ((drain)) \
+ (lkp)->lk_flags |= LK_WAITDRAIN; \
+ else \
+ (lkp)->lk_waitcount++; \
+ /* XXX Cast away volatile. */ \
+ error = ltsleep((drain) ? \
+ (void *)&(lkp)->lk_flags : \
+ (void *)(lkp), (lkp)->lk_prio, \
+ (lkp)->lk_wmesg, (lkp)->lk_timo, \
+ &(lkp)->lk_interlock); \
+ if ((drain) == 0) \
+ (lkp)->lk_waitcount--; \
+ if (error) \
+ break; \
+ if ((extflags) & LK_SLEEPFAIL) { \
+ error = ENOLCK; \
+ break; \
+ } \
+ } \
+ }
-#else /* NCPUS == 1 */
+#define SETHOLDER(lkp, pid, cpu_id) \
+do { \
+ if ((lkp)->lk_flags & LK_SPIN) \
+ (lkp)->lk_cpu = cpu_id; \
+ else \
+ (lkp)->lk_lockholder = pid; \
+} while (/*CONSTCOND*/0)
-/*
- * It is an error to spin on a uniprocessor as nothing will ever cause
- * the simple lock to clear while we are executing.
- */
-#define PAUSE(lkp, wanted)
+#define WEHOLDIT(lkp, pid, cpu_id) \
+ (((lkp)->lk_flags & LK_SPIN) != 0 ? \
+ ((lkp)->lk_cpu == (cpu_id)) : \
+ ((lkp)->lk_lockholder == (pid)))
+
+#define WAKEUP_WAITER(lkp) \
+do { \
+ if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \
+ /* XXX Cast away volatile. */ \
+ wakeup((void *)(lkp)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#if defined(LOCKDEBUG) /* { */
+#if defined(MULTIPROCESSOR) /* { */
+struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
-#endif /* NCPUS == 1 */
+#define SPINLOCK_LIST_LOCK() \
+ __cpu_simple_lock(&spinlock_list_slock.lock_data)
+
+#define SPINLOCK_LIST_UNLOCK() \
+ __cpu_simple_unlock(&spinlock_list_slock.lock_data)
+#else
+#define SPINLOCK_LIST_LOCK() /* nothing */
+#define SPINLOCK_LIST_UNLOCK() /* nothing */
+#endif /* MULTIPROCESSOR */ /* } */
+
+TAILQ_HEAD(, lock) spinlock_list =
+ TAILQ_HEAD_INITIALIZER(spinlock_list);
+
+#define HAVEIT(lkp) \
+do { \
+ if ((lkp)->lk_flags & LK_SPIN) { \
+ int s = spllock(); \
+ SPINLOCK_LIST_LOCK(); \
+ /* XXX Cast away volatile. */ \
+ TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
+ lk_list); \
+ SPINLOCK_LIST_UNLOCK(); \
+ splx(s); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define DONTHAVEIT(lkp) \
+do { \
+ if ((lkp)->lk_flags & LK_SPIN) { \
+ int s = spllock(); \
+ SPINLOCK_LIST_LOCK(); \
+ /* XXX Cast away volatile. */ \
+ TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
+ lk_list); \
+ SPINLOCK_LIST_UNLOCK(); \
+ splx(s); \
+ } \
+} while (/*CONSTCOND*/0)
+#else
+#define HAVEIT(lkp) /* nothing */
+
+#define DONTHAVEIT(lkp) /* nothing */
+#endif /* LOCKDEBUG */ /* } */
+
+#if defined(LOCKDEBUG)
/*
- * Acquire a resource.
+ * Lock debug printing routine; can be configured to print to console
+ * or log to syslog.
*/
-#define ACQUIRE(lkp, error, extflags, wanted) \
- PAUSE(lkp, wanted); \
- for (error = 0; wanted; ) { \
- (lkp)->lk_waitcount++; \
- simple_unlock(&(lkp)->lk_interlock); \
- error = tsleep((void *)lkp, (lkp)->lk_prio, \
- (lkp)->lk_wmesg, (lkp)->lk_timo); \
- simple_lock(&(lkp)->lk_interlock); \
- (lkp)->lk_waitcount--; \
- if (error) \
- break; \
- if ((extflags) & LK_SLEEPFAIL) { \
- error = ENOLCK; \
- break; \
- } \
+void
+lock_printf(const char *fmt, ...)
+{
+ char b[150];
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (lock_debug_syslog)
+ vlog(LOG_DEBUG, fmt, ap);
+ else {
+ vsnprintf(b, sizeof(b), fmt, ap);
+ printf_nolog("%s", b);
}
+ va_end(ap);
+}
+#endif /* LOCKDEBUG */
/*
* Initialize a lock; required before use.
@@ -127,10 +314,18 @@ lockinit(lkp, prio, wmesg, timo, flags)
bzero(lkp, sizeof(struct lock));
simple_lock_init(&lkp->lk_interlock);
lkp->lk_flags = flags & LK_EXTFLG_MASK;
- lkp->lk_prio = prio;
- lkp->lk_timo = timo;
- lkp->lk_wmesg = wmesg;
- lkp->lk_lockholder = LK_NOPROC;
+ if (flags & LK_SPIN)
+ lkp->lk_cpu = LK_NOCPU;
+ else {
+ lkp->lk_lockholder = LK_NOPROC;
+ lkp->lk_prio = prio;
+ lkp->lk_timo = timo;
+ }
+ lkp->lk_wmesg = wmesg; /* just a name for spin locks */
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = NULL;
+ lkp->lk_unlock_file = NULL;
+#endif
}
/*
@@ -140,14 +335,14 @@ int
lockstatus(lkp)
struct lock *lkp;
{
- int lock_type = 0;
+ int s = 0, lock_type = 0;
- simple_lock(&lkp->lk_interlock);
+ INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
if (lkp->lk_exclusivecount != 0)
lock_type = LK_EXCLUSIVE;
else if (lkp->lk_sharecount != 0)
lock_type = LK_SHARED;
- simple_unlock(&lkp->lk_interlock);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
return (lock_type);
}
@@ -168,17 +363,33 @@ lockmgr(lkp, flags, interlkp, p)
int error;
pid_t pid;
int extflags;
+ cpuid_t cpu_id;
+ int s = 0;
error = 0;
- if (p)
- pid = p->p_pid;
- else
- pid = LK_KERNPROC;
- simple_lock(&lkp->lk_interlock);
+
+ INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
if (flags & LK_INTERLOCK)
simple_unlock(interlkp);
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
-#ifdef DIAGNOSTIC
+
+#ifdef DIAGNOSTIC /* { */
+ /*
+ * Don't allow spins on sleep locks and don't allow sleeps
+ * on spin locks.
+ */
+ if ((flags ^ lkp->lk_flags) & LK_SPIN)
+ panic("lockmgr: sleep/spin mismatch");
+#endif /* } */
+
+ if (extflags & LK_SPIN) {
+ pid = LK_KERNPROC;
+ } else {
+ /* Process context required. */
+ pid = p->p_pid;
+ }
+ cpu_id = CPU_NUMBER();
+
/*
* Once a lock has drained, the LK_DRAINING flag is set and an
* exclusive lock is returned. The only valid operation thereafter
@@ -191,12 +402,14 @@ lockmgr(lkp, flags, interlkp, p)
* the lock by specifying LK_REENABLE.
*/
if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
+#ifdef DIAGNOSTIC
if (lkp->lk_flags & LK_DRAINED)
panic("lockmgr: using decommissioned lock");
if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
- lkp->lk_lockholder != pid)
+ WEHOLDIT(lkp, pid, cpu_id) == 0)
panic("lockmgr: non-release on draining lock: %d",
flags & LK_TYPE_MASK);
+#endif /* DIAGNOSTIC */
lkp->lk_flags &= ~LK_DRAINING;
if ((flags & LK_REENABLE) == 0)
lkp->lk_flags |= LK_DRAINED;
@@ -208,12 +421,11 @@ lockmgr(lkp, flags, interlkp, p)
if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
(LK_CANRECURSE|LK_RECURSEFAIL))
panic("lockmgr: make up your mind");
-#endif /* DIAGNOSTIC */
switch (flags & LK_TYPE_MASK) {
case LK_SHARED:
- if (lkp->lk_lockholder != pid) {
+ if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
/*
* If just polling, check to see if we will block.
*/
@@ -225,12 +437,12 @@ lockmgr(lkp, flags, interlkp, p)
/*
* Wait for exclusive locks and upgrades to clear.
*/
- ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+ ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
if (error)
break;
lkp->lk_sharecount++;
- COUNT(p, 1);
+ COUNT(lkp, p, cpu_id, 1);
break;
}
/*
@@ -238,18 +450,24 @@ lockmgr(lkp, flags, interlkp, p)
* An alternative would be to fail with EDEADLK.
*/
lkp->lk_sharecount++;
- COUNT(p, 1);
+ COUNT(lkp, p, cpu_id, 1);
/* fall into downgrade */
case LK_DOWNGRADE:
- if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
+ if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
+ lkp->lk_exclusivecount == 0)
panic("lockmgr: not holding exclusive lock");
lkp->lk_sharecount += lkp->lk_exclusivecount;
lkp->lk_exclusivecount = 0;
+ lkp->lk_recurselevel = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
- lkp->lk_lockholder = LK_NOPROC;
- if (lkp->lk_waitcount)
- wakeup((void *)lkp);
+ SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
+ DONTHAVEIT(lkp);
+ WAKEUP_WAITER(lkp);
break;
case LK_EXCLUPGRADE:
@@ -260,7 +478,7 @@ lockmgr(lkp, flags, interlkp, p)
*/
if (lkp->lk_flags & LK_WANT_UPGRADE) {
lkp->lk_sharecount--;
- COUNT(p, -1);
+ COUNT(lkp, p, cpu_id, -1);
error = EBUSY;
break;
}
@@ -275,10 +493,10 @@ lockmgr(lkp, flags, interlkp, p)
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
- if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
+ if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
panic("lockmgr: upgrade exclusive lock");
lkp->lk_sharecount--;
- COUNT(p, -1);
+ COUNT(lkp, p, cpu_id, -1);
/*
* If we are just polling, check to see if we will block.
*/
@@ -295,16 +513,23 @@ lockmgr(lkp, flags, interlkp, p)
* drop to zero, then take exclusive lock.
*/
lkp->lk_flags |= LK_WANT_UPGRADE;
- ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
+ ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
lkp->lk_flags &= ~LK_WANT_UPGRADE;
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
+ HAVEIT(lkp);
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
- COUNT(p, 1);
+ if (extflags & LK_SETRECURSE)
+ lkp->lk_recurselevel = 1;
+ COUNT(lkp, p, cpu_id, 1);
break;
}
/*
@@ -312,24 +537,28 @@ lockmgr(lkp, flags, interlkp, p)
* lock, awaken upgrade requestor if we are the last shared
* lock, then request an exclusive lock.
*/
- if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
- wakeup((void *)lkp);
+ if (lkp->lk_sharecount == 0)
+ WAKEUP_WAITER(lkp);
/* fall into exclusive request */
case LK_EXCLUSIVE:
- if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
+ if (WEHOLDIT(lkp, pid, cpu_id)) {
/*
- * Recursive lock.
+ * Recursive lock.
*/
- if ((extflags & LK_CANRECURSE) == 0) {
+ if ((extflags & LK_CANRECURSE) == 0 &&
+ lkp->lk_recurselevel == 0) {
if (extflags & LK_RECURSEFAIL) {
error = EDEADLK;
break;
- }
- panic("lockmgr: locking against myself");
+ } else
+ panic("lockmgr: locking against myself");
}
lkp->lk_exclusivecount++;
- COUNT(p, 1);
+ if (extflags & LK_SETRECURSE &&
+ lkp->lk_recurselevel == 0)
+ lkp->lk_recurselevel = lkp->lk_exclusivecount;
+ COUNT(lkp, p, cpu_id, 1);
break;
}
/*
@@ -344,7 +573,7 @@ lockmgr(lkp, flags, interlkp, p)
/*
* Try to acquire the want_exclusive flag.
*/
- ACQUIRE(lkp, error, extflags, lkp->lk_flags &
+ ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
(LK_HAVE_EXCL | LK_WANT_EXCL));
if (error)
break;
@@ -352,38 +581,62 @@ lockmgr(lkp, flags, interlkp, p)
/*
* Wait for shared locks and upgrades to finish.
*/
- ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
+ ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
(lkp->lk_flags & LK_WANT_UPGRADE));
lkp->lk_flags &= ~LK_WANT_EXCL;
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
+ HAVEIT(lkp);
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
- COUNT(p, 1);
+ if (extflags & LK_SETRECURSE)
+ lkp->lk_recurselevel = 1;
+ COUNT(lkp, p, cpu_id, 1);
break;
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
- if (pid != lkp->lk_lockholder)
- panic("lockmgr: pid %d, not %s %d unlocking",
- pid, "exclusive lock holder",
- lkp->lk_lockholder);
+ if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
+ if (lkp->lk_flags & LK_SPIN) {
+ panic("lockmgr: processor %lu, not "
+ "exclusive lock holder %lu "
+ "unlocking", cpu_id, lkp->lk_cpu);
+ } else {
+ panic("lockmgr: pid %d, not "
+ "exclusive lock holder %d "
+ "unlocking", pid,
+ lkp->lk_lockholder);
+ }
+ }
+ if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
+ lkp->lk_recurselevel = 0;
lkp->lk_exclusivecount--;
- COUNT(p, -1);
+ COUNT(lkp, p, cpu_id, -1);
if (lkp->lk_exclusivecount == 0) {
lkp->lk_flags &= ~LK_HAVE_EXCL;
- lkp->lk_lockholder = LK_NOPROC;
+ SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
+ DONTHAVEIT(lkp);
}
} else if (lkp->lk_sharecount != 0) {
lkp->lk_sharecount--;
- COUNT(p, -1);
- } else
- panic("lockmgr: LK_RELEASE of unlocked lock");
- if (lkp->lk_waitcount)
- wakeup((void *)lkp);
+ COUNT(lkp, p, cpu_id, -1);
+ }
+#ifdef DIAGNOSTIC
+ else
+ panic("lockmgr: release of unlocked lock!");
+#endif
+ WAKEUP_WAITER(lkp);
break;
case LK_DRAIN:
@@ -393,7 +646,7 @@ lockmgr(lkp, flags, interlkp, p)
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
- if (lkp->lk_lockholder == pid)
+ if (WEHOLDIT(lkp, pid, cpu_id))
panic("lockmgr: draining against myself");
/*
* If we are just polling, check to see if we will sleep.
@@ -404,66 +657,228 @@ lockmgr(lkp, flags, interlkp, p)
error = EBUSY;
break;
}
- PAUSE(lkp, ((lkp->lk_flags &
+ ACQUIRE(lkp, error, extflags, 1,
+ ((lkp->lk_flags &
(LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
- for (error = 0; ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
- lkp->lk_flags |= LK_WAITDRAIN;
- simple_unlock(&lkp->lk_interlock);
- if ((error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
- lkp->lk_wmesg, lkp->lk_timo)) != 0)
- return (error);
- if ((extflags) & LK_SLEEPFAIL)
- return (ENOLCK);
- simple_lock(&lkp->lk_interlock);
- }
+ lkp->lk_sharecount != 0 ||
+ lkp->lk_waitcount != 0));
+ if (error)
+ break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
- lkp->lk_lockholder = pid;
+ SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
+ HAVEIT(lkp);
lkp->lk_exclusivecount = 1;
- COUNT(p, 1);
+ /* XXX unlikely that we'd want this */
+ if (extflags & LK_SETRECURSE)
+ lkp->lk_recurselevel = 1;
+ COUNT(lkp, p, cpu_id, 1);
break;
default:
- simple_unlock(&lkp->lk_interlock);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
panic("lockmgr: unknown locktype request %d",
flags & LK_TYPE_MASK);
/* NOTREACHED */
}
- if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
- lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
+ if ((lkp->lk_flags & (LK_WAITDRAIN | LK_SPIN)) == LK_WAITDRAIN &&
+ ((lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+ lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
- simple_unlock(&lkp->lk_interlock);
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
return (error);
}
/*
+ * For a recursive spinlock held one or more times by the current CPU,
+ * release all N locks, and return N.
+ * Intended for use in mi_switch() shortly before context switching.
+ */
+
+#ifdef notyet
+int
+#if defined(LOCKDEBUG)
+_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
+#else
+spinlock_release_all(__volatile struct lock *lkp)
+#endif
+{
+ int s, count;
+ cpuid_t cpu_id;
+
+ KASSERT(lkp->lk_flags & LK_SPIN);
+
+ INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
+
+ cpu_id = CPU_NUMBER();
+ count = lkp->lk_exclusivecount;
+
+ if (count != 0) {
+#ifdef DIAGNOSTIC
+ if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
+ panic("spinlock_release_all: processor %lu, not "
+ "exclusive lock holder %lu "
+ "unlocking", (long)cpu_id, lkp->lk_cpu);
+ }
+#endif
+ lkp->lk_recurselevel = 0;
+ lkp->lk_exclusivecount = 0;
+ COUNT_CPU(cpu_id, -count);
+ lkp->lk_flags &= ~LK_HAVE_EXCL;
+ SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
+ DONTHAVEIT(lkp);
+ }
+#ifdef DIAGNOSTIC
+ else if (lkp->lk_sharecount != 0)
+ panic("spinlock_release_all: release of shared lock!");
+ else
+ panic("spinlock_release_all: release of unlocked lock!");
+#endif
+ INTERLOCK_RELEASE(lkp, LK_SPIN, s);
+
+ return (count);
+}
+#endif
+
+/*
+ * For a recursive spinlock held one or more times by the current CPU,
+ * release all N locks, and return N.
+ * Intended for use in mi_switch() right after resuming execution.
+ */
+
+#ifdef notyet
+void
+#if defined(LOCKDEBUG)
+_spinlock_acquire_count(__volatile struct lock *lkp, int count,
+ const char *file, int line)
+#else
+spinlock_acquire_count(__volatile struct lock *lkp, int count)
+#endif
+{
+ int s, error;
+ cpuid_t cpu_id;
+
+ KASSERT(lkp->lk_flags & LK_SPIN);
+
+ INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
+
+ cpu_id = CPU_NUMBER();
+
+#ifdef DIAGNOSTIC
+ if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
+ panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
+#endif
+ /*
+ * Try to acquire the want_exclusive flag.
+ */
+ ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
+ (LK_HAVE_EXCL | LK_WANT_EXCL));
+ lkp->lk_flags |= LK_WANT_EXCL;
+ /*
+ * Wait for shared locks and upgrades to finish.
+ */
+ ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
+ (lkp->lk_flags & LK_WANT_UPGRADE));
+ lkp->lk_flags &= ~LK_WANT_EXCL;
+ lkp->lk_flags |= LK_HAVE_EXCL;
+ SETHOLDER(lkp, LK_NOPROC, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
+ HAVEIT(lkp);
+ if (lkp->lk_exclusivecount != 0)
+ panic("lockmgr: non-zero exclusive count");
+ lkp->lk_exclusivecount = count;
+ lkp->lk_recurselevel = 1;
+ COUNT_CPU(cpu_id, count);
+
+ INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
+}
+#endif
+
+/*
* Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
*/
void
lockmgr_printinfo(lkp)
- struct lock *lkp;
+ __volatile struct lock *lkp;
{
if (lkp->lk_sharecount)
printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
lkp->lk_sharecount);
- else if (lkp->lk_flags & LK_HAVE_EXCL)
- printf(" lock type %s: EXCL (count %d) by pid %d",
- lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
- if (lkp->lk_waitcount > 0)
+ else if (lkp->lk_flags & LK_HAVE_EXCL) {
+ printf(" lock type %s: EXCL (count %d) by ",
+ lkp->lk_wmesg, lkp->lk_exclusivecount);
+ if (lkp->lk_flags & LK_SPIN)
+ printf("processor %lu", lkp->lk_cpu);
+ else
+ printf("pid %d", lkp->lk_lockholder);
+ } else
+ printf(" not locked");
+ if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
printf(" with %d pending", lkp->lk_waitcount);
}
#if defined(LOCKDEBUG)
+TAILQ_HEAD(, simplelock) simplelock_list =
+ TAILQ_HEAD_INITIALIZER(simplelock_list);
+
+#if defined(MULTIPROCESSOR) /* { */
+struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
+
+#define SLOCK_LIST_LOCK() \
+ __cpu_simple_lock(&simplelock_list_slock.lock_data)
+
+#define SLOCK_LIST_UNLOCK() \
+ __cpu_simple_unlock(&simplelock_list_slock.lock_data)
+
+#define SLOCK_COUNT(x) \
+ curcpu()->ci_simple_locks += (x)
+#else
+u_long simple_locks;
+
+#define SLOCK_LIST_LOCK() /* nothing */
+
+#define SLOCK_LIST_UNLOCK() /* nothing */
-int lockdebug_print = 0;
-int lockdebug_debugger = 0;
+#define SLOCK_COUNT(x) simple_locks += (x)
+#endif /* MULTIPROCESSOR */ /* } */
+
+#ifdef MULTIPROCESSOR
+#define SLOCK_MP() lock_printf("on cpu %ld\n", \
+ (u_long) cpu_number())
+#else
+#define SLOCK_MP() /* nothing */
+#endif
+
+#define SLOCK_WHERE(str, alp, id, l) \
+do { \
+ lock_printf("\n"); \
+ lock_printf(str); \
+ lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
+ SLOCK_MP(); \
+ if ((alp)->lock_file != NULL) \
+ lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
+ (alp)->lock_line); \
+ if ((alp)->unlock_file != NULL) \
+ lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
+ (alp)->unlock_line); \
+ SLOCK_TRACE() \
+ SLOCK_DEBUGGER(); \
+} while (/*CONSTCOND*/0)
/*
* Simple lock functions so that the debugger can see from whence
@@ -474,7 +889,16 @@ simple_lock_init(lkp)
struct simplelock *lkp;
{
- lkp->lock_data = SLOCK_UNLOCKED;
+#if defined(MULTIPROCESSOR) /* { */
+ __cpu_simple_lock_init(&alp->lock_data);
+#else
+ alp->lock_data = __SIMPLELOCK_UNLOCKED;
+#endif /* } */
+ alp->lock_file = NULL;
+ alp->lock_line = 0;
+ alp->unlock_file = NULL;
+ alp->unlock_line = 0;
+ alp->lock_holder = LK_NOCPU;
}
void
@@ -483,16 +907,80 @@ _simple_lock(lkp, id, l)
const char *id;
int l;
{
+ cpuid_t cpu_id = CPU_NUMBER();
+ int s;
+
+ s = spllock();
+
+ /*
+ * MULTIPROCESSOR case: This is `safe' since if it's not us, we
+ * don't take any action, and just fall into the normal spin case.
+ */
+ if (alp->lock_data == __SIMPLELOCK_LOCKED) {
+#if defined(MULTIPROCESSOR) /* { */
+ if (alp->lock_holder == cpu_id) {
+ SLOCK_WHERE("simple_lock: locking against myself\n",
+ alp, id, l);
+ goto out;
+ }
+#else
+ SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
+ goto out;
+#endif /* MULTIPROCESSOR */ /* } */
+ }
+
+#if defined(MULTIPROCESSOR) /* { */
+ /* Acquire the lock before modifying any fields. */
+ splx(s);
+ __cpu_simple_lock(&alp->lock_data);
+ s = spllock();
+#else
+ alp->lock_data = __SIMPLELOCK_LOCKED;
+#endif /* } */
- if (lkp->lock_data == SLOCK_LOCKED) {
- if (lockdebug_print)
- printf("%s:%d simple_lock: lock held...\n", id, l);
- if (lockdebug_debugger)
- Debugger();
+ if (alp->lock_holder != LK_NOCPU) {
+ SLOCK_WHERE("simple_lock: uninitialized lock\n",
+ alp, id, l);
}
- lkp->lock_data = SLOCK_LOCKED;
+ alp->lock_file = id;
+ alp->lock_line = l;
+ alp->lock_holder = cpu_id;
+
+ SLOCK_LIST_LOCK();
+ /* XXX Cast away volatile */
+ TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
+ SLOCK_LIST_UNLOCK();
+
+ SLOCK_COUNT(1);
+
+ out:
+ splx(s);
}
+int
+_simple_lock_held(__volatile struct simplelock *alp)
+{
+ cpuid_t cpu_id = CPU_NUMBER();
+ int s, locked = 0;
+
+ s = spllock();
+
+#if defined(MULTIPROCESSOR)
+ if (__cpu_simple_lock_try(&alp->lock_data) == 0)
+ locked = (alp->lock_holder == cpu_id);
+ else
+ __cpu_simple_unlock(&alp->lock_data);
+#else
+ if (alp->lock_data == __SIMPLELOCK_LOCKED) {
+ locked = 1;
+ KASSERT(alp->lock_holder == cpu_id);
+ }
+#endif
+
+ splx(s);
+
+ return (locked);
+}
int
_simple_lock_try(lkp, id, l)
@@ -500,14 +988,50 @@ _simple_lock_try(lkp, id, l)
const char *id;
int l;
{
+ cpuid_t cpu_id = CPU_NUMBER();
+ int s, rv = 0;
+
+ s = spllock();
- if (lkp->lock_data == SLOCK_LOCKED) {
- if (lockdebug_print)
- printf("%s:%d simple_lock: lock held...\n", id, l);
- if (lockdebug_debugger)
- Debugger();
+ /*
+ * MULTIPROCESSOR case: This is `safe' since if it's not us, we
+ * don't take any action.
+ */
+#if defined(MULTIPROCESSOR) /* { */
+ if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
+ if (alp->lock_holder == cpu_id)
+ SLOCK_WHERE("simple_lock_try: locking against myself\n",
+ alp, id, l);
+ goto out;
}
- return lkp->lock_data = SLOCK_LOCKED;
+#else
+ if (alp->lock_data == __SIMPLELOCK_LOCKED) {
+ SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
+ goto out;
+ }
+ alp->lock_data = __SIMPLELOCK_LOCKED;
+#endif /* MULTIPROCESSOR */ /* } */
+
+ /*
+ * At this point, we have acquired the lock.
+ */
+
+ rv = 1;
+
+ alp->lock_file = id;
+ alp->lock_line = l;
+ alp->lock_holder = cpu_id;
+
+ SLOCK_LIST_LOCK();
+ /* XXX Cast away volatile. */
+ TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
+ SLOCK_LIST_UNLOCK();
+
+ SLOCK_COUNT(1);
+
+ out:
+ splx(s);
+ return (rv);
}
void
@@ -516,30 +1040,239 @@ _simple_unlock(lkp, id, l)
const char *id;
int l;
{
+ int s;
- if (lkp->lock_data == SLOCK_UNLOCKED) {
- if (lockdebug_print)
- printf("%s:%d simple_unlock: lock not held...\n",
- id, l);
- if (lockdebug_debugger)
- Debugger();
+ s = spllock();
+
+ /*
+ * MULTIPROCESSOR case: This is `safe' because we think we hold
+ * the lock, and if we don't, we don't take any action.
+ */
+ if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
+ SLOCK_WHERE("simple_unlock: lock not held\n",
+ alp, id, l);
+ goto out;
}
- lkp->lock_data = SLOCK_UNLOCKED;
+
+ SLOCK_LIST_LOCK();
+ TAILQ_REMOVE(&simplelock_list, alp, list);
+ SLOCK_LIST_UNLOCK();
+
+ SLOCK_COUNT(-1);
+
+ alp->list.tqe_next = NULL; /* sanity */
+ alp->list.tqe_prev = NULL; /* sanity */
+
+ alp->unlock_file = id;
+ alp->unlock_line = l;
+
+#if defined(MULTIPROCESSOR) /* { */
+ alp->lock_holder = LK_NOCPU;
+ /* Now that we've modified all fields, release the lock. */
+ __cpu_simple_unlock(&alp->lock_data);
+#else
+ alp->lock_data = __SIMPLELOCK_UNLOCKED;
+ KASSERT(alp->lock_holder == CPU_NUMBER());
+ alp->lock_holder = LK_NOCPU;
+#endif /* } */
+
+ out:
+ splx(s);
}
void
-_simple_lock_assert(lkp, state, id, l)
- __volatile struct simplelock *lkp;
- int state;
- const char *id;
- int l;
+simple_lock_dump(void)
+{
+ struct simplelock *alp;
+ int s;
+
+ s = spllock();
+ SLOCK_LIST_LOCK();
+ lock_printf("all simple locks:\n");
+ TAILQ_FOREACH(alp, &simplelock_list, list) {
+ lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
+ alp->lock_file, alp->lock_line);
+ }
+ SLOCK_LIST_UNLOCK();
+ splx(s);
+}
+
+void
+simple_lock_freecheck(void *start, void *end)
+{
+ struct simplelock *alp;
+ int s;
+
+ s = spllock();
+ SLOCK_LIST_LOCK();
+ TAILQ_FOREACH(alp, &simplelock_list, list) {
+ if ((void *)alp >= start && (void *)alp < end) {
+ lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
+ alp, alp->lock_holder, alp->lock_file,
+ alp->lock_line);
+ SLOCK_DEBUGGER();
+ }
+ }
+ SLOCK_LIST_UNLOCK();
+ splx(s);
+ }
+
+/*
+ * We must be holding exactly one lock: the sched_lock.
+ */
+
+#ifdef notyet
+void
+simple_lock_switchcheck(void)
+{
+
+ simple_lock_only_held(&sched_lock, "switching");
+}
+#endif
+
+void
+simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
- if (lkp->lock_data != state) {
- if (lockdebug_print)
- printf("%s:%d simple_lock_assert: wrong state: %d",
- id, l, lkp->lock_data);
- if (lockdebug_debugger)
- Debugger();
+ struct simplelock *alp;
+ cpuid_t cpu_id = CPU_NUMBER();
+ int s;
+
+ if (lp) {
+ LOCK_ASSERT(simple_lock_held(lp));
+ }
+ s = spllock();
+ SLOCK_LIST_LOCK();
+ TAILQ_FOREACH(alp, &simplelock_list, list) {
+ if (alp == lp)
+ continue;
+ if (alp->lock_holder == cpu_id)
+ break;
+ }
+ SLOCK_LIST_UNLOCK();
+ splx(s);
+
+ if (alp != NULL) {
+ lock_printf("\n%s with held simple_lock %p "
+ "CPU %lu %s:%d\n",
+ where, alp, alp->lock_holder, alp->lock_file,
+ alp->lock_line);
+ SLOCK_TRACE();
+ SLOCK_DEBUGGER();
}
}
#endif /* LOCKDEBUG */
+
+#if defined(MULTIPROCESSOR)
+/*
+ * Functions for manipulating the kernel_lock. We put them here
+ * so that they show up in profiles.
+ */
+
+/*
+ * XXX Instead of using struct lock for the kernel lock and thus requiring us
+ * XXX to implement simplelocks, causing all sorts of fine-grained locks all
+ * XXX over our tree getting activated consuming both time and potentially
+ * XXX introducing locking protocol bugs.
+ */
+#ifdef notyet
+
+struct lock kernel_lock;
+
+void
+_kernel_lock_init(void)
+{
+ spinlockinit(&kernel_lock, "klock", 0);
+}
+
+/*
+ * Acquire/release the kernel lock. Intended for use in the scheduler
+ * and the lower half of the kernel.
+ */
+void
+_kernel_lock(int flag)
+{
+ SCHED_ASSERT_UNLOCKED();
+ spinlockmgr(&kernel_lock, flag, 0);
+}
+
+void
+_kernel_unlock(void)
+{
+ spinlockmgr(&kernel_lock, LK_RELEASE, 0);
+}
+
+/*
+ * Acquire/release the kernel_lock on behalf of a process. Intended for
+ * use in the top half of the kernel.
+ */
+void
+_kernel_proc_lock(struct proc *p)
+{
+ SCHED_ASSERT_UNLOCKED();
+ spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
+ p->p_flag |= P_BIGLOCK;
+}
+
+void
+_kernel_proc_unlock(struct proc *p)
+{
+ p->p_flag &= ~P_BIGLOCK;
+ spinlockmgr(&kernel_lock, LK_RELEASE, 0);
+}
+
+#else
+
+struct __mp_lock kernel_lock;
+
+void
+_kernel_lock_init(void)
+{
+ __mp_lock_init(&kernel_lock);
+}
+
+/*
+ * Acquire/release the kernel lock. Intended for use in the scheduler
+ * and the lower half of the kernel.
+ */
+
+/* XXX The flag should go, all callers want equal behaviour. */
+void
+_kernel_lock(int flag)
+{
+ SCHED_ASSERT_UNLOCKED();
+ __mp_lock(&kernel_lock);
+}
+
+void
+_kernel_unlock(void)
+{
+ __mp_unlock(&kernel_lock);
+}
+
+/*
+ * Acquire/release the kernel_lock on behalf of a process. Intended for
+ * use in the top half of the kernel.
+ */
+void
+_kernel_proc_lock(struct proc *p)
+{
+ SCHED_ASSERT_UNLOCKED();
+ __mp_lock(&kernel_lock);
+ p->p_flag |= P_BIGLOCK;
+}
+
+void
+_kernel_proc_unlock(struct proc *p)
+{
+ p->p_flag &= ~P_BIGLOCK;
+ __mp_unlock(&kernel_lock);
+}
+
+#endif
+
+#ifdef MP_LOCKDEBUG
+/* CPU-dependent timing, needs this to be settable from ddb. */
+int __mp_lock_spinout = 200000000;
+#endif
+
+#endif /* MULTIPROCESSOR */
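
The rewritten kern_lock.c drives both sleep locks and spin locks through the
same lockmgr() engine, selected per lock with LK_SPIN at lockinit() time.  A
minimal usage sketch under that assumption; the lock variable and function
names are illustrative:

/* Illustrative only: declaring and using a spin-type lockmgr lock. */
struct lock example_spinlock;

void
example_spinlock_init(void)
{
	/* prio and timo are ignored for spin locks; "exspin" is just a name */
	lockinit(&example_spinlock, 0, "exspin", 0, LK_SPIN);
}

void
example_spinlock_use(void)
{
	/* busy-waits (dropping the interlock while spinning) until granted */
	lockmgr(&example_spinlock, LK_EXCLUSIVE, NULL, curproc);
	/* ... short critical section; sleeping here would be an error ... */
	lockmgr(&example_spinlock, LK_RELEASE, NULL, curproc);
}
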
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 84519c2b60e..aa7ec306c56 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_proc.c,v 1.18 2004/01/29 17:19:42 millert Exp $ */
+/* $OpenBSD: kern_proc.c,v 1.19 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_proc.c,v 1.14 1996/02/09 18:59:41 christos Exp $ */
/*
@@ -85,7 +85,7 @@ struct pool pcred_pool;
* proclist. Processes on this proclist are also on zombproc;
* we use the p_hash member to linkup to deadproc.
*/
-struct simplelock deadproc_slock;
+struct SIMPLELOCK deadproc_slock;
struct proclist deadproc; /* dead, but not yet undead */
static void orphanpg(struct pgrp *);
@@ -104,7 +104,7 @@ procinit()
LIST_INIT(&zombproc);
LIST_INIT(&deadproc);
- simple_lock_init(&deadproc_slock);
+ SIMPLE_LOCK_INIT(&deadproc_slock);
pidhashtbl = hashinit(maxproc / 4, M_PROC, M_WAITOK, &pidhash);
pgrphashtbl = hashinit(maxproc / 4, M_PROC, M_WAITOK, &pgrphash);
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 1e868518989..6d7af1fd136 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_resource.c,v 1.26 2003/12/11 23:02:30 millert Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.27 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
@@ -44,6 +44,7 @@
#include <sys/resourcevar.h>
#include <sys/pool.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
@@ -184,6 +185,7 @@ donice(curp, chgp, n)
register int n;
{
register struct pcred *pcred = curp->p_cred;
+ int s;
if (pcred->pc_ucred->cr_uid && pcred->p_ruid &&
pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid &&
@@ -197,7 +199,9 @@ donice(curp, chgp, n)
if (n < chgp->p_nice && suser(curp, 0))
return (EACCES);
chgp->p_nice = n;
+ SCHED_LOCK(s);
(void)resetpriority(chgp);
+ SCHED_UNLOCK(s);
return (0);
}
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 494f6878d80..0913d2b2a1f 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sig.c,v 1.70 2004/04/06 17:24:11 mickey Exp $ */
+/* $OpenBSD: kern_sig.c,v 1.71 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/*
@@ -62,6 +62,7 @@
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ptrace.h>
+#include <sys/sched.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
@@ -805,19 +806,30 @@ trapsignal(p, signum, code, type, sigval)
* regardless of the signal action (eg, blocked or ignored).
*
* Other ignored signals are discarded immediately.
+ *
+ * XXXSMP: Invoked as psignal() or sched_psignal().
*/
void
-psignal(p, signum)
+psignal1(p, signum, dolock)
register struct proc *p;
register int signum;
+ int dolock; /* XXXSMP: works, but icky */
{
register int s, prop;
register sig_t action;
int mask;
+#ifdef DIAGNOSTIC
if ((u_int)signum >= NSIG || signum == 0)
panic("psignal signal number");
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_ASSERT_UNLOCKED();
+ else
+ SCHED_ASSERT_LOCKED();
+#endif
+
/* Ignore signal if we are exiting */
if (p->p_flag & P_WEXIT)
return;
@@ -879,7 +891,10 @@ psignal(p, signum)
*/
if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP))
return;
- s = splhigh();
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_LOCK(s);
+
switch (p->p_stat) {
case SSLEEP:
@@ -921,7 +936,11 @@ psignal(p, signum)
p->p_siglist &= ~mask;
p->p_xstat = signum;
if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
- psignal(p->p_pptr, SIGCHLD);
+ /*
+ * XXXSMP: recursive call; don't lock
+ * the second time around.
+ */
+ sched_psignal(p->p_pptr, SIGCHLD);
proc_stop(p);
goto out;
}
@@ -1009,7 +1028,9 @@ runfast:
run:
setrunnable(p);
out:
- splx(s);
+ /* XXXSMP: works, but icky */
+ if (dolock)
+ SCHED_UNLOCK(s);
}
/*
@@ -1054,7 +1075,7 @@ issignal(struct proc *p)
*/
p->p_xstat = signum;
- s = splstatclock(); /* protect mi_switch */
+ SCHED_LOCK(s); /* protect mi_switch */
if (p->p_flag & P_FSTRACE) {
#ifdef PROCFS
/* procfs debugging */
@@ -1070,6 +1091,7 @@ issignal(struct proc *p)
proc_stop(p);
mi_switch();
}
+ SCHED_ASSERT_UNLOCKED();
splx(s);
/*
@@ -1130,8 +1152,9 @@ issignal(struct proc *p)
if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0)
psignal(p->p_pptr, SIGCHLD);
proc_stop(p);
- s = splstatclock();
+ SCHED_LOCK(s);
mi_switch();
+ SCHED_ASSERT_UNLOCKED();
splx(s);
break;
} else if (prop & SA_IGNORE) {
@@ -1179,6 +1202,9 @@ void
proc_stop(p)
struct proc *p;
{
+#ifdef MULTIPROCESSOR
+ SCHED_ASSERT_LOCKED();
+#endif
p->p_stat = SSTOP;
p->p_flag &= ~P_WAITED;
@@ -1205,6 +1231,9 @@ postsig(signum)
if (signum == 0)
panic("postsig");
#endif
+
+ KERNEL_PROC_LOCK(p);
+
mask = sigmask(signum);
p->p_siglist &= ~mask;
action = ps->ps_sigact[signum];
@@ -1254,7 +1283,11 @@ postsig(signum)
* mask from before the sigpause is what we want
* restored after the signal processing is completed.
*/
+#ifdef MULTIPROCESSOR
+ s = splsched();
+#else
s = splhigh();
+#endif
if (ps->ps_flags & SAS_OLDMASK) {
returnmask = ps->ps_oldmask;
ps->ps_flags &= ~SAS_OLDMASK;
@@ -1279,6 +1312,8 @@ postsig(signum)
(*p->p_emul->e_sendsig)(action, signum, returnmask, code,
type, sigval);
}
+
+ KERNEL_PROC_UNLOCK(p);
}
/*
@@ -1308,7 +1343,6 @@ sigexit(p, signum)
register struct proc *p;
int signum;
{
-
/* Mark process as going away */
p->p_flag |= P_WEXIT;
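
The kern_sig.c changes turn psignal() into psignal1() with a dolock argument
so that callers already holding sched_lock (such as the recursive SIGCHLD post
inside psignal1() itself) do not try to take it again.  The psignal() and
sched_psignal() names used in the hunks are presumably thin wrappers in a
header that is not part of this diff; a sketch of what they would look like
under that assumption:

/* Assumed wrappers, not shown in this diff: */
#define psignal(p, sig)		psignal1((p), (sig), 1)	/* acquires SCHED_LOCK */
#define sched_psignal(p, sig)	psignal1((p), (sig), 0)	/* sched_lock already held */
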
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index 52432e05522..32e659af713 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_subr.c,v 1.27 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_subr.c,v 1.28 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_subr.c,v 1.15 1996/04/09 17:21:56 ragge Exp $ */
/*
@@ -40,6 +40,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/sched.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/kernel.h>
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index baba311f668..ab7ca8f7ed3 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.55 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.56 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -59,15 +59,19 @@ u_char curpriority; /* usrpri of curproc */
#endif
int lbolt; /* once a second sleep address */
#ifdef __HAVE_CPUINFO
-int rrticks_init; /* # of harclock ticks per roundrobin */
+int rrticks_init; /* # of hardclock ticks per roundrobin() */
#endif
int whichqs; /* Bit mask summary of non-empty Q's. */
struct prochd qs[NQS];
+struct SIMPLELOCK sched_lock;
+
void scheduler_start(void);
-#ifndef __HAVE_CPUINFO
+#ifdef __HAVE_CPUINFO
+void roundrobin(struct cpu_info *);
+#else
void roundrobin(void *);
#endif
void schedcpu(void *);
@@ -85,11 +89,13 @@ scheduler_start()
/*
* We avoid polluting the global namespace by keeping the scheduler
* timeouts static in this function.
- * We setup the timeouts here and kick roundrobin and schedcpu once to
+ * We setup the timeouts here and kick schedcpu and roundrobin once to
* make them do their job.
*/
- timeout_set(&roundrobin_to, roundrobin, &roundrobin_to);
+#ifndef __HAVE_CPUINFO
+ timeout_set(&roundrobin_to, roundrobin, &roundrobin_to);
+#endif
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
#ifdef __HAVE_CPUINFO
@@ -103,6 +109,7 @@ scheduler_start()
/*
* Force switch among equal priority processes every 100ms.
*/
+/* ARGSUSED */
#ifdef __HAVE_CPUINFO
void
roundrobin(struct cpu_info *ci)
@@ -122,7 +129,7 @@ roundrobin(struct cpu_info *ci)
*/
spc->spc_schedflags |= SPCF_SHOULDYIELD;
} else {
- spc->spc_schedflags |= SPCF_SEENRR;
+ spc->spc_schedflags |= SPCF_SEENRR;
}
splx(s);
}
@@ -130,7 +137,6 @@ roundrobin(struct cpu_info *ci)
need_resched(curcpu());
}
#else
-/* ARGSUSED */
void
roundrobin(void *arg)
{
@@ -152,7 +158,8 @@ roundrobin(void *arg)
}
splx(s);
}
- need_resched();
+
+ need_resched(0);
timeout_add(to, hz / 10);
}
#endif
@@ -298,6 +305,8 @@ schedcpu(arg)
p->p_cpticks = 0;
newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
p->p_estcpu = newcpu;
+ splx(s);
+ SCHED_LOCK(s);
resetpriority(p);
if (p->p_priority >= PUSER) {
if ((p != curproc) &&
@@ -310,7 +319,7 @@ schedcpu(arg)
} else
p->p_priority = p->p_usrpri;
}
- splx(s);
+ SCHED_UNLOCK(s);
}
uvm_meter();
wakeup((caddr_t)&lbolt);
@@ -329,6 +338,8 @@ updatepri(p)
register unsigned int newcpu = p->p_estcpu;
register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
+ SCHED_ASSERT_LOCKED();
+
if (p->p_slptime > 5 * loadfac)
p->p_estcpu = 0;
else {
@@ -392,11 +403,6 @@ ltsleep(ident, priority, wmesg, timo, interlock)
int catch = priority & PCATCH;
int relock = (priority & PNORELOCK) == 0;
-#ifdef KTRACE
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p, 1, 0);
-#endif
- s = splhigh();
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration,
@@ -404,16 +410,26 @@ ltsleep(ident, priority, wmesg, timo, interlock)
* don't run any other procs or panic below,
* in case this is the idle process and already asleep.
*/
+ s = splhigh();
splx(safepri);
splx(s);
if (interlock != NULL && relock == 0)
simple_unlock(interlock);
return (0);
}
+
+#ifdef KTRACE
+ if (KTRPOINT(p, KTR_CSW))
+ ktrcsw(p, 1, 0);
+#endif
+
+ SCHED_LOCK(s);
+
#ifdef DIAGNOSTIC
- if (ident == NULL || p->p_stat != SRUN || p->p_back)
+ if (ident == NULL || p->p_stat != SONPROC || p->p_back != NULL)
panic("tsleep");
#endif
+
p->p_wchan = ident;
p->p_wmesg = wmesg;
p->p_slptime = 0;
@@ -452,29 +468,39 @@ ltsleep(ident, priority, wmesg, timo, interlock)
if ((sig = CURSIG(p)) != 0) {
if (p->p_wchan)
unsleep(p);
- p->p_stat = SRUN;
+ p->p_stat = SONPROC;
+ SCHED_UNLOCK(s);
goto resume;
}
if (p->p_wchan == 0) {
catch = 0;
+ SCHED_UNLOCK(s);
goto resume;
}
} else
sig = 0;
p->p_stat = SSLEEP;
p->p_stats->p_ru.ru_nvcsw++;
+ SCHED_ASSERT_LOCKED();
mi_switch();
#ifdef DDB
/* handy breakpoint location after process "wakes" */
__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
+
+ SCHED_ASSERT_UNLOCKED();
+ /*
+ * Note! this splx belongs to the SCHED_LOCK(s) above, mi_switch
+ * releases the scheduler lock, but does not lower the spl.
+ */
+ splx(s);
+
resume:
#ifdef __HAVE_CPUINFO
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
#else
curpriority = p->p_usrpri;
#endif
- splx(s);
p->p_flag &= ~P_SINTR;
if (p->p_flag & P_TIMEOUT) {
p->p_flag &= ~P_TIMEOUT;
@@ -504,6 +530,7 @@ resume:
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 0, 0);
#endif
+
if (interlock != NULL && relock)
simple_lock(interlock);
return (0);
@@ -523,7 +550,7 @@ endtsleep(arg)
int s;
p = (struct proc *)arg;
- s = splhigh();
+ SCHED_LOCK(s);
if (p->p_wchan) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -531,75 +558,7 @@ endtsleep(arg)
unsleep(p);
p->p_flag |= P_TIMEOUT;
}
- splx(s);
-}
-
-/*
- * Short-term, non-interruptable sleep.
- */
-void
-sleep(ident, priority)
- void *ident;
- int priority;
-{
- register struct proc *p = curproc;
- register struct slpque *qp;
- register int s;
-
-#ifdef DIAGNOSTIC
- if (priority > PZERO) {
- printf("sleep called with priority %d > PZERO, wchan: %p\n",
- priority, ident);
- panic("old sleep");
- }
-#endif
- s = splhigh();
- if (cold || panicstr) {
- /*
- * After a panic, or during autoconfiguration,
- * just give interrupts a chance, then just return;
- * don't run any other procs or panic below,
- * in case this is the idle process and already asleep.
- */
- splx(safepri);
- splx(s);
- return;
- }
-#ifdef DIAGNOSTIC
- if (ident == NULL || p->p_stat != SRUN || p->p_back)
- panic("sleep");
-#endif
- p->p_wchan = ident;
- p->p_wmesg = NULL;
- p->p_slptime = 0;
- p->p_priority = priority;
- qp = &slpque[LOOKUP(ident)];
- if (qp->sq_head == 0)
- qp->sq_head = p;
- else
- *qp->sq_tailp = p;
- *(qp->sq_tailp = &p->p_forw) = 0;
- p->p_stat = SSLEEP;
- p->p_stats->p_ru.ru_nvcsw++;
-#ifdef KTRACE
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p, 1, 0);
-#endif
- mi_switch();
-#ifdef DDB
- /* handy breakpoint location after process "wakes" */
- __asm(".globl bpendsleep\nbpendsleep:");
-#endif
-#ifdef KTRACE
- if (KTRPOINT(p, KTR_CSW))
- ktrcsw(p, 0, 0);
-#endif
-#ifdef __HAVE_CPUINFO
- p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
-#else
- curpriority = p->p_usrpri;
-#endif
- splx(s);
+ SCHED_UNLOCK(s);
}
/*
@@ -611,9 +570,15 @@ unsleep(p)
{
register struct slpque *qp;
register struct proc **hp;
+#if 0
int s;
- s = splhigh();
+ /*
+ * XXX we cannot do recursive SCHED_LOCKing yet. All callers lock
+ * anyhow.
+ */
+ SCHED_LOCK(s);
+#endif
if (p->p_wchan) {
hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
while (*hp != p)
@@ -623,9 +588,25 @@ unsleep(p)
qp->sq_tailp = hp;
p->p_wchan = 0;
}
- splx(s);
+#if 0
+ SCHED_UNLOCK(s);
+#endif
+}
+
+#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
+void
+sched_unlock_idle(void)
+{
+ SIMPLE_UNLOCK(&sched_lock);
}
+void
+sched_lock_idle(void)
+{
+ SIMPLE_LOCK(&sched_lock);
+}
+#endif /* MULTIPROCESSOR || LOCKDEBUG */
+
/*
* Make all processes sleeping on the specified identifier runnable.
*/
@@ -638,7 +619,7 @@ wakeup_n(ident, n)
struct proc *p, **q;
int s;
- s = splhigh();
+ SCHED_LOCK(s);
qp = &slpque[LOOKUP(ident)];
restart:
for (q = &qp->sq_head; (p = *q) != NULL; ) {
@@ -662,15 +643,19 @@ restart:
/*
* Since curpriority is a user priority,
* p->p_priority is always better than
- * curpriority.
+ * curpriority on the last CPU on
+ * which it ran.
+ *
+ * XXXSMP See affinity comment in
+ * resched_proc().
*/
-
if ((p->p_flag & P_INMEM) != 0) {
setrunqueue(p);
#ifdef __HAVE_CPUINFO
+ KASSERT(p->p_cpu != NULL);
need_resched(p->p_cpu);
#else
- need_resched();
+ need_resched(0);
#endif
} else {
wakeup((caddr_t)&proc0);
@@ -685,7 +670,7 @@ restart:
} else
q = &p->p_forw;
}
- splx(s);
+ SCHED_UNLOCK(s);
}
void
@@ -705,11 +690,12 @@ yield()
struct proc *p = curproc;
int s;
- s = splstatclock();
+ SCHED_LOCK(s);
p->p_priority = p->p_usrpri;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
+ SCHED_ASSERT_UNLOCKED();
splx(s);
}
@@ -732,11 +718,13 @@ preempt(newp)
if (newp != NULL)
panic("preempt: cpu_preempt not yet implemented");
- s = splstatclock();
+ SCHED_LOCK(s);
p->p_priority = p->p_usrpri;
+ p->p_stat = SRUN;
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
mi_switch();
+ SCHED_ASSERT_UNLOCKED();
splx(s);
}
@@ -750,11 +738,28 @@ mi_switch()
struct proc *p = curproc; /* XXX */
struct rlimit *rlim;
struct timeval tv;
+#if defined(MULTIPROCESSOR)
+ int hold_count;
+#endif
#ifdef __HAVE_CPUINFO
struct schedstate_percpu *spc = &p->p_cpu->ci_schedstate;
#endif
- splassert(IPL_STATCLOCK);
+ SCHED_ASSERT_LOCKED();
+
+#if defined(MULTIPROCESSOR)
+ /*
+ * Release the kernel_lock, as we are about to yield the CPU.
+ * The scheduler lock is still held until cpu_switch()
+ * selects a new process and removes it from the run queue.
+ */
+ if (p->p_flag & P_BIGLOCK)
+#ifdef notyet
+ hold_count = spinlock_release_all(&kernel_lock);
+#else
+ hold_count = __mp_release_all(&kernel_lock);
+#endif
+#endif
/*
* Compute the amount of time during which the current
@@ -765,19 +770,19 @@ mi_switch()
if (timercmp(&tv, &spc->spc_runtime, <)) {
#if 0
printf("time is not monotonic! "
- "tv=%ld.%06ld, runtime=%ld.%06ld\n",
+ "tv=%lu.%06lu, runtime=%lu.%06lu\n",
tv.tv_sec, tv.tv_usec, spc->spc_runtime.tv_sec,
spc->spc_runtime.tv_usec);
#endif
} else {
- timersub(&tv, &spc->runtime, &tv);
+ timersub(&tv, &spc->spc_runtime, &tv);
timeradd(&p->p_rtime, &tv, &p->p_rtime);
}
#else
if (timercmp(&tv, &runtime, <)) {
#if 0
printf("time is not monotonic! "
- "tv=%ld.%06ld, runtime=%ld.%06ld\n",
+ "tv=%lu.%06lu, runtime=%lu.%06lu\n",
tv.tv_sec, tv.tv_usec, runtime.tv_sec, runtime.tv_usec);
#endif
} else {
@@ -817,12 +822,38 @@ mi_switch()
uvmexp.swtch++;
cpu_switch(p);
+ /*
+ * Make sure that MD code released the scheduler lock before
+ * resuming us.
+ */
+ SCHED_ASSERT_UNLOCKED();
+
+ /*
+ * We're running again; record our new start time. We might
+	 * be running on a new CPU now, so don't use the cached
+ * schedstate_percpu pointer.
+ */
#ifdef __HAVE_CPUINFO
- /* p->p_cpu might have changed in cpu_switch() */
+ KDASSERT(p->p_cpu != NULL);
+ KDASSERT(p->p_cpu == curcpu());
microtime(&p->p_cpu->ci_schedstate.spc_runtime);
#else
microtime(&runtime);
#endif
+
+#if defined(MULTIPROCESSOR)
+ /*
+ * Reacquire the kernel_lock now. We do this after we've
+ * released the scheduler lock to avoid deadlock, and before
+ * we reacquire the interlock.
+ */
+ if (p->p_flag & P_BIGLOCK)
+#ifdef notyet
+ spinlock_acquire_count(&kernel_lock, hold_count);
+#else
+ __mp_acquire_count(&kernel_lock, hold_count);
+#endif
+#endif
}
/*
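
The P_BIGLOCK handling added to mi_switch() releases every recursion level of kernel_lock before the CPU is given away, remembers how many levels were held, and reacquires exactly that many once the process runs again. The pattern is easy to see in isolation; the toy counting lock below is a hypothetical stand-in for __mp_lock, not the kernel implementation:

#include <assert.h>
#include <stdio.h>

/* Toy recursive lock: a single-threaded stand-in for the kernel big lock. */
struct rlock {
	int depth;
};

static void rlock_acquire(struct rlock *l) { l->depth++; }

/* Drop every recursion level and report how many were held. */
static int
rlock_release_all(struct rlock *l)
{
	int count = l->depth;

	l->depth = 0;
	return (count);
}

/* Re-take the lock the same number of times. */
static void
rlock_acquire_count(struct rlock *l, int count)
{
	while (count-- > 0)
		rlock_acquire(l);
}

int
main(void)
{
	struct rlock biglock = { 0 };
	int hold_count;

	rlock_acquire(&biglock);
	rlock_acquire(&biglock);			/* nested acquisition */

	hold_count = rlock_release_all(&biglock);	/* before "switching" */
	assert(biglock.depth == 0);

	/* ... some other process would run on this CPU here ... */

	rlock_acquire_count(&biglock, hold_count);	/* after resuming */
	printf("restored depth: %d\n", biglock.depth);
	return (0);
}
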
@@ -836,6 +867,7 @@ rqinit()
for (i = 0; i < NQS; i++)
qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
+ SIMPLE_LOCK_INIT(&sched_lock);
}
static __inline void
@@ -845,13 +877,35 @@ resched_proc(struct proc *p, u_char pri)
struct cpu_info *ci;
#endif
+ /*
+ * XXXSMP
+ * Since p->p_cpu persists across a context switch,
+ * this gives us *very weak* processor affinity, in
+ * that we notify the CPU on which the process last
+ * ran that it should try to switch.
+ *
+ * This does not guarantee that the process will run on
+ * that processor next, because another processor might
+ * grab it the next time it performs a context switch.
+ *
+ * This also does not handle the case where its last
+ * CPU is running a higher-priority process, but every
+ * other CPU is running a lower-priority process. There
+ * are ways to handle this situation, but they're not
+ * currently very pretty, and we also need to weigh the
+ * cost of moving a process from one CPU to another.
+ *
+ * XXXSMP
+ * There is also the issue of locking the other CPU's
+ * sched state, which we currently do not do.
+ */
#ifdef __HAVE_CPUINFO
ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
if (pri < ci->ci_schedstate.spc_curpriority)
need_resched(ci);
#else
if (pri < curpriority)
- need_resched();
+ need_resched(0);
#endif
}
@@ -864,12 +918,12 @@ void
setrunnable(p)
register struct proc *p;
{
- register int s;
+ SCHED_ASSERT_LOCKED();
- s = splhigh();
switch (p->p_stat) {
case 0:
case SRUN:
+ case SONPROC:
case SZOMB:
case SDEAD:
default:
@@ -890,7 +944,6 @@ setrunnable(p)
p->p_stat = SRUN;
if (p->p_flag & P_INMEM)
setrunqueue(p);
- splx(s);
if (p->p_slptime > 1)
updatepri(p);
p->p_slptime = 0;
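
Because setrunnable() now only asserts SCHED_ASSERT_LOCKED() instead of raising the spl itself, every caller must take the scheduler lock around it, as the selwakeup() and ptrace() hunks later in this commit do. A typical call site therefore looks like the sketch below, where t stands for whichever process is being woken:

	int s;

	SCHED_LOCK(s);
	if (t->p_stat == SSTOP)
		setrunnable(t);		/* asserts the scheduler lock is held */
	SCHED_UNLOCK(s);
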
@@ -911,6 +964,8 @@ resetpriority(p)
{
register unsigned int newpriority;
+ SCHED_ASSERT_LOCKED();
+
newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
newpriority = min(newpriority, MAXPRI);
p->p_usrpri = newpriority;
@@ -936,8 +991,12 @@ void
schedclock(p)
struct proc *p;
{
+ int s;
+
p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
+ SCHED_LOCK(s);
resetpriority(p);
+ SCHED_UNLOCK(s);
if (p->p_priority >= PUSER)
p->p_priority = p->p_usrpri;
}
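
resetpriority() recomputes the user priority from the estimated CPU usage and the nice value, and schedclock() now takes SCHED_LOCK() around that recomputation. The arithmetic is the usual 4.4BSD formula shown in the hunk above; the standalone program below just evaluates it with made-up constant values (the real PUSER, MAXPRI, NZERO and NICE_WEIGHT come from the kernel headers):

#include <stdio.h>

/* Illustrative values only; the kernel's tuning may differ. */
#define PUSER		50
#define MAXPRI		127
#define NZERO		20
#define NICE_WEIGHT	2

static unsigned int
user_priority(unsigned int estcpu, int nice)
{
	unsigned int newpriority;

	/* Same expression as resetpriority() in the diff above. */
	newpriority = PUSER + estcpu + NICE_WEIGHT * (nice - NZERO);
	if (newpriority > MAXPRI)
		newpriority = MAXPRI;
	return (newpriority);
}

int
main(void)
{
	/* A CPU hog at default nice vs. an idle, niced-down process. */
	printf("busy, nice 20: %u\n", user_priority(60, 20));
	printf("idle, nice 30: %u\n", user_priority(0, 30));
	return (0);
}
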
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index a517ba2951c..603a354b76b 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sysctl.c,v 1.111 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.112 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@@ -429,6 +429,20 @@ kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
return (sysctl_malloc(name + 1, namelen - 1, oldp, oldlenp,
newp, newlen, p));
case KERN_CPTIME:
+#ifdef MULTIPROCESSOR
+ {
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ int i;
+
+ bzero(cp_time, sizeof(cp_time));
+
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ for (i = 0; i < CPUSTATES; i++)
+ cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
+ }
+ }
+#endif
return (sysctl_rdstruct(oldp, oldlenp, newp, &cp_time,
sizeof(cp_time)));
case KERN_NCHSTATS:
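
On MULTIPROCESSOR kernels KERN_CPTIME now returns a system-wide total, built by summing every CPU's spc_cp_time[] counters into the global cp_time[] before sysctl_rdstruct() copies it out. The accumulation is a plain element-wise sum over the CPUSTATES buckets, as in this standalone sketch (the bucket count and the sample numbers are illustrative assumptions):

#include <stdio.h>

#define CPUSTATES	5	/* assumed: user, nice, sys, intr, idle */
#define NCPUS		2

int
main(void)
{
	/* Per-CPU tick counters, as spc_cp_time[] would hold them. */
	long per_cpu[NCPUS][CPUSTATES] = {
		{ 100, 5, 40, 10, 845 },
		{ 200, 0, 60, 20, 720 },
	};
	long cp_time[CPUSTATES] = { 0 };
	int cpu, i;

	/* Same element-wise accumulation as the sysctl handler above. */
	for (cpu = 0; cpu < NCPUS; cpu++)
		for (i = 0; i < CPUSTATES; i++)
			cp_time[i] += per_cpu[cpu][i];

	for (i = 0; i < CPUSTATES; i++)
		printf("cp_time[%d] = %ld\n", i, cp_time[i]);
	return (0);
}
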
@@ -1317,6 +1331,11 @@ fill_kproc2(struct proc *p, struct kinfo_proc2 *ki)
&p->p_stats->p_cru.ru_stime, &ut);
ki->p_uctime_sec = ut.tv_sec;
ki->p_uctime_usec = ut.tv_usec;
+ ki->p_cpuid = KI_NOCPU;
+#ifdef MULTIPROCESSOR
+ if (p->p_cpu != NULL)
+ ki->p_cpuid = p->p_cpu->ci_cpuid;
+#endif
PRELE(p);
}
}
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index f40928b2824..1ed5f182d36 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_time.c,v 1.40 2004/06/09 20:18:28 art Exp $ */
+/* $OpenBSD: kern_time.c,v 1.41 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -99,7 +99,7 @@ settime(struct timeval *tv)
timersub(tv, &time, &delta);
time = *tv;
timeradd(&boottime, &delta, &boottime);
-#ifdef __HAVE_CURCPU
+#ifdef __HAVE_CPUINFO
/*
* XXXSMP
* This is wrong. We should traverse a list of all
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index e9114b34990..db01a8a868c 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.41 2004/06/02 22:17:22 tedu Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.42 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -54,7 +54,6 @@
/*
* XXX - for now.
*/
-#define SIMPLELOCK_INITIALIZER { SLOCK_UNLOCKED }
#ifdef LOCKDEBUG
#define simple_lock_freecheck(a, s) do { /* nothing */ } while (0)
#define simple_lock_only_held(lkp, str) do { /* nothing */ } while (0)
@@ -86,7 +85,7 @@ int pool_inactive_time = 10;
static struct pool *drainpp;
/* This spin lock protects both pool_head and drainpp. */
-struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
+struct simplelock pool_head_slock;
struct pool_item_header {
/* Page headers */
@@ -529,6 +528,8 @@ pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
0, "pcgpool", NULL);
}
+ simple_lock_init(&pool_head_slock);
+
/* Insert this into the list of all pools. */
simple_lock(&pool_head_slock);
TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
@@ -2062,9 +2063,9 @@ pool_allocator_drain(struct pool_allocator *pa, struct pool *org, int need)
TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
if (pp == org)
continue;
- simple_unlock(&pa->pa_list);
- freed = pool_reclaim(pp)
- simple_lock(&pa->pa_list);
+ simple_unlock(&pa->pa_slock);
+ freed = pool_reclaim(pp);
+ simple_lock(&pa->pa_slock);
} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && (freed < need));
if (!freed) {
diff --git a/sys/kern/subr_prf.c b/sys/kern/subr_prf.c
index 906ce028770..a843c2f1dc2 100644
--- a/sys/kern/subr_prf.c
+++ b/sys/kern/subr_prf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_prf.c,v 1.58 2004/01/03 14:08:53 espie Exp $ */
+/* $OpenBSD: subr_prf.c,v 1.59 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: subr_prf.c,v 1.45 1997/10/24 18:14:25 chuck Exp $ */
/*-
@@ -47,6 +47,7 @@
#include <sys/ioctl.h>
#include <sys/vnode.h>
#include <sys/file.h>
+#include <sys/simplelock.h>
#include <sys/tty.h>
#include <sys/tprintf.h>
#include <sys/syslog.h>
@@ -97,6 +98,50 @@ extern int uvm_doswapencrypt;
int kprintf(const char *, int, void *, char *, va_list);
void kputchar(int, int, struct tty *);
+#ifdef MULTIPROCESSOR
+
+#ifdef notdef
+
+struct simplelock kprintf_slock;
+
+#define KPRINTF_MUTEX_ENTER(s) \
+do { \
+ (s) = splhigh(); \
+ simple_lock(&kprintf_slock); \
+} while (/*CONSTCOND*/0)
+
+#define KPRINTF_MUTEX_EXIT(s) \
+do { \
+ simple_unlock(&kprintf_slock); \
+ splx((s)); \
+} while (/*CONSTCOND*/0)
+
+#else
+
+struct __mp_lock kprintf_slock;
+
+#define KPRINTF_MUTEX_ENTER(s) \
+do { \
+ (s) = splhigh(); \
+ __mp_lock(&kprintf_slock); \
+} while (/*CONSTCOND*/0)
+
+#define KPRINTF_MUTEX_EXIT(s) \
+do { \
+ __mp_unlock(&kprintf_slock); \
+ splx((s)); \
+} while (/*CONSTCOND*/0)
+
+#endif
+
+#else
+
+struct simplelock kprintf_slock;
+#define KPRINTF_MUTEX_ENTER(s) (s) = splhigh()
+#define KPRINTF_MUTEX_EXIT(s) splx((s))
+
+#endif /* MULTIPROCESSOR */
+
/*
* globals
*/
@@ -506,6 +551,9 @@ printf(const char *fmt, ...)
{
va_list ap;
int savintr, retval;
+ int s;
+
+ KPRINTF_MUTEX_ENTER(s);
savintr = consintr; /* disable interrupts */
consintr = 0;
@@ -515,6 +563,9 @@ printf(const char *fmt, ...)
if (!panicstr)
logwakeup();
consintr = savintr; /* reenable interrupts */
+
+ KPRINTF_MUTEX_EXIT(s);
+
return(retval);
}
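
KPRINTF_MUTEX_ENTER()/EXIT() bracket the whole body of printf() so that output from different CPUs cannot interleave mid-message. The userland analogue below shows the same serialize-the-whole-message idea with a POSIX mutex in place of splhigh() plus the kernel lock; it is an analogue for illustration, not the kernel mechanism:

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;

/* Print one whole message atomically with respect to other callers. */
static int
locked_printf(const char *fmt, ...)
{
	va_list ap;
	int retval;

	pthread_mutex_lock(&output_lock);	/* KPRINTF_MUTEX_ENTER analogue */
	va_start(ap, fmt);
	retval = vprintf(fmt, ap);
	va_end(ap);
	pthread_mutex_unlock(&output_lock);	/* KPRINTF_MUTEX_EXIT analogue */
	return (retval);
}

int
main(void)
{
	locked_printf("cpu%d: %s\n", 0, "attached");
	return (0);
}
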
diff --git a/sys/kern/sys_generic.c b/sys/kern/sys_generic.c
index a6fdd09c95e..c512b9a40dd 100644
--- a/sys/kern/sys_generic.c
+++ b/sys/kern/sys_generic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_generic.c,v 1.47 2003/12/10 23:10:08 millert Exp $ */
+/* $OpenBSD: sys_generic.c,v 1.48 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: sys_generic.c,v 1.24 1996/03/29 00:25:32 cgd Exp $ */
/*
@@ -55,6 +55,7 @@
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
+#include <sys/sched.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
@@ -852,7 +853,7 @@ selwakeup(sip)
p = pfind(sip->si_selpid);
sip->si_selpid = 0;
if (p != NULL) {
- s = splhigh();
+ SCHED_LOCK(s);
if (p->p_wchan == (caddr_t)&selwait) {
if (p->p_stat == SSLEEP)
setrunnable(p);
@@ -860,7 +861,7 @@ selwakeup(sip)
unsleep(p);
} else if (p->p_flag & P_SELECT)
p->p_flag &= ~P_SELECT;
- splx(s);
+ SCHED_UNLOCK(s);
}
}
diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c
index 37664becb7d..21180cda141 100644
--- a/sys/kern/sys_process.c
+++ b/sys/kern/sys_process.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_process.c,v 1.27 2004/02/08 00:04:21 deraadt Exp $ */
+/* $OpenBSD: sys_process.c,v 1.28 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: sys_process.c,v 1.55 1996/05/15 06:17:47 tls Exp $ */
/*-
@@ -57,6 +57,7 @@
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
+#include <sys/sched.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
@@ -91,6 +92,7 @@ sys_ptrace(p, v, retval)
#endif
int error, write;
int temp;
+ int s;
/* "A foolish consistency..." XXX */
if (SCARG(uap, req) == PT_TRACE_ME)
@@ -353,7 +355,9 @@ sys_ptrace(p, v, retval)
/* Finally, deliver the requested signal (or none). */
if (t->p_stat == SSTOP) {
t->p_xstat = SCARG(uap, data);
+ SCHED_LOCK(s);
setrunnable(t);
+ SCHED_UNLOCK(s);
} else {
if (SCARG(uap, data) != 0)
psignal(t, SCARG(uap, data));
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 0aca0394f7a..6dec67d20fe 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tty.c,v 1.64 2004/03/19 19:03:07 deraadt Exp $ */
+/* $OpenBSD: tty.c,v 1.65 2004/06/13 21:49:26 niklas Exp $ */
/* $NetBSD: tty.c,v 1.68.4.2 1996/06/06 16:04:52 thorpej Exp $ */
/*-
@@ -2098,7 +2098,8 @@ ttyinfo(tp)
pick = p;
ttyprintf(tp, " cmd: %s %d [%s] ", pick->p_comm, pick->p_pid,
- pick->p_stat == SRUN ? "running" :
+ pick->p_stat == SONPROC ? "running" :
+ pick->p_stat == SRUN ? "runnable" :
pick->p_wmesg ? pick->p_wmesg : "iowait");
calcru(pick, &utime, &stime, NULL);