summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>2004-06-09 20:18:29 +0000
committerArtur Grabowski <art@cvs.openbsd.org>2004-06-09 20:18:29 +0000
commita3775629966bec0445340075a4869a0ea66b15f2 (patch)
tree5d5188c5a0c691a1dfd5e9d7e80b7a62514d4e95
parent9637ef0a1e557491619d6e5e20fe656ae1780859 (diff)
Merge a piece of the SMP branch into HEAD.
Introduce the cpu_info structure, the p_cpu field in struct proc, a global scheduling context, and various code changes to deal with this. At the moment no architecture uses this stuff yet, but it will allow us a slow and controlled migration to the new APIs. All new code is ifdef:ed out. ok deraadt@ niklas@
-rw-r--r--sys/kern/init_main.c15
-rw-r--r--sys/kern/kern_clock.c65
-rw-r--r--sys/kern/kern_fork.c6
-rw-r--r--sys/kern/kern_ktrace.c6
-rw-r--r--sys/kern/kern_subr.c7
-rw-r--r--sys/kern/kern_synch.c109
-rw-r--r--sys/kern/kern_sysctl.c6
-rw-r--r--sys/kern/kern_time.c13
-rw-r--r--sys/sys/kernel.h4
-rw-r--r--sys/sys/proc.h53
-rw-r--r--sys/sys/sched.h5
11 files changed, 258 insertions, 31 deletions
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 2ab539d8b8a..55dad5457b4 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: init_main.c,v 1.114 2004/06/08 18:09:31 marc Exp $ */
+/* $OpenBSD: init_main.c,v 1.115 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $ */
/*
@@ -110,7 +110,7 @@ struct pcred cred0;
struct plimit limit0;
struct vmspace vmspace0;
struct sigacts sigacts0;
-#ifndef curproc
+#if !defined(__HAVE_CPUINFO) && !defined(curproc)
struct proc *curproc;
#endif
struct proc *initproc;
@@ -122,7 +122,10 @@ void (*md_diskconf)(void) = NULL;
struct vnode *rootvp, *swapdev_vp;
int boothowto;
struct timeval boottime;
+#ifndef __HAVE_CPUINFO
struct timeval runtime;
+#endif
+
int ncpus = 1;
#if !defined(NO_PROPOLICE)
@@ -195,6 +198,9 @@ main(framep)
* any possible traps/probes to simplify trap processing.
*/
curproc = p = &proc0;
+#ifdef __HAVE_CPUINFO
+ p->p_cpu = curcpu();
+#endif
/*
* Initialize timeouts.
@@ -414,7 +420,12 @@ main(framep)
* from the file system. Reset p->p_rtime as it may have been
* munched in mi_switch() after the time got set.
*/
+#ifdef __HAVE_CPUINFO
+ p->p_stats->p_start = mono_time = boottime = time;
+ p->p_cpu->ci_schedstate.spc_runtime = time;
+#else
p->p_stats->p_start = runtime = mono_time = boottime = time;
+#endif
p->p_rtime.tv_sec = p->p_rtime.tv_usec = 0;
/* Create process 1 (init(8)). */
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index a04640ae7a3..8a34e63d16c 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clock.c,v 1.42 2003/06/02 23:28:05 millert Exp $ */
+/* $OpenBSD: kern_clock.c,v 1.43 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -159,13 +159,15 @@ initclocks()
* The real-time timer, interrupting hz times per second.
*/
void
-hardclock(frame)
- register struct clockframe *frame;
+hardclock(struct clockframe *frame)
{
- register struct proc *p;
- register int delta;
+ struct proc *p;
+ int delta;
extern int tickdelta;
extern long timedelta;
+#ifdef __HAVE_CPUINFO
+ struct cpu_info *ci = curcpu();
+#endif
p = curproc;
if (p) {
@@ -190,6 +192,11 @@ hardclock(frame)
if (stathz == 0)
statclock(frame);
+#ifdef __HAVE_CPUINFO
+ if (--ci->ci_schedstate.spc_rrticks <= 0)
+ roundrobin(ci);
+#endif
+
/*
* Increment the time-of-day. The increment is normally just
* ``tick''. If the machine is one which has a clock frequency
@@ -388,18 +395,40 @@ stopprofclock(p)
* do process and kernel statistics.
*/
void
-statclock(frame)
- register struct clockframe *frame;
+statclock(struct clockframe *frame)
{
#ifdef GPROF
- register struct gmonparam *g;
- register int i;
+ struct gmonparam *g;
+ int i;
#endif
+#ifdef __HAVE_CPUINFO
+ struct cpu_info *ci = curcpu();
+ struct schedstate_percpu *spc = &ci->ci_schedstate;
+#else
static int schedclk;
- register struct proc *p;
+#endif
+ struct proc *p = curproc;
+
+#ifdef __HAVE_CPUINFO
+ /*
+ * Notice changes in divisor frequency, and adjust clock
+ * frequency accordingly.
+ */
+ if (spc->spc_psdiv != psdiv) {
+ spc->spc_psdiv = psdiv;
+ spc->spc_pscnt = psdiv;
+ if (psdiv == 1) {
+ setstatclockrate(stathz);
+ } else {
+ setstatclockrate(profhz);
+ }
+ }
+/* XXX Kludgey */
+#define pscnt spc->spc_pscnt
+#define cp_time spc->spc_cp_time
+#endif
if (CLKF_USERMODE(frame)) {
- p = curproc;
if (p->p_flag & P_PROFIL)
addupc_intr(p, CLKF_PC(frame));
if (--pscnt > 0)
@@ -441,7 +470,6 @@ statclock(frame)
* so that we know how much of its real time was spent
* in ``non-process'' (i.e., interrupt) work.
*/
- p = curproc;
if (CLKF_INTR(frame)) {
if (p != NULL)
p->p_iticks++;
@@ -454,15 +482,26 @@ statclock(frame)
}
pscnt = psdiv;
+#ifdef __HAVE_CPUINFO
+#undef pscnt
+#undef cp_time
+#endif
+
if (p != NULL) {
p->p_cpticks++;
/*
* If no schedclock is provided, call it here at ~~12-25 Hz;
* ~~16 Hz is best
*/
- if (schedhz == 0)
+ if (schedhz == 0) {
+#ifdef __HAVE_CPUINFO
+ if ((++curcpu()->ci_schedstate.spc_schedticks & 3) == 0)
+ schedclock(p);
+#else
if ((++schedclk & 3) == 0)
schedclock(p);
+#endif
+ }
}
}
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index becb63288a8..a103c391634 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_fork.c,v 1.67 2004/06/05 22:38:40 tedu Exp $ */
+/* $OpenBSD: kern_fork.c,v 1.68 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/*
@@ -204,6 +204,10 @@ fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
timeout_set(&p2->p_sleep_to, endtsleep, p2);
timeout_set(&p2->p_realit_to, realitexpire, p2);
+#ifdef __HAVE_CPUINFO
+ p2->p_cpu = NULL;
+#endif
+
/*
* Duplicate sub-structures as needed.
* Increase reference counts on shared objects.
diff --git a/sys/kern/kern_ktrace.c b/sys/kern/kern_ktrace.c
index 0172d28efb2..b811644d403 100644
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_ktrace.c,v 1.31 2003/09/01 18:06:03 henning Exp $ */
+/* $OpenBSD: kern_ktrace.c,v 1.32 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $ */
/*
@@ -211,7 +211,11 @@ ktrgenio(p, fd, rw, iov, len, error)
* Don't allow this process to hog the cpu when doing
* huge I/O.
*/
+#ifdef __HAVE_CPUINFO
+ if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD)
+#else
if (p->p_schedflags & PSCHED_SHOULDYIELD)
+#endif
preempt(NULL);
count = min(iov->iov_len, buflen);
diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c
index d9c4abba39a..52432e05522 100644
--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_subr.c,v 1.26 2003/10/31 11:10:41 markus Exp $ */
+/* $OpenBSD: kern_subr.c,v 1.27 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_subr.c,v 1.15 1996/04/09 17:21:56 ragge Exp $ */
/*
@@ -77,7 +77,12 @@ uiomove(cp, n, uio)
switch (uio->uio_segflg) {
case UIO_USERSPACE:
+#ifdef __HAVE_CPUINFO
+ if (curcpu()->ci_schedstate.spc_schedflags &
+ SPCF_SHOULDYIELD)
+#else
if (p->p_schedflags & PSCHED_SHOULDYIELD)
+#endif
preempt(NULL);
if (uio->uio_rw == UIO_READ)
error = copyout(cp, iov->iov_base, cnt);
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index ce05eac5512..baba311f668 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_synch.c,v 1.54 2004/01/26 01:27:02 deraadt Exp $ */
+/* $OpenBSD: kern_synch.c,v 1.55 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -54,15 +54,22 @@
#include <machine/cpu.h>
+#ifndef __HAVE_CPUINFO
u_char curpriority; /* usrpri of curproc */
+#endif
int lbolt; /* once a second sleep address */
+#ifdef __HAVE_CPUINFO
+int rrticks_init; /* # of harclock ticks per roundrobin */
+#endif
int whichqs; /* Bit mask summary of non-empty Q's. */
struct prochd qs[NQS];
void scheduler_start(void);
+#ifndef __HAVE_CPUINFO
void roundrobin(void *);
+#endif
void schedcpu(void *);
void updatepri(struct proc *);
void endtsleep(void *);
@@ -70,7 +77,9 @@ void endtsleep(void *);
void
scheduler_start()
{
+#ifndef __HAVE_CPUINFO
static struct timeout roundrobin_to;
+#endif
static struct timeout schedcpu_to;
/*
@@ -83,17 +92,47 @@ scheduler_start()
timeout_set(&roundrobin_to, roundrobin, &roundrobin_to);
timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
+#ifdef __HAVE_CPUINFO
+ rrticks_init = hz / 10;
+#else
roundrobin(&roundrobin_to);
+#endif
schedcpu(&schedcpu_to);
}
/*
* Force switch among equal priority processes every 100ms.
*/
+#ifdef __HAVE_CPUINFO
+void
+roundrobin(struct cpu_info *ci)
+{
+ struct schedstate_percpu *spc = &ci->ci_schedstate;
+ int s;
+
+ spc->spc_rrticks = rrticks_init;
+
+ if (curproc != NULL) {
+ s = splstatclock();
+ if (spc->spc_schedflags & SPCF_SEENRR) {
+ /*
+ * The process has already been through a roundrobin
+ * without switching and may be hogging the CPU.
+ * Indicate that the process should yield.
+ */
+ spc->spc_schedflags |= SPCF_SHOULDYIELD;
+ } else {
+ spc->spc_schedflags |= SPCF_SEENRR;
+ }
+ splx(s);
+ }
+
+ need_resched(curcpu());
+}
+#else
/* ARGSUSED */
void
-roundrobin(arg)
- void *arg;
+roundrobin(void *arg)
{
struct timeout *to = (struct timeout *)arg;
struct proc *p = curproc;
@@ -116,6 +155,7 @@ roundrobin(arg)
need_resched();
timeout_add(to, hz / 10);
}
+#endif
/*
* Constants for digital decay and forget:
@@ -429,7 +469,11 @@ ltsleep(ident, priority, wmesg, timo, interlock)
__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
resume:
+#ifdef __HAVE_CPUINFO
+ p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
+#else
curpriority = p->p_usrpri;
+#endif
splx(s);
p->p_flag &= ~P_SINTR;
if (p->p_flag & P_TIMEOUT) {
@@ -550,7 +594,11 @@ sleep(ident, priority)
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 0, 0);
#endif
+#ifdef __HAVE_CPUINFO
+ p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
+#else
curpriority = p->p_usrpri;
+#endif
splx(s);
}
@@ -619,7 +667,11 @@ restart:
if ((p->p_flag & P_INMEM) != 0) {
setrunqueue(p);
+#ifdef __HAVE_CPUINFO
+ need_resched(p->p_cpu);
+#else
need_resched();
+#endif
} else {
wakeup((caddr_t)&proc0);
}
@@ -698,6 +750,9 @@ mi_switch()
struct proc *p = curproc; /* XXX */
struct rlimit *rlim;
struct timeval tv;
+#ifdef __HAVE_CPUINFO
+ struct schedstate_percpu *spc = &p->p_cpu->ci_schedstate;
+#endif
splassert(IPL_STATCLOCK);
@@ -706,6 +761,19 @@ mi_switch()
* process was running, and add that to its total so far.
*/
microtime(&tv);
+#ifdef __HAVE_CPUINFO
+ if (timercmp(&tv, &spc->spc_runtime, <)) {
+#if 0
+ printf("time is not monotonic! "
+ "tv=%ld.%06ld, runtime=%ld.%06ld\n",
+ tv.tv_sec, tv.tv_usec, spc->spc_runtime.tv_sec,
+ spc->spc_runtime.tv_usec);
+#endif
+ } else {
+ timersub(&tv, &spc->runtime, &tv);
+ timeradd(&p->p_rtime, &tv, &p->p_rtime);
+ }
+#else
if (timercmp(&tv, &runtime, <)) {
#if 0
printf("time is not monotonic! "
@@ -716,6 +784,7 @@ mi_switch()
timersub(&tv, &runtime, &tv);
timeradd(&p->p_rtime, &tv, &p->p_rtime);
}
+#endif
/*
* Check if the process exceeds its cpu resource allocation.
@@ -736,14 +805,24 @@ mi_switch()
* Process is about to yield the CPU; clear the appropriate
* scheduling flags.
*/
+#ifdef __HAVE_CPUINFO
+ spc->spc_schedflags &= ~SPCF_SWITCHCLEAR;
+#else
p->p_schedflags &= ~PSCHED_SWITCHCLEAR;
+#endif
/*
* Pick a new current process and record its start time.
*/
uvmexp.swtch++;
cpu_switch(p);
+
+#ifdef __HAVE_CPUINFO
+ /* p->p_cpu might have changed in cpu_switch() */
+ microtime(&p->p_cpu->ci_schedstate.spc_runtime);
+#else
microtime(&runtime);
+#endif
}
/*
@@ -759,6 +838,23 @@ rqinit()
qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}
+static __inline void
+resched_proc(struct proc *p, u_char pri)
+{
+#ifdef __HAVE_CPUINFO
+ struct cpu_info *ci;
+#endif
+
+#ifdef __HAVE_CPUINFO
+ ci = (p->p_cpu != NULL) ? p->p_cpu : curcpu();
+ if (pri < ci->ci_schedstate.spc_curpriority)
+ need_resched(ci);
+#else
+ if (pri < curpriority)
+ need_resched();
+#endif
+}
+
/*
* Change process state to be runnable,
* placing it on the run queue if it is in memory,
@@ -800,8 +896,8 @@ setrunnable(p)
p->p_slptime = 0;
if ((p->p_flag & P_INMEM) == 0)
wakeup((caddr_t)&proc0);
- else if (p->p_priority < curpriority)
- need_resched();
+ else
+ resched_proc(p, p->p_priority);
}
/*
@@ -818,8 +914,7 @@ resetpriority(p)
newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
newpriority = min(newpriority, MAXPRI);
p->p_usrpri = newpriority;
- if (newpriority < curpriority)
- need_resched();
+ resched_proc(p, p->p_usrpri);
}
/*
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index 52d16c9d176..a517ba2951c 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sysctl.c,v 1.110 2004/06/08 18:09:31 marc Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.111 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@@ -1261,7 +1261,11 @@ fill_kproc2(struct proc *p, struct kinfo_proc2 *ki)
ki->p_stat = p->p_stat;
ki->p_swtime = p->p_swtime;
ki->p_slptime = p->p_slptime;
+#ifdef __HAVE_CPUINFO
+ ki->p_schedflags = 0;
+#else
ki->p_schedflags = p->p_schedflags;
+#endif
ki->p_holdcnt = p->p_holdcnt;
ki->p_priority = p->p_priority;
ki->p_usrpri = p->p_usrpri;
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
index ab9f7f8de73..f40928b2824 100644
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_time.c,v 1.39 2004/02/15 02:34:14 tedu Exp $ */
+/* $OpenBSD: kern_time.c,v 1.40 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $ */
/*
@@ -99,7 +99,18 @@ settime(struct timeval *tv)
timersub(tv, &time, &delta);
time = *tv;
timeradd(&boottime, &delta, &boottime);
+#ifdef __HAVE_CURCPU
+ /*
+ * XXXSMP
+ * This is wrong. We should traverse a list of all
+ * CPUs and add the delta to the runtime of those
+ * CPUs which have a process on them.
+ */
+ timeradd(&curcpu()->ci_schedstate.spc_runtime, &delta,
+ &curcpu()->ci_schedstate.spc_runtime);
+#else
timeradd(&runtime, &delta, &runtime);
+#endif
splx(s);
resettodr();
diff --git a/sys/sys/kernel.h b/sys/sys/kernel.h
index fb4482db610..b3c92ca019b 100644
--- a/sys/sys/kernel.h
+++ b/sys/sys/kernel.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: kernel.h,v 1.8 2003/06/02 23:28:21 millert Exp $ */
+/* $OpenBSD: kernel.h,v 1.9 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: kernel.h,v 1.11 1995/03/03 01:24:16 cgd Exp $ */
/*-
@@ -49,7 +49,9 @@ extern int domainnamelen;
/* 1.2 */
extern volatile struct timeval mono_time;
extern struct timeval boottime;
+#ifndef __HAVE_CURCPU
extern struct timeval runtime;
+#endif
extern volatile struct timeval time;
extern struct timezone tz; /* XXX */
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 592cd9c6e86..c06b14eda43 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: proc.h,v 1.69 2004/04/02 19:08:58 tedu Exp $ */
+/* $OpenBSD: proc.h,v 1.70 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*-
@@ -46,6 +46,49 @@
#include <sys/timeout.h> /* For struct timeout. */
#include <sys/event.h> /* For struct klist */
+#ifdef __HAVE_CPUINFO
+/*
+ * CPU states.
+ * XXX Not really scheduler state, but no other good place to put
+ * it right now, and it really is per-CPU.
+ */
+#define CP_USER 0
+#define CP_NICE 1
+#define CP_SYS 2
+#define CP_INTR 3
+#define CP_IDLE 4
+#define CPUSTATES 5
+
+/*
+ * Per-CPU scheduler state. XXX - this should be in sys/sched.h
+ */
+struct schedstate_percpu {
+ struct timeval spc_runtime; /* time curproc started running */
+ __volatile int spc_schedflags; /* flags; see below */
+ u_int spc_schedticks; /* ticks for schedclock() */
+ u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
+ u_char spc_curpriority; /* usrpri of curproc */
+ int spc_rrticks; /* ticks until roundrobin() */
+ int spc_pscnt; /* prof/stat counter */
+ int spc_psdiv; /* prof/stat divisor */
+};
+
+/* spc_flags */
+#define SPCF_SEENRR 0x0001 /* process has seen roundrobin() */
+#define SPCF_SHOULDYIELD 0x0002 /* process should yield the CPU */
+#define SPCF_SWITCHCLEAR (SPCF_SEENRR|SPCF_SHOULDYIELD)
+
+/*
+ * These are the fields we require in struct cpu_info that we get from
+ * curcpu():
+ *
+ * struct proc *ci_curproc;
+ * struct schedstate_percpu ci_schedstate;
+ * cpuid_t ci_cpuid;
+ */
+#define curproc curcpu()->ci_curproc
+#endif
+
/*
* One structure allocated per session.
*/
@@ -164,7 +207,11 @@ struct proc {
const char *p_wmesg; /* Reason for sleep. */
u_int p_swtime; /* Time swapped in or out. */
u_int p_slptime; /* Time since last blocked. */
+#ifdef __HAVE_CPUINFO
+ struct cpu_info * __volatile p_cpu;
+#else
int p_schedflags; /* PSCHED_* flags */
+#endif
struct itimerval p_realtimer; /* Alarm timer. */
struct timeout p_realit_to; /* Alarm timeout. */
@@ -275,6 +322,7 @@ struct proc {
#define P_EXITSIG(p) \
(((p)->p_flag & (P_TRACED | P_FSTRACE)) ? SIGCHLD : (p)->p_exitsig)
+#ifndef __HAVE_CPUINFO
/*
* These flags are kept in p_schedflags. p_schedflags may be modified
* only at splstatclock().
@@ -283,6 +331,7 @@ struct proc {
#define PSCHED_SHOULDYIELD 0x0002 /* process should yield */
#define PSCHED_SWITCHCLEAR (PSCHED_SEENRR|PSCHED_SHOULDYIELD)
+#endif
/*
* MOVE TO ucred.h?
@@ -343,7 +392,7 @@ extern u_long pidhash;
extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl;
extern u_long pgrphash;
-#ifndef curproc
+#if !defined(__HAVE_CPUINFO) && !defined(curproc)
extern struct proc *curproc; /* Current running proc. */
#endif
extern struct proc proc0; /* Process slot for swapper. */
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 509952c2d0a..08b1fb49904 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched.h,v 1.4 2003/06/02 23:28:21 millert Exp $ */
+/* $OpenBSD: sched.h,v 1.5 2004/06/09 20:18:28 art Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
@@ -90,6 +90,9 @@ extern int schedhz; /* ideally: 16 */
#ifdef _SYS_PROC_H_
void schedclock(struct proc *p);
+#ifdef __HAVE_CPUINFO
+void roundrobin(struct cpu_info *);
+#endif
static __inline void scheduler_fork_hook(
struct proc *parent, struct proc *child);
static __inline void scheduler_wait_hook(