author     Artur Grabowski <art@cvs.openbsd.org>   2007-10-10 15:53:54 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>   2007-10-10 15:53:54 +0000
commit     e51062c8cca21a333603b567563e3b84f74ddac0 (patch)
tree       dccf12b7d5ef806260203fe60b2bcaf94260c651 /sys/arch
parent     34c540de32da6090afdcdd6fee481f9a2df345fd (diff)
Make context switching much more MI:
- Move the functionality of choosing a process from cpu_switch into a much
  simpler function: cpu_switchto. Instead of having the locore code walk the
  run queues, let the MI code choose the process we want to run and only
  implement the context switching itself in MD code.
- Let MD context switching run without worrying about spls or locks.
- Instead of having the idle loop implemented with special contexts in MD
  code, implement one idle proc for each cpu. Make the idle loop MI with MD
  hooks.
- Change the proc lists from the old-style vax queues to TAILQs.
- Change the sleep queue from vax queues to TAILQs. This makes wakeup() go
  from O(n^2) to O(n).

There will be some MD fallout, but it will be fixed shortly. There are also a
few cleanups to be done after this.

deraadt@, kettenis@ ok
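To make the new split concrete, below is a rough, hypothetical sketch of the MI
side that the per-architecture hunks plug into. The MI half (kern_sched.c and
friends) is not part of this sys/arch-only diff, so apart from cpu_switchto(),
sched_chooseproc(), SCHED_LOCK() and the cpu_idle_enter()/cpu_idle_cycle()/
cpu_idle_leave() hooks, which do appear in the hunks below, every name and
detail here is an assumption rather than the committed code:

/*
 * Hypothetical per-cpu idle proc, MI except for the cpu_* hooks.
 * sched_idle(), have_runnable_proc() and SCHED_UNLOCK() are placeholders;
 * only cpu_idle_enter/cycle/leave(), sched_chooseproc(), cpu_switchto()
 * and SCHED_LOCK() are names taken from the diff itself.
 */
void
sched_idle(void *v)
{
	struct cpu_info *ci = v;
	struct proc *idle = curproc;		/* one idle proc per cpu */
	struct proc *p;
	int s;

	for (;;) {
		cpu_idle_enter();		/* MD hook: drop spl, enable interrupts */
		while (!have_runnable_proc(ci))	/* placeholder "work pending?" check */
			cpu_idle_cycle();	/* MD hook: e.g. "hlt" on amd64, "stop" on hp300 */
		cpu_idle_leave();		/* MD hook: raise spl again */

		SCHED_LOCK(s);
		p = sched_chooseproc();		/* MI picks the next proc to run... */
		if (p != idle)
			cpu_switchto(idle, p);	/* ...MD only swaps registers/MMU context */
		SCHED_UNLOCK(s);
	}
}

With a loop like that in MI code, each architecture only has to provide
cpu_switchto(old, new) plus the three idle hooks; the amd64 and alpha hunks
below show how small those become (on amd64, cpu_idle_cycle() is just a
hlt followed by ret).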
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/alpha/alpha/genassym.cf | 7
-rw-r--r--  sys/arch/alpha/alpha/locore.s | 195
-rw-r--r--  sys/arch/alpha/alpha/machdep.c | 62
-rw-r--r--  sys/arch/alpha/alpha/vm_machdep.c | 6
-rw-r--r--  sys/arch/amd64/amd64/cpu.c | 6
-rw-r--r--  sys/arch/amd64/amd64/genassym.cf | 4
-rw-r--r--  sys/arch/amd64/amd64/locore.S | 277
-rw-r--r--  sys/arch/amd64/amd64/mptramp.S | 5
-rw-r--r--  sys/arch/amd64/amd64/vm_machdep.c | 10
-rw-r--r--  sys/arch/amd64/conf/files.amd64 | 3
-rw-r--r--  sys/arch/arm/arm/cpuswitch.S | 697
-rw-r--r--  sys/arch/arm/arm/genassym.cf | 8
-rw-r--r--  sys/arch/arm/arm/pmap.c | 9
-rw-r--r--  sys/arch/arm/arm/vm_machdep.c | 42
-rw-r--r--  sys/arch/arm/include/pmap.h | 9
-rw-r--r--  sys/arch/aviion/aviion/locore.S | 10
-rw-r--r--  sys/arch/aviion/aviion/machdep.c | 13
-rw-r--r--  sys/arch/hp300/hp300/locore.s | 99
-rw-r--r--  sys/arch/hp300/hp300/vm_machdep.c | 8
-rw-r--r--  sys/arch/hppa/hppa/genassym.cf | 4
-rw-r--r--  sys/arch/hppa/hppa/locore.S | 255
-rw-r--r--  sys/arch/hppa/hppa/vm_machdep.c | 9
-rw-r--r--  sys/arch/hppa/include/cpu.h | 3
-rw-r--r--  sys/arch/i386/i386/cpu.c | 6
-rw-r--r--  sys/arch/i386/i386/genassym.cf | 4
-rw-r--r--  sys/arch/i386/i386/locore.s | 385
-rw-r--r--  sys/arch/i386/i386/mptramp.s | 6
-rw-r--r--  sys/arch/i386/i386/vm_machdep.c | 5
-rw-r--r--  sys/arch/i386/include/frame.h | 3
-rw-r--r--  sys/arch/luna88k/luna88k/locore.S | 35
-rw-r--r--  sys/arch/luna88k/luna88k/machdep.c | 28
-rw-r--r--  sys/arch/m68k/include/cpu.h | 6
-rw-r--r--  sys/arch/m68k/m68k/genassym.cf | 7
-rw-r--r--  sys/arch/m68k/m68k/proc_subr.s | 129
-rw-r--r--  sys/arch/m88k/include/cpu.h | 5
-rw-r--r--  sys/arch/m88k/m88k/genassym.cf | 8
-rw-r--r--  sys/arch/m88k/m88k/m88k_machdep.c | 46
-rw-r--r--  sys/arch/m88k/m88k/process.S | 261
-rw-r--r--  sys/arch/m88k/m88k/vm_machdep.c | 15
-rw-r--r--  sys/arch/mac68k/mac68k/locore.s | 100
-rw-r--r--  sys/arch/mac68k/mac68k/vm_machdep.c | 15
-rw-r--r--  sys/arch/macppc/macppc/cpu.c | 11
-rw-r--r--  sys/arch/macppc/macppc/genassym.cf | 5
-rw-r--r--  sys/arch/macppc/macppc/locore.S | 201
-rw-r--r--  sys/arch/mips64/mips64/context.S | 225
-rw-r--r--  sys/arch/mips64/mips64/db_machdep.c | 13
-rw-r--r--  sys/arch/mips64/mips64/vm_machdep.c | 8
-rw-r--r--  sys/arch/mvme68k/mvme68k/locore.s | 100
-rw-r--r--  sys/arch/mvme68k/mvme68k/vm_machdep.c | 16
-rw-r--r--  sys/arch/mvme88k/mvme88k/locore.S | 10
-rw-r--r--  sys/arch/mvme88k/mvme88k/machdep.c | 11
-rw-r--r--  sys/arch/powerpc/conf/files.powerpc | 3
-rw-r--r--  sys/arch/powerpc/include/pcb.h | 3
-rw-r--r--  sys/arch/powerpc/powerpc/vm_machdep.c | 7
-rw-r--r--  sys/arch/sgi/localbus/macebus.c | 6
-rw-r--r--  sys/arch/sgi/sgi/genassym.cf | 6
-rw-r--r--  sys/arch/sh/include/cpu.h | 7
-rw-r--r--  sys/arch/sh/sh/genassym.cf | 4
-rw-r--r--  sys/arch/sh/sh/locore_c.c | 74
-rw-r--r--  sys/arch/sh/sh/locore_subr.S | 71
-rw-r--r--  sys/arch/sh/sh/sh_machdep.c | 41
-rw-r--r--  sys/arch/solbourne/solbourne/locore.s | 286
-rw-r--r--  sys/arch/sparc/conf/files.sparc | 3
-rw-r--r--  sys/arch/sparc/sparc/locore.s | 281
-rw-r--r--  sys/arch/sparc/sparc/locore2.c | 99
-rw-r--r--  sys/arch/sparc/sparc/vm_machdep.c | 13
-rw-r--r--  sys/arch/sparc64/conf/files.sparc64 | 3
-rw-r--r--  sys/arch/sparc64/sparc64/locore.s | 382
-rw-r--r--  sys/arch/sparc64/sparc64/vm_machdep.c | 10
-rw-r--r--  sys/arch/vax/include/cpu.h | 6
-rw-r--r--  sys/arch/vax/include/macros.h | 15
-rw-r--r--  sys/arch/vax/vax/subr.s | 107
-rw-r--r--  sys/arch/vax/vax/vm_machdep.c | 19
73 files changed, 691 insertions, 4160 deletions
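One more illustration before the per-file hunks: the commit message credits the
wakeup() improvement from O(n^2) to O(n) to replacing the old vax-style
p_forw/p_back queues with TAILQs. The sleep-queue code itself lives outside
sys/arch, so the following toy model only sketches the idea; the struct layout
and all names here are invented for illustration, not taken from the tree:

#include <sys/queue.h>
#include <stddef.h>

/* Toy stand-in for struct proc, not the kernel's definition. */
struct proc {
	TAILQ_ENTRY(proc)	 p_runq;	/* replaces the hand-rolled p_forw/p_back links */
	const volatile void	*p_wchan;	/* what the proc is sleeping on */
};

TAILQ_HEAD(slpque, proc);

/*
 * Toy wakeup(): a single linear pass over the bucket.  Because a TAILQ
 * entry carries its own back pointer, each sleeper is unlinked in O(1)
 * mid-scan, so waking n procs costs O(n); the commit message notes the
 * old vax-style queues made this O(n^2).
 */
void
wakeup_toy(struct slpque *qp, const volatile void *ident)
{
	struct proc *p, *nextp;

	for (p = TAILQ_FIRST(qp); p != NULL; p = nextp) {
		nextp = TAILQ_NEXT(p, p_runq);
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_runq);	/* O(1) unlink */
			p->p_wchan = NULL;
			/* the real code would now make p runnable */
		}
	}
}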
diff --git a/sys/arch/alpha/alpha/genassym.cf b/sys/arch/alpha/alpha/genassym.cf
index cddc3760215..d8393934035 100644
--- a/sys/arch/alpha/alpha/genassym.cf
+++ b/sys/arch/alpha/alpha/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.10 2007/05/28 23:10:10 beck Exp $
+# $OpenBSD: genassym.cf,v 1.11 2007/10/10 15:53:51 art Exp $
# Copyright (c) 1994, 1995 Gordon W. Ross
# Copyright (c) 1993 Adam Glass
@@ -103,17 +103,12 @@ export ALPHA_PTE_KW
# Important offsets into the proc struct & associated constants
struct proc
-member p_forw
-member p_back
member p_addr
member p_vmspace
member p_stat
member P_MD_FLAGS p_md.md_flags
member P_MD_PCBPADDR p_md.md_pcbpaddr
member p_cpu
-struct prochd
-member ph_link
-member ph_rlink
export SONPROC
diff --git a/sys/arch/alpha/alpha/locore.s b/sys/arch/alpha/alpha/locore.s
index 6b9f238138f..fe7453cec66 100644
--- a/sys/arch/alpha/alpha/locore.s
+++ b/sys/arch/alpha/alpha/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.30 2007/05/28 23:10:10 beck Exp $ */
+/* $OpenBSD: locore.s,v 1.31 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: locore.s,v 1.94 2001/04/26 03:10:44 ross Exp $ */
/*-
@@ -716,123 +716,38 @@ LEAF(savectx, 1)
/**************************************************************************/
-IMPORT(whichqs, 4)
-
-/*
- * When no processes are on the runq, cpu_switch branches to idle
- * to wait for something to come ready.
- * Note: this is really a part of cpu_switch() but defined here for kernel
- * profiling.
- */
-LEAF(idle, 0)
- br pv, 1f
-1: LDGP(pv)
- /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
- GET_CURPROC
- stq zero, 0(v0) /* curproc <- NULL for stats */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_unlock_idle) /* release sched_lock */
-#endif
- mov zero, a0 /* enable all interrupts */
- call_pal PAL_OSF1_swpipl
-2: ldl t0, whichqs /* look for non-empty queue */
- beq t0, 2b
- ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
- call_pal PAL_OSF1_swpipl
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_lock_idle) /* acquire sched_lock */
-#endif
- jmp zero, cpu_switch_queuescan /* jump back into the fire */
- END(idle)
-
/*
- * cpu_switch()
- * Find the highest priority process and resume it.
+ * cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from "old" proc to "new".
*/
-LEAF(cpu_switch, 0)
+LEAF(cpu_switchto, 2)
LDGP(pv)
- /*
- * do an inline savectx(), to save old context
- * Note: GET_CURPROC clobbers v0, t0, t8...t11.
- */
- GET_CURPROC
- ldq a0, 0(v0)
- ldq a1, P_ADDR(a0)
- /* NOTE: ksp is stored by the swpctx */
- stq s0, U_PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
- stq s1, U_PCB_CONTEXT+(1 * 8)(a1)
- stq s2, U_PCB_CONTEXT+(2 * 8)(a1)
- stq s3, U_PCB_CONTEXT+(3 * 8)(a1)
- stq s4, U_PCB_CONTEXT+(4 * 8)(a1)
- stq s5, U_PCB_CONTEXT+(5 * 8)(a1)
- stq s6, U_PCB_CONTEXT+(6 * 8)(a1)
- stq ra, U_PCB_CONTEXT+(7 * 8)(a1) /* store ra */
- call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
- stq v0, U_PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
- mov a0, s0 /* save old curproc */
- mov a1, s1 /* save old U-area */
-
-cpu_switch_queuescan:
- br pv, 1f
-1: LDGP(pv)
- ldl t0, whichqs /* look for non-empty queue */
- beq t0, idle /* and if none, go idle */
- mov t0, t3 /* t3 = saved whichqs */
- mov zero, t2 /* t2 = lowest bit set */
- blbs t0, 3f /* if low bit set, done! */
-
-2: srl t0, 1, t0 /* try next bit */
- addq t2, 1, t2
- blbc t0, 2b /* if clear, try again */
-
-3: /*
- * Remove process from queue
- */
- lda t1, qs /* get queues */
- sll t2, 4, t0 /* queue head is 16 bytes */
- addq t1, t0, t0 /* t0 = qp = &qs[firstbit] */
-
- ldq t4, PH_LINK(t0) /* t4 = p = highest pri proc */
- bne t4, 4f /* make sure p != NULL */
- PANIC("cpu_switch",Lcpu_switch_pmsg) /* nothing in queue! */
-
-4:
- ldq t5, P_FORW(t4) /* t5 = p->p_forw */
- stq t5, PH_LINK(t0) /* qp->ph_link = p->p_forw */
- stq t0, P_BACK(t5) /* p->p_forw->p_back = qp */
- stq zero, P_BACK(t4) /* firewall: p->p_back = NULL */
- cmpeq t0, t5, t0 /* see if queue is empty */
- beq t0, 5f /* nope, it's not! */
-
- ldiq t0, 1 /* compute bit in whichqs */
- sll t0, t2, t0
- xor t3, t0, t3 /* clear bit in whichqs */
- stl t3, whichqs
-
-5:
- mov t4, s2 /* save new proc */
- ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
- * Done mucking with the run queues, release the
- * scheduler lock, but keep interrupts out.
+ * Don't bother saving the old context if oldproc is NULL.
*/
- CALL(sched_unlock_idle)
-#endif
+ beq a0, 1f
/*
- * Check to see if we're switching to ourself. If we are,
- * don't bother loading the new context.
- *
- * Note that even if we re-enter cpu_switch() from idle(),
- * s0 will still contain the old curproc value because any
- * users of that register between then and now must have
- * saved it. Also note that switch_exit() ensures that
- * s0 is clear before jumping here to find a new process.
+ * do an inline savectx(), to save old context
*/
- cmpeq s0, s2, t0 /* oldproc == newproc? */
- bne t0, 7f /* Yes! Skip! */
+ call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
+ ldq t0, P_ADDR(a0)
+ /* NOTE: ksp is stored by the swpctx */
+ stq s0, U_PCB_CONTEXT+(0 * 8)(t0) /* store s0 - s6 */
+ stq s1, U_PCB_CONTEXT+(1 * 8)(t0)
+ stq s2, U_PCB_CONTEXT+(2 * 8)(t0)
+ stq s3, U_PCB_CONTEXT+(3 * 8)(t0)
+ stq s4, U_PCB_CONTEXT+(4 * 8)(t0)
+ stq s5, U_PCB_CONTEXT+(5 * 8)(t0)
+ stq s6, U_PCB_CONTEXT+(6 * 8)(t0)
+ stq ra, U_PCB_CONTEXT+(7 * 8)(t0) /* store ra */
+ stq v0, U_PCB_CONTEXT+(8 * 8)(t0) /* store ps, for ipl */
+
+1:
+ mov a0, s0 /* save old proc */
+ mov a1, s2 /* save new proc */
+ ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
/*
* Deactivate the old address space before activating the
@@ -842,18 +757,18 @@ cpu_switch_queuescan:
* do this after we activate, then we might end up
* incorrectly marking the pmap inactive!
*
- * We don't deactivate if we came here from switch_exit
+ * We don't deactivate if we came here from sched_exit
* (old pmap no longer exists; vmspace has been freed).
* oldproc will be NULL in this case. We have actually
* taken care of calling pmap_deactivate() in cpu_exit(),
* before the vmspace went away.
*/
- beq s0, 6f
+ beq s0, 2f
mov s0, a0 /* pmap_deactivate(oldproc) */
CALL(pmap_deactivate)
-6: /*
+2: /*
* Activate the new process's address space and perform
* the actual context swap.
*/
@@ -864,7 +779,7 @@ cpu_switch_queuescan:
mov s3, a0 /* swap the context */
SWITCH_CONTEXT
-7: /*
+ /*
* Now that the switch is done, update curproc and other
* globals. We must do this even if switching to ourselves
* because we might have re-entered cpu_switch() from idle(),
@@ -911,7 +826,7 @@ EXPORT(__bwx_switch1)
ldiq v0, 1 /* possible ret to savectx() */
RET
- END(cpu_switch)
+ END(cpu_switchto)
#ifndef SMALL_KERNEL
/*
@@ -926,6 +841,18 @@ EXPORT(__bwx_switch2)
EXPORT(__bwx_switch3)
#endif
+LEAF(cpu_idle_enter, 0)
+ RET
+ END(cpu_idle_enter)
+
+LEAF(cpu_idle_cycle, 0)
+ RET
+ END(cpu_idle_cycle)
+
+LEAF(cpu_idle_leave, 0)
+ RET
+ END(cpu_idle_leave)
+
/*
* switch_trampoline()
*
@@ -945,46 +872,6 @@ LEAF(switch_trampoline, 0)
jmp zero, (pv)
END(switch_trampoline)
-/*
- * switch_exit(struct proc *p)
- * Make a the named process exit. Partially switch to our idle thread
- * (we don't update curproc or restore registers), and jump into the middle
- * of cpu_switch to switch into a few process. The process reaper will
- * free the dead process's VM resources. MUST BE CALLED AT SPLHIGH.
- */
-LEAF(switch_exit, 1)
- LDGP(pv)
-
- /* save the exiting proc pointer */
- mov a0, s2
-
- /* Switch to our idle stack. */
- GET_IDLE_PCB(a0) /* clobbers v0, t0, t8-t11 */
- SWITCH_CONTEXT
-
- /*
- * Now running as idle thread, except for the value of 'curproc' and
- * the saved regs.
- */
-
- /* Schedule the vmspace and stack to be freed. */
- mov s2, a0
- CALL(exit2)
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_lock_idle) /* acquire sched_lock */
-#endif
-
- /*
- * Now jump back into the middle of cpu_switch(). Note that
- * we must clear s0 to guarantee that the check for switching
- * to ourselves in cpu_switch() will fail. This is safe since
- * s0 will be restored when a new process is resumed.
- */
- mov zero, s0
- jmp zero, cpu_switch_queuescan
- END(switch_exit)
-
/**************************************************************************/
/*
diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c
index ce9d237b8d8..4f7836c9de1 100644
--- a/sys/arch/alpha/alpha/machdep.c
+++ b/sys/arch/alpha/alpha/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.112 2007/09/15 10:10:37 martin Exp $ */
+/* $OpenBSD: machdep.c,v 1.113 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */
/*-
@@ -1874,66 +1874,6 @@ spl0()
}
/*
- * The following primitives manipulate the run queues. _whichqs tells which
- * of the 32 queues _qs have processes in them. Setrunqueue puts processes
- * into queues, Remrunqueue removes them from queues. The running process is
- * on no queue, other processes are on a queue related to p->p_priority,
- * divided by 4 actually to shrink the 0-127 range of priorities into the 32
- * available queues.
- */
-/*
- * setrunqueue(p)
- * proc *p;
- *
- * Call should be made at splclock(), and p->p_stat should be SRUN.
- */
-
-/* XXXART - grmble */
-#define sched_qs qs
-#define sched_whichqs whichqs
-
-void
-setrunqueue(p)
- struct proc *p;
-{
- int bit;
-
- /* firewall: p->p_back must be NULL */
- if (p->p_back != NULL)
- panic("setrunqueue");
-
- bit = p->p_priority >> 2;
- sched_whichqs |= (1 << bit);
- p->p_forw = (struct proc *)&sched_qs[bit];
- p->p_back = sched_qs[bit].ph_rlink;
- p->p_back->p_forw = p;
- sched_qs[bit].ph_rlink = p;
-}
-
-/*
- * remrunqueue(p)
- *
- * Call should be made at splclock().
- */
-void
-remrunqueue(p)
- struct proc *p;
-{
- int bit;
-
- bit = p->p_priority >> 2;
- if ((sched_whichqs & (1 << bit)) == 0)
- panic("remrunqueue");
-
- p->p_back->p_forw = p->p_forw;
- p->p_forw->p_back = p->p_back;
- p->p_back = NULL; /* for firewall checking. */
-
- if ((struct proc *)&sched_qs[bit] == sched_qs[bit].ph_link)
- sched_whichqs &= ~(1 << bit);
-}
-
-/*
* Wait "n" microseconds.
*/
void
diff --git a/sys/arch/alpha/alpha/vm_machdep.c b/sys/arch/alpha/alpha/vm_machdep.c
index 16191729b90..eda0437fb46 100644
--- a/sys/arch/alpha/alpha/vm_machdep.c
+++ b/sys/arch/alpha/alpha/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.34 2007/09/03 01:24:22 krw Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.35 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.55 2000/03/29 03:49:48 simonb Exp $ */
/*
@@ -114,9 +114,7 @@ cpu_exit(p)
* vmspace's context until the switch to proc0 in switch_exit().
*/
pmap_deactivate(p);
-
- (void) splhigh();
- switch_exit(p);
+ sched_exit(p);
/* NOTREACHED */
}
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index 2f29892d219..554a62ef344 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.14 2007/09/17 15:34:38 chl Exp $ */
+/* $OpenBSD: cpu.c,v 1.15 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@@ -345,6 +345,7 @@ cpu_attach(struct device *parent, struct device *self, void *aux)
#if defined(MULTIPROCESSOR)
cpu_intr_init(ci);
gdt_alloc_cpu(ci);
+ sched_init_cpu(ci);
cpu_start_secondary(ci);
ncpus++;
if (ci->ci_flags & CPUF_PRESENT) {
@@ -540,6 +541,9 @@ cpu_hatch(void *v)
microuptime(&ci->ci_schedstate.spc_runtime);
splx(s);
+
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
#if defined(DDB)
diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index 1632d9b7c8f..f591049c3dc 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.14 2007/06/01 21:01:51 art Exp $
+# $OpenBSD: genassym.cf,v 1.15 2007/10/10 15:53:51 art Exp $
# Written by Artur Grabowski art@openbsd.org, Public Domain
include <sys/param.h>
@@ -33,8 +33,6 @@ define UVM_PAGE_IDLE_ZERO offsetof(struct uvm, page_idle_zero)
struct proc
member p_addr
-member p_back
-member p_forw
member p_priority
member p_stat
member p_wchan
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index e44923662e8..7a382441870 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.23 2007/09/12 18:18:27 deraadt Exp $ */
+/* $OpenBSD: locore.S,v 1.24 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -723,13 +723,8 @@ ENTRY(longjmp)
* actually to shrink the 0-127 range of priorities into the 32 available
* queues.
*/
- .globl _C_LABEL(whichqs),_C_LABEL(qs)
.globl _C_LABEL(uvmexp),_C_LABEL(panic)
-#if NAPM > 0
- .globl _C_LABEL(apm_cpu_idle),_C_LABEL(apm_cpu_busy)
-#endif
-
#ifdef DIAGNOSTIC
NENTRY(switch_error1)
movabsq $1f,%rdi
@@ -749,12 +744,10 @@ NENTRY(switch_error3)
#endif /* DIAGNOSTIC */
/*
- * int cpu_switch(struct proc *)
- * Find a runnable process and switch to it. Wait if necessary. If the new
- * proc is the same as the old one, we short-circuit the context save and
- * restore.
+ * int cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from "old" proc to "new".
*/
-ENTRY(cpu_switch)
+ENTRY(cpu_switchto)
pushq %rbx
pushq %rbp
pushq %r12
@@ -762,163 +755,12 @@ ENTRY(cpu_switch)
pushq %r14
pushq %r15
- movq %rdi,%r13
-
- /*
- * Clear curproc so that we don't accumulate system time while idle.
- * This also insures that schedcpu() will move the old proc to
- * the correct queue if it happens to get called from the spllower()
- * below and changes the priority. (See corresponding comment in
- * userret()).
- */
- movq $0,CPUVAR(CURPROC)
-
-
- /*
- * First phase: find new proc.
- *
- * Registers:
- * %rax - queue head, scratch, then zero
- * %r8 - queue number
- * %ecx - cached value of whichqs
- * %rdx - next process in queue
- * %r13 - old proc
- * %r12 - new proc
- */
-
- /* Look for new proc. */
- cli # splhigh doesn't do a cli
- movl _C_LABEL(whichqs)(%rip),%ecx
- bsfl %ecx,%r8d # find a full q
- jnz switch_dequeue
-
- /*
- * idling: save old context
- *
- * Registers:
- * %rax, %rcx - scratch
- * %r13 - old proc, then old pcb
- * %r12 - idle pcb
- */
-
- /* old proc still in %rdi */
- call _C_LABEL(pmap_deactivate)
-
- movq P_ADDR(%r13),%r13
-
- /* Save stack pointers */
-
- movq %rsp,PCB_RSP(%r13)
- movq %rbp,PCB_RBP(%r13)
-
- /* Find idle PCB for this CPU */
-#ifndef MULTIPROCESSOR
- leaq _C_LABEL(proc0)(%rip),%rcx
- movq P_ADDR(%rcx),%r12
- movl P_MD_TSS_SEL(%rcx),%edx
-#else
- movq CPUVAR(IDLE_PCB),%r12
- movl CPUVAR(IDLE_TSS_SEL),%edx
-#endif
- movq $0,CPUVAR(CURPROC)
-
- /* Restore the idle context (avoid interrupts) */
- cli
-
- /* Restore stack pointers. */
- movq PCB_RSP(%r12),%rsp
- movq PCB_RBP(%r12),%rbp
-
- /* Switch address space. */
- movq PCB_CR3(%r12),%rcx
- movq %rcx,%cr3
-
-#ifdef MULTIPROCESSOR
- movq CPUVAR(GDT),%rax
-#else
- movq _C_LABEL(gdtstore)(%rip),%rax
-#endif
-
- /* Switch TSS. Reset "task busy" flag before */
- andl $~0x0200,4(%rax,%rdx, 1)
- ltr %dx
-
- /* Restore cr0 (including FPU state). */
- movl PCB_CR0(%r12),%ecx
- movq %rcx,%cr0
-
- SET_CURPCB(%r12)
-
- xorq %r13,%r13
- sti
-idle_unlock:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_unlock_idle)
-#endif
- /* Interrupts are okay again. */
- movl $IPL_NONE,%edi
- call _C_LABEL(Xspllower)
- jmp idle_start
-idle_zero:
- sti
- call _C_LABEL(uvm_pageidlezero)
- cli
- cmpl $0,_C_LABEL(whichqs)(%rip)
- jnz idle_exit
-idle_loop:
-#if NPCTR > 0
- incq _C_LABEL(pctr_idlcnt)
-#endif
- /* Try to zero some pages. */
- movl _C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO(%rip),%ecx
- testl %ecx,%ecx
- jnz idle_zero
- sti
- hlt
-NENTRY(mpidle)
-idle_start:
- cli
- cmpl $0,_C_LABEL(whichqs)(%rip)
- jz idle_loop
-idle_exit:
- movl $IPL_HIGH,CPUVAR(ILEVEL)
- sti
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_lock_idle)
-#endif
-switch_search:
- movl _C_LABEL(whichqs)(%rip),%ecx
- bsfl %ecx,%r8d
- jz idle_unlock
-
-switch_dequeue:
-
- sti
- movq %r8,%r9
-
- shlq $4, %r9
- leaq _C_LABEL(qs)(%rip),%rax
- addq %r9,%rax
- /* movq (%rax),%rax */
-
- movq P_FORW(%rax),%r12 # unlink from front of process q
-#ifdef DIAGNOSTIC
- cmpq %r12,%rax # linked to self (i.e. nothing queued)?
- je _C_LABEL(switch_error1) # not possible
-#endif /* DIAGNOSTIC */
- movq P_FORW(%r12),%rdx
- movq %rdx,P_FORW(%rax)
- movq %rax,P_BACK(%rdx)
-
- cmpq %rdx,%rax # q empty?
- jne 3f
-
- btrl %r8d,%ecx # yes, clear to indicate empty
- movl %ecx,_C_LABEL(whichqs)(%rip) # update q status
+ movq %rdi, %r13
+ movq %rsi, %r12
-3: /* We just did it. */
xorq %rax,%rax
movl %eax,CPUVAR(RESCHED)
+
switch_resume:
#ifdef DIAGNOSTIC
cmpq %rax,P_WCHAN(%r12)
@@ -927,24 +769,16 @@ switch_resume:
jne _C_LABEL(switch_error3)
#endif
- /* Isolate proc. XXX Is this necessary? */
- movq %rax,P_BACK(%r12)
-
/* Record new proc. */
movb $SONPROC,P_STAT(%r12) # p->p_stat = SONPROC
SET_CURPROC(%r12,%rcx)
- /* Skip context switch if same proc. */
- xorl %ebx,%ebx
- cmpq %r12,%r13
- je switch_return
-
/* If old proc exited, don't bother. */
testq %r13,%r13
jz switch_exited
/*
- * Second phase: save old context.
+ * Save old context.
*
* Registers:
* %rax, %rcx - scratch
@@ -963,7 +797,7 @@ switch_resume:
switch_exited:
/*
- * Third phase: restore saved context.
+ * Restore saved context.
*
* Registers:
* %rax, %rcx, %rdx - scratch
@@ -1020,15 +854,13 @@ switch_restored:
sti
switch_return:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_unlock_idle)
-#endif
+#if 0
+ /* Violation of lock ordering, since we're holding the sched_lock */
movl $IPL_NONE,%edi
call _C_LABEL(Xspllower)
movl $IPL_HIGH,CPUVAR(ILEVEL)
-
- movl %ebx,%eax
+#endif
popq %r15
popq %r14
@@ -1038,87 +870,16 @@ switch_return:
popq %rbx
ret
-ENTRY(cpu_switchto)
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-
- movq %rdi,%r13
- movq %rsi,%r12
-
- movq $0,CPUVAR(CURPROC)
-
- xorq %rax,%rax
- jmp switch_resume
-
-
-/*
- * void switch_exit(struct proc *l, void (*exit)(struct proc *));
- * Switch to proc0's saved context and deallocate the address space and kernel
- * stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
- */
- .globl _C_LABEL(proc0)
-ENTRY(switch_exit)
-#ifdef MULTIPROCESSOR
- movq CPUVAR(IDLE_PCB),%r8
- movl CPUVAR(IDLE_TSS_SEL),%edx
-#else
- leaq _C_LABEL(proc0)(%rip),%r9
- movq P_ADDR(%r9),%r8
- movl P_MD_TSS_SEL(%r9),%edx
-#endif
-
- /* In case we fault... */
- movq $0,CPUVAR(CURPROC)
-
- cli
-
- /* Restore stack pointers. */
- movq PCB_RSP(%r8),%rsp
- movq PCB_RBP(%r8),%rbp
-
- /* Load TSS info. */
-#ifdef MULTIPROCESSOR
- movq CPUVAR(GDT),%rax
-#else
- movq _C_LABEL(gdtstore)(%rip),%rax
-#endif
-
- /* Switch address space. */
- movq PCB_CR3(%r8),%rcx
- movq %rcx,%cr3
-
- /* Switch TSS. */
- andl $~0x0200,4-SEL_KPL(%rax,%rdx,1)
- ltr %dx
-
- /* We're always in the kernel, so we don't need the LDT. */
-
- /* Restore cr0 (including FPU state). */
- movl PCB_CR0(%r8),%ecx
- movq %rcx,%cr0
-
- /* Record new pcb. */
- SET_CURPCB(%r8)
-
- /* Interrupts are okay again. */
- sti
+ENTRY(cpu_idle_enter)
+ ret
- /*
- * Schedule the dead process's vmspace and stack to be freed.
- * {lpw_}exit2(l). Function still in %rsi (2nd arg), proc in
- * %rdi (first arg).
- */
+ENTRY(cpu_idle_cycle)
+ hlt
+ ret
- call *%rsi
+ENTRY(cpu_idle_leave)
+ ret
- /* Jump into cpu_switch() with the right state. */
- xorq %r13,%r13
- movq %r13, CPUVAR(CURPROC)
- jmp switch_search
/*
* savectx(struct pcb *pcb);
diff --git a/sys/arch/amd64/amd64/mptramp.S b/sys/arch/amd64/amd64/mptramp.S
index 397c672f36c..20346084081 100644
--- a/sys/arch/amd64/amd64/mptramp.S
+++ b/sys/arch/amd64/amd64/mptramp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: mptramp.S,v 1.3 2005/07/26 08:38:29 art Exp $ */
+/* $OpenBSD: mptramp.S,v 1.4 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: mptramp.S,v 1.1 2003/04/26 18:39:30 fvdl Exp $ */
/*-
@@ -235,5 +235,4 @@ _C_LABEL(cpu_spinup_trampoline_end): #end of code copied to MP_TRAMPOLINE
movl PCB_CR0(%rsi),%eax
movq %rax,%cr0
call _C_LABEL(cpu_hatch)
- xorq %r13,%r13
- jmp _C_LABEL(mpidle)
+ /* NOTREACHED */
diff --git a/sys/arch/amd64/amd64/vm_machdep.c b/sys/arch/amd64/amd64/vm_machdep.c
index 7cb5179c909..1b6fa8463b8 100644
--- a/sys/arch/amd64/amd64/vm_machdep.c
+++ b/sys/arch/amd64/amd64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.10 2007/05/27 20:59:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.11 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */
/*-
@@ -169,12 +169,8 @@ cpu_exit(struct proc *p)
if (p->p_md.md_flags & MDP_USEDMTRR)
mtrr_clean(p);
- /*
- * No need to do user LDT cleanup here; it's handled in
- * pmap_destroy().
- */
-
- switch_exit(p, exit2);
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64
index 3c5fa842f78..6cd06816c70 100644
--- a/sys/arch/amd64/conf/files.amd64
+++ b/sys/arch/amd64/conf/files.amd64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.amd64,v 1.35 2007/10/07 18:41:07 mbalmer Exp $
+# $OpenBSD: files.amd64,v 1.36 2007/10/10 15:53:51 art Exp $
maxpartitions 16
maxusers 2 16 128
@@ -18,7 +18,6 @@ file arch/amd64/amd64/syscall.c
file arch/amd64/amd64/trap.c
file arch/amd64/amd64/vm_machdep.c
file arch/amd64/amd64/fpu.c
-file arch/amd64/amd64/Locore.c
file arch/amd64/amd64/softintr.c
file arch/amd64/amd64/i8259.c
file arch/amd64/amd64/cacheinfo.c
diff --git a/sys/arch/arm/arm/cpuswitch.S b/sys/arch/arm/arm/cpuswitch.S
index d94219d88c0..544f7c27243 100644
--- a/sys/arch/arm/arm/cpuswitch.S
+++ b/sys/arch/arm/arm/cpuswitch.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpuswitch.S,v 1.7 2007/05/14 07:07:09 art Exp $ */
+/* $OpenBSD: cpuswitch.S,v 1.8 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
/*
@@ -123,142 +123,11 @@
.text
-.Lwhichqs:
- .word _C_LABEL(whichqs)
-
-.Lqs:
- .word _C_LABEL(qs)
-
-/*
- * On entry
- * r0 = process
- */
-
-ENTRY(setrunqueue)
- /*
- * Local register usage
- * r0 = process
- * r1 = queue
- * r2 = &qs[queue] and temp
- * r3 = temp
- * r12 = whichqs
- */
-#ifdef DIAGNOSTIC
- ldr r1, [r0, #(P_BACK)]
- teq r1, #0x00000000
- bne Lsetrunqueue_erg
-
- ldr r1, [r0, #(P_WCHAN)]
- teq r1, #0x00000000
- bne Lsetrunqueue_erg
-#endif
-
- /* Get the priority of the queue */
- ldrb r1, [r0, #(P_PRIORITY)]
- mov r1, r1, lsr #2
-
- /* Indicate that there is a process on this queue */
- ldr r12, .Lwhichqs
- ldr r2, [r12]
- mov r3, #0x00000001
- mov r3, r3, lsl r1
- orr r2, r2, r3
- str r2, [r12]
-
- /* Get the address of the queue */
- ldr r2, .Lqs
- add r1, r2, r1, lsl # 3
-
- /* Hook the process in */
- str r1, [r0, #(P_FORW)]
- ldr r2, [r1, #(P_BACK)]
-
- str r0, [r1, #(P_BACK)]
-#ifdef DIAGNOSTIC
- teq r2, #0x00000000
- beq Lsetrunqueue_erg
-#endif
- str r0, [r2, #(P_FORW)]
- str r2, [r0, #(P_BACK)]
-
- mov pc, lr
-
-#ifdef DIAGNOSTIC
-Lsetrunqueue_erg:
- mov r2, r1
- mov r1, r0
- add r0, pc, #Ltext1 - . - 8
- bl _C_LABEL(printf)
-
- ldr r2, .Lqs
- ldr r1, [r2]
- add r0, pc, #Ltext2 - . - 8
- b _C_LABEL(panic)
-
-Ltext1:
- .asciz "setrunqueue : %08x %08x\n"
-Ltext2:
- .asciz "setrunqueue : [qs]=%08x qs=%08x\n"
- .align 0
-#endif
-
-/*
- * On entry
- * r0 = process
- */
-
-ENTRY(remrunqueue)
- /*
- * Local register usage
- * r0 = oldproc
- * r1 = queue
- * r2 = &qs[queue] and scratch
- * r3 = scratch
- * r12 = whichqs
- */
-
- /* Get the priority of the queue */
- ldrb r1, [r0, #(P_PRIORITY)]
- mov r1, r1, lsr #2
-
- /* Unhook the process */
- ldr r2, [r0, #(P_FORW)]
- ldr r3, [r0, #(P_BACK)]
-
- str r3, [r2, #(P_BACK)]
- str r2, [r3, #(P_FORW)]
-
- /* If the queue is now empty clear the queue not empty flag */
- teq r2, r3
-
- /* This could be reworked to avoid the use of r4 */
- ldreq r12, .Lwhichqs
- ldreq r2, [r12]
- moveq r3, #0x00000001
- moveq r3, r3, lsl r1
- biceq r2, r2, r3
- streq r2, [r12]
-
- /* Remove the back pointer for the process */
- mov r1, #0x00000000
- str r1, [r0, #(P_BACK)]
-
- mov pc, lr
-
-
-/*
- * cpuswitch()
- *
- * preforms a process context switch.
- * This function has several entry points
- */
-
.Lcpu_info_store:
.word _C_LABEL(cpu_info_store)
.Lcurproc:
.word _C_LABEL(cpu_info_store) + CI_CURPROC
-
.Lwant_resched:
.word _C_LABEL(want_resched)
@@ -289,320 +158,123 @@ _C_LABEL(curpcb):
/*
* Idle loop, exercised while waiting for a process to wake up.
- *
- * NOTE: When we jump back to .Lswitch_search, we must have a
- * pointer to whichqs in r7, which is what it is when we arrive
- * here.
*/
-/* LINTSTUB: Ignore */
-ASENTRY_NP(idle)
- ldr r6, .Lcpu_do_powersave
- IRQenable /* Enable interrupts */
- ldr r6, [r6] /* r6 = cpu_do_powersave */
+ENTRY(cpu_idle_enter)
+ stmfd sp!, {lr}
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(sched_unlock_idle)
-#endif
+ IRQenable /* Enable interrupts */
/* Drop to spl0 (returns the current spl level in r0). */
mov r0, #(IPL_NONE)
bl _C_LABEL(_spllower)
+ ldmfd sp!, {pc}
+
+ENTRY(cpu_idle_cycle)
+ stmfd sp!, {r6, lr}
+
+ ldr r6, .Lcpu_do_powersave
+ ldr r6, [r6] /* r6 = cpu_do_powersave */
+
teq r6, #0 /* cpu_do_powersave non zero? */
ldrne r6, .Lcpufuncs
- mov r4, r0 /* Old interrupt level to r4 */
ldrne r6, [r6, #(CF_SLEEP)]
- /*
- * Main idle loop.
- * r6 points to power-save idle function if required, else NULL.
- */
-1: ldr r3, [r7] /* r3 = sched_whichqs */
- teq r3, #0
- bne 2f /* We have work to do */
teq r6, #0 /* Powersave idle? */
- beq 1b /* Nope. Just sit-n-spin. */
+ beq 1f /* Nope. Just continue. */
/*
- * Before going into powersave idle mode, disable interrupts
- * and check sched_whichqs one more time.
+ * Before going into powersave idle mode, disable interrupts.
*/
IRQdisableALL
- ldr r3, [r7]
- mov r0, #0
- teq r3, #0 /* sched_whichqs still zero? */
- moveq lr, pc
- moveq pc, r6 /* If so, do powersave idle */
+ mov lr, pc
+ mov pc, r6 /* If so, do powersave idle */
IRQenableALL
- b 1b /* Back around */
- /*
- * sched_whichqs indicates that at least one proc is ready to run.
- * Restore the original interrupt priority level, grab the
- * scheduler lock if necessary, and jump back into cpu_switch.
- */
-2: mov r0, r4
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(splx)
- adr lr, .Lswitch_search
- b _C_LABEL(sched_lock_idle)
-#else
- adr lr, .Lswitch_search
- b _C_LABEL(splx)
-#endif
+1: ldmfd sp!, {r6, pc}
+
+ENTRY(cpu_idle_leave)
+ stmfd sp!, {lr}
+
+ mov r0, #(IPL_SCHED)
+ bl _C_LABEL(_splraise)
+
+ ldmfd sp!, {pc}
/*
- * Find a new lwp to run, save the current context and
- * load the new context
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
+ *
+ * Performs a process context switch from oldproc (which may be NULL)
+ * to newproc.
*
* Arguments:
- * r0 'struct proc *' of the current LWP
+ * r0 'struct proc *' of the context to switch from
+ * r1 'struct proc *' of the context to switch to
*/
-ENTRY(cpu_switch)
-/*
- * Local register usage. Some of these registers are out of date.
- * r1 = oldproc
- * r2 = spl level
- * r3 = whichqs
- * r4 = queue
- * r5 = &qs[queue]
- * r6 = newlwp
- * r7 = scratch
- */
+ENTRY(cpu_switchto)
stmfd sp!, {r4-r7, lr}
- /*
- * Indicate that there is no longer a valid process (curlwp = 0).
- * Zero the current PCB pointer while we're at it.
- */
- ldr r7, .Lcurproc
- ldr r6, .Lcurpcb
- mov r2, #0x00000000
- str r2, [r7] /* curproc = NULL */
- str r2, [r6] /* curpcb = NULL */
-
- /* stash the old proc while we call functions */
- mov r5, r0
-
- /* First phase : find a new proc */
- ldr r7, .Lwhichqs
-
- /* rem: r5 = old proc */
- /* rem: r7 = &whichqs */
-
-.Lswitch_search:
- IRQdisable
-
- /* Do we have any active queues */
- ldr r3, [r7]
-
- /* If not we must idle until we do. */
- teq r3, #0x00000000
- beq _ASM_LABEL(idle)
-
- /* put old proc back in r1 */
- mov r1, r5
-
- /* rem: r1 = old proc */
- /* rem: r3 = whichqs */
- /* rem: interrupts are disabled */
-
- /* used further down, saves SA stall */
- ldr r6, .Lqs
-
- /*
- * We have found an active queue. Currently we do not know which queue
- * is active just that one of them is.
- */
- /* Non-Xscale version of the ffs algorithm devised by d.seal and
- * posted to comp.sys.arm on 16 Feb 1994.
- */
- rsb r5, r3, #0
- ands r0, r3, r5
-
-#ifndef __XSCALE__
- adr r5, .Lcpu_switch_ffs_table
-
- /* X = R0 */
- orr r4, r0, r0, lsl #4 /* r4 = X * 0x11 */
- orr r4, r4, r4, lsl #6 /* r4 = X * 0x451 */
- rsb r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */
-
- /* now lookup in table indexed on top 6 bits of a4 */
- ldrb r4, [ r5, r4, lsr #26 ]
-
-#else /* __XSCALE__ */
- clz r4, r0
- rsb r4, r4, #31
-#endif /* __XSCALE__ */
-
- /* rem: r0 = bit mask of chosen queue (1 << r4) */
- /* rem: r1 = old proc */
- /* rem: r3 = whichqs */
- /* rem: r4 = queue number */
- /* rem: interrupts are disabled */
-
- /* Get the address of the queue (&qs[queue]) */
- add r5, r6, r4, lsl #3
-
- /*
- * Get the proc from the queue and place the next process in
- * the queue at the head. This basically unlinks the lwp at
- * the head of the queue.
- */
- ldr r6, [r5, #(P_FORW)]
-
-#ifdef DIAGNOSTIC
- cmp r6, r5
- beq .Lswitch_bogons
-#endif
-
- /* rem: r6 = new proc */
- ldr r7, [r6, #(P_FORW)]
- str r7, [r5, #(P_FORW)]
-
- /*
- * Test to see if the queue is now empty. If the head of the queue
- * points to the queue itself then there are no more procs in
- * the queue. We can therefore clear the queue not empty flag held
- * in r3.
- */
-
- teq r5, r7
- biceq r3, r3, r0
-
- /* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED AN MORE */
-
- /* Fix the back pointer for the lwp now at the head of the queue. */
- ldr r0, [r6, #(P_BACK)]
- str r0, [r7, #(P_BACK)]
-
- /* Update the RAM copy of the queue not empty flags word. */
- ldreq r7, .Lwhichqs
- streq r3, [r7]
-
- /* rem: r1 = old proc */
- /* rem: r3 = whichqs - NOT NEEDED ANY MORE */
- /* rem: r4 = queue number - NOT NEEDED ANY MORE */
- /* rem: r6 = new proc */
- /* rem: interrupts are disabled */
-
/* Clear the want_resched flag */
ldr r7, .Lwant_resched
- mov r0, #0x00000000
- str r0, [r7]
-
- /*
- * Clear the back pointer of the proc we have removed from
- * the head of the queue. The new proc is isolated now.
- */
- str r0, [r6, #(P_BACK)]
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- /*
- * unlock the sched_lock, but leave interrupts off, for now.
- */
- mov r7, r1
- bl _C_LABEL(sched_unlock_idle)
- mov r1, r7
-#endif
-
-
-.Lswitch_resume:
- /* rem: r1 = old proc */
- /* rem: r4 = return value [not used if came from cpu_switchto()] */
- /* rem: r6 = new process */
- /* rem: interrupts are disabled */
+ mov r2, #0x00000000
+ str r2, [r7]
#ifdef MULTIPROCESSOR
/* XXX use curcpu() */
- ldr r0, .Lcpu_info_store
- str r0, [r6, #(P_CPU)]
+ ldr r2, .Lcpu_info_store
+ str r2, [r1, #(P_CPU)]
#else
- /* l->l_cpu initialized in fork1() for single-processor */
+ /* p->p_cpu initialized in fork1() for single-processor */
#endif
/* Process is now on a processor. */
- mov r0, #SONPROC /* p->p_stat = SONPROC */
- strb r0, [r6, #(P_STAT)]
+ mov r2, #SONPROC /* p->p_stat = SONPROC */
+ strb r2, [r1, #(P_STAT)]
/* We have a new curproc now so make a note it */
ldr r7, .Lcurproc
- str r6, [r7]
+ str r1, [r7]
/* Hook in a new pcb */
ldr r7, .Lcurpcb
- ldr r0, [r6, #(P_ADDR)]
- str r0, [r7]
-
- /* At this point we can allow IRQ's again. */
- IRQenable
-
- /* rem: r1 = old proc */
- /* rem: r4 = return value */
- /* rem: r6 = new process */
- /* rem: interrupts are enabled */
-
- /*
- * If the new process is the same as the process that called
- * cpu_switch() then we do not need to save and restore any
- * contexts. This means we can make a quick exit.
- * The test is simple if curproc on entry (now in r1) is the
- * same as the proc removed from the queue we can jump to the exit.
- */
- teq r1, r6
- moveq r4, #0x00000000 /* default to "didn't switch" */
- beq .Lswitch_return
-
- /*
- * At this point, we are guaranteed to be switching to
- * a new proc.
- */
- mov r4, #0x00000001
-
- /* Remember the old proc in r0 */
- mov r0, r1
+ ldr r6, [r7] /* Remember the old PCB */
+ ldr r2, [r1, #(P_ADDR)]
+ str r2, [r7]
/*
* If the old proc on entry to cpu_switch was zero then the
* process that called it was exiting. This means that we do
- * not need to save the current context. Instead we can jump
- * straight to restoring the context for the new process.
+ * not need to save the current context (we nevertheless need
+ * to clear the cache and TLB).
*/
teq r0, #0x00000000
beq .Lswitch_exited
- /* rem: r0 = old proc */
- /* rem: r4 = return value */
- /* rem: r6 = new process */
- /* rem: interrupts are enabled */
-
/* Stage two : Save old context */
- /* Get the user structure for the old proc. */
- ldr r1, [r0, #(P_ADDR)]
-
/* Save all the registers in the old proc's pcb */
#ifndef __XSCALE__
- add r7, r1, #(PCB_R8)
+ add r7, r6, #(PCB_R8)
stmia r7, {r8-r13}
#else
- strd r8, [r1, #(PCB_R8)]
- strd r10, [r1, #(PCB_R10)]
- strd r12, [r1, #(PCB_R12)]
+ strd r8, [r6, #(PCB_R8)]
+ strd r10, [r6, #(PCB_R10)]
+ strd r12, [r6, #(PCB_R12)]
#endif
+.Lswitch_exited:
/*
* NOTE: We can now use r8-r13 until it is time to restore
* them for the new process.
*/
/* Remember the old PCB. */
- mov r8, r1
+ mov r8, r6
- /* r1 now free! */
+ /* Save new proc in r6 now. */
+ mov r6, r1
/* Get the user structure for the new process in r9 */
ldr r9, [r6, #(P_ADDR)]
@@ -616,28 +288,24 @@ ENTRY(cpu_switch)
orr r2, r2, #(PSR_UND32_MODE | I32_bit)
msr cpsr_c, r2
+#ifdef notworthit
+ teq r0, #0x00000000
+ strne sp, [r8, #(PCB_UND_SP)]
+#else
str sp, [r8, #(PCB_UND_SP)]
+#endif
msr cpsr_c, r3 /* Restore the old mode */
/* rem: r0 = old proc */
- /* rem: r4 = return value */
- /* rem: r6 = new process */
+ /* rem: r1 = r6 = new process */
/* rem: r8 = old PCB */
/* rem: r9 = new PCB */
- /* rem: interrupts are enabled */
/* What else needs to be saved Only FPA stuff when that is supported */
/* Third phase : restore saved context */
- /* rem: r0 = old proc */
- /* rem: r4 = return value */
- /* rem: r6 = new proc */
- /* rem: r8 = old PCB */
- /* rem: r9 = new PCB */
- /* rem: interrupts are enabled */
-
/*
* Get the new L1 table pointer into r11. If we're switching to
* an LWP with the same address space as the outgoing one, we can
@@ -652,7 +320,7 @@ ENTRY(cpu_switch)
ldr r0, [r8, #(PCB_DACR)] /* r0 = old DACR */
ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
ldr r8, [r9, #(PCB_CSTATE)] /* r8 = &new_pmap->pm_cstate */
- ldr r5, .Llast_cache_state_ptr /* Previous thread's cstate */
+ ldr r5, .Llast_cache_state_ptr /* Previous proc's cstate */
teq r10, r11 /* Same L1? */
ldr r5, [r5]
@@ -681,7 +349,7 @@ ENTRY(cpu_switch)
beq .Lcs_cache_purge_skipped /* VM space is not in cache */
/*
- * Definately need to flush the cache.
+ * Definitely need to flush the cache.
* Mark the old VM space as NOT being resident in the cache.
*/
mov r2, #0x00000000
@@ -696,7 +364,6 @@ ENTRY(cpu_switch)
.Lcs_cache_purge_skipped:
/* rem: r1 = new DACR */
- /* rem: r4 = return value */
/* rem: r5 = &old_pmap->pm_cstate (or NULL) */
/* rem: r6 = new proc */
/* rem: r8 = &new_pmap->pm_cstate */
@@ -787,7 +454,6 @@ ENTRY(cpu_switch)
cmp r5, r8
strne r8, [r0]
- /* rem: r4 = return value */
/* rem: r6 = new proc */
/* rem: r9 = new PCB */
@@ -820,14 +486,6 @@ ENTRY(cpu_switch)
ldr r13, [r7, #(PCB_SP)]
#endif
-#if 0
- ldr r5, [r6, #(L_PROC)] /* fetch the proc for below */
-#else
- mov r5, r6
-#endif
-
- /* rem: r4 = return value */
- /* rem: r5 = new proc's proc */
/* rem: r6 = new proc */
/* rem: r7 = new pcb */
@@ -840,234 +498,16 @@ ENTRY(cpu_switch)
/* We can enable interrupts again */
IRQenableALL
- /* rem: r4 = return value */
- /* rem: r5 = new proc's proc */
/* rem: r6 = new proc */
/* rem: r7 = new PCB */
-#if 0
- /*
- * Check for restartable atomic sequences (RAS).
- */
-
- ldr r2, [r5, #(P_RASLIST)]
- ldr r1, [r7, #(PCB_TF)] /* r1 = trapframe (used below) */
- teq r2, #0 /* p->p_nras == 0? */
- bne .Lswitch_do_ras /* no, check for one */
-#endif
-
.Lswitch_return:
- /* cpu_switch returns 1 == switched, 0 == didn't switch */
- mov r0, r4
-
/*
* Pull the registers that got pushed when either savectx() or
* cpu_switch() was called and return.
*/
ldmfd sp!, {r4-r7, pc}
-#if 0
-.Lswitch_do_ras:
- ldr r1, [r1, #(TF_PC)] /* second ras_lookup() arg */
- mov r0, r5 /* first ras_lookup() arg */
- bl _C_LABEL(ras_lookup)
- cmn r0, #1 /* -1 means "not in a RAS" */
- ldrne r1, [r7, #(PCB_TF)]
- strne r0, [r1, #(TF_PC)]
- b .Lswitch_return
-#endif
-
-.Lswitch_exited:
- /*
- * We skip the cache purge because switch_exit() already did it.
- * Load up registers the way .Lcs_cache_purge_skipped expects.
- * Userpsace access already blocked by switch_exit().
- */
- ldr r9, [r6, #(P_ADDR)] /* r9 = new PCB */
- mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
- mov r5, #0 /* No previous cache state */
- ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */
- ldr r8, [r9, #(PCB_CSTATE)] /* r8 = new cache state */
- ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
- b .Lcs_cache_purge_skipped
-
-
-#ifdef DIAGNOSTIC
-.Lswitch_bogons:
- adr r0, .Lswitch_panic_str
- bl _C_LABEL(panic)
-1: nop
- b 1b
-
-.Lswitch_panic_str:
- .asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
-#endif
-
-/*
- * cpu_switchto(struct proc *current, struct proc *next)
- * Switch to the specified next LWP
- * Arguments:
- *
- * r0 'struct proc *' of the current LWP
- * r1 'struct proc *' of the LWP to switch to
- */
-ENTRY(cpu_switchto)
- stmfd sp!, {r4-r7, lr}
-
- mov r6, r1 /* save new proc */
-
-#if defined(LOCKDEBUG)
- mov r5, r0 /* save old proc */
- bl _C_LABEL(sched_unlock_idle)
- mov r1, r5
-#else
- mov r1, r0
-#endif
-
- IRQdisable
-
- /*
- * Okay, set up registers the way cpu_switch() wants them,
- * and jump into the middle of it (where we bring up the
- * new process).
- *
- * r1 = old proc (r6 = new proc)
- */
- b .Lswitch_resume
-
-/*
- * void switch_exit(struct proc *l, struct proc *l0,
- * void (*exit)(struct proc *));
- * Switch to proc0's saved context and deallocate the address space and kernel
- * stack for l. Then jump into cpu_switch(), as if we were in proc0 all along.
- */
-
-/* LINTSTUB: Func: void switch_exit(struct proc *l, struct proc *l0,
- void (*func)(struct proc *)) */
-ENTRY(switch_exit)
- /*
- * The process is going away, so we can use callee-saved
- * registers here without having to save them.
- */
-
- mov r4, r0
- ldr r0, .Lcurproc
-
- mov r5, r1
- mov r6, r2
-
- /*
- * r4 = proc
- * r5 = proc0
- * r6 = exit func
- */
-
- mov r2, #0x00000000 /* curproc = NULL */
- str r2, [r0]
-
- /*
- * We're about to clear both the cache and the TLB.
- * Make sure to zap the 'last cache state' pointer since the
- * pmap might be about to go away. Also ensure the outgoing
- * VM space's cache state is marked as NOT resident in the
- * cache, and that proc0's cache state IS resident.
- */
- ldr r7, [r4, #(P_ADDR)] /* r7 = old proc's PCB */
- ldr r0, .Llast_cache_state_ptr /* Last userland cache state */
- ldr r9, [r7, #(PCB_CSTATE)] /* Fetch cache state pointer */
- ldr r3, [r5, #(P_ADDR)] /* r3 = proc0's PCB */
- str r2, [r0] /* No previous cache state */
- str r2, [r9, #(CS_ALL)] /* Zap old proc's cache state */
- ldr r3, [r3, #(PCB_CSTATE)] /* proc0's cache state */
- mov r2, #-1
- str r2, [r3, #(CS_ALL)] /* proc0 is in da cache! */
-
- /* Switch to proc0 context */
-
- ldr r9, .Lcpufuncs
- mov lr, pc
- ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
-
- ldr r0, [r7, #(PCB_PL1VEC)]
- ldr r1, [r7, #(PCB_DACR)]
-
- /*
- * r0 = Pointer to L1 slot for vector_page (or NULL)
- * r1 = proc0's DACR
- * r4 = proc we're switching from
- * r5 = proc0
- * r6 = exit func
- * r7 = proc0's PCB
- * r9 = cpufuncs
- */
-
- IRQdisableALL
-
- /*
- * Ensure the vector table is accessible by fixing up proc0's L1
- */
- cmp r0, #0 /* No need to fixup vector table? */
- ldrne r3, [r0] /* But if yes, fetch current value */
- ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
- mcr p15, 0, r1, c3, c0, 0 /* Update DACR for proc0's context */
- cmpne r3, r2 /* Stuffing the same value? */
- strne r2, [r0] /* Store if not. */
-
-#ifdef PMAP_INCLUDE_PTE_SYNC
- /*
- * Need to sync the cache to make sure that last store is
- * visible to the MMU.
- */
- movne r1, #4
- movne lr, pc
- ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
-#endif /* PMAP_INCLUDE_PTE_SYNC */
-
- /*
- * Note: We don't do the same optimisation as cpu_switch() with
- * respect to avoiding flushing the TLB if we're switching to
- * the same L1 since this process' VM space may be about to go
- * away, so we don't want *any* turds left in the TLB.
- */
-
- /* Switch the memory to the new process */
- ldr r0, [r7, #(PCB_PAGEDIR)]
- mov lr, pc
- ldr pc, [r9, #CF_CONTEXT_SWITCH]
-
- ldr r0, .Lcurpcb
-
- /* Restore all the save registers */
-#ifndef __XSCALE__
- add r1, r7, #PCB_R8
- ldmia r1, {r8-r13}
-#else
- ldr r8, [r7, #(PCB_R8)]
- ldr r9, [r7, #(PCB_R9)]
- ldr r10, [r7, #(PCB_R10)]
- ldr r11, [r7, #(PCB_R11)]
- ldr r12, [r7, #(PCB_R12)]
- ldr r13, [r7, #(PCB_SP)]
-#endif
- str r7, [r0] /* curpcb = proc0's PCB */
-
- IRQenableALL
-
- /*
- * Schedule the vmspace and stack to be freed.
- */
- mov r0, r4 /* {proc_}exit2(l) */
- mov lr, pc
- mov pc, r6
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(sched_lock_idle)
-#endif
-
- ldr r7, .Lwhichqs /* r7 = &whichqs */
- mov r5, #0x00000000 /* r5 = old proc = NULL */
- b .Lswitch_search
-
/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
/*
@@ -1110,18 +550,3 @@ ENTRY(proc_trampoline)
PULLFRAME
movs pc, lr /* Exit */
-
-#ifndef __XSCALE__
- .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT;
-.Lcpu_switch_ffs_table:
-/* same as ffs table but all nums are -1 from that */
-/* 0 1 2 3 4 5 6 7 */
- .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */
- .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */
- .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */
- .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */
- .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */
- .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */
- .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */
- .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */
-#endif /* !__XSCALE_ */
diff --git a/sys/arch/arm/arm/genassym.cf b/sys/arch/arm/arm/genassym.cf
index 63839e12196..3ffdb707531 100644
--- a/sys/arch/arm/arm/genassym.cf
+++ b/sys/arch/arm/arm/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.7 2007/05/14 07:07:09 art Exp $
+# $OpenBSD: genassym.cf,v 1.8 2007/10/10 15:53:51 art Exp $
# $NetBSD: genassym.cf,v 1.27 2003/11/04 10:33:16 dsl Exp$
# Copyright (c) 1982, 1990 The Regents of the University of California.
@@ -82,11 +82,9 @@ export P_PROFIL
export SONPROC
struct proc
-member p_forw
-member p_back
member p_addr
-member p_priority
-member p_wchan
+#member p_priority
+#member p_wchan
member p_stat
# XXX use PROC_SIZEOF in new code whenever possible
define PROCSIZE sizeof(struct proc)
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index b19bce07a63..d9c0035b3a8 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.13 2007/05/18 14:41:55 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.14 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -3100,11 +3100,6 @@ pmap_activate(struct proc *p)
}
void
-pmap_deactivate(struct proc *p)
-{
-}
-
-void
pmap_update(pmap_t pm)
{
@@ -3211,7 +3206,7 @@ pmap_destroy(pmap_t pm)
pmap_update(pm);
/*
- * Make sure cpu_switch(), et al, DTRT. This is safe to do
+ * Make sure cpu_switchto(), et al, DTRT. This is safe to do
* since this process has no remaining mappings of its own.
*/
curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
diff --git a/sys/arch/arm/arm/vm_machdep.c b/sys/arch/arm/arm/vm_machdep.c
index d5e109dc121..a3b4115793b 100644
--- a/sys/arch/arm/arm/vm_machdep.c
+++ b/sys/arch/arm/arm/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.6 2007/05/27 20:59:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.7 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
/*
@@ -73,8 +73,6 @@ extern pv_addr_t systempage;
int process_read_regs (struct proc *p, struct reg *regs);
int process_read_fpregs (struct proc *p, struct fpreg *regs);
-void switch_exit (struct proc *p, struct proc *p0,
- void (*)(struct proc *));
extern void proc_trampoline (void);
/*
@@ -85,23 +83,6 @@ extern void proc_trampoline (void);
* the amount of stack used.
*/
-#if 0
-void
-cpu_proc_fork(p1, p2)
- struct proc *p1, *p2;
-{
-
-#if defined(PERFCTRS)
- if (PMC_ENABLED(p1))
- pmc_md_fork(p1, p2);
- else {
- p2->p_md.pmc_enabled = 0;
- p2->p_md.pmc_state = NULL;
- }
-#endif
-}
-#endif
-
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb and trap frame, making the child ready to run.
@@ -201,27 +182,14 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
-#if 0
-void
-cpu_setfunc(struct proc *p, void (*func)(void *), void *arg)
-{
- struct pcb *pcb = &p->p_addr->u_pcb;
- struct trapframe *tf = pcb->pcb_tf;
- struct switchframe *sf = (struct switchframe *)tf - 1;
-
- sf->sf_r4 = (u_int)func;
- sf->sf_r5 = (u_int)arg;
- sf->sf_pc = (u_int)proc_trampoline;
- pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
-}
-#endif
-
-
void
cpu_exit(struct proc *p)
{
+#if 0
pmap_update(p->p_vmspace->vm_map.pmap); /* XXX DSR help stability */
- switch_exit(p, &proc0, exit2);
+#endif
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/arm/include/pmap.h b/sys/arch/arm/include/pmap.h
index 847374df0ae..2e5b139324d 100644
--- a/sys/arch/arm/include/pmap.h
+++ b/sys/arch/arm/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.8 2007/09/10 18:49:44 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.9 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
/*
@@ -241,12 +241,13 @@ extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
#define pmap_is_referenced(pg) \
(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
-#define pmap_copy(dp, sp, da, l, sa) /* nothing */
+#define pmap_deactivate(p) do { /* nothing */ } while (0)
+#define pmap_copy(dp, sp, da, l, sa) do { /* nothing */ } while (0)
#define pmap_phys_address(ppn) (ptoa(ppn))
-#define pmap_proc_iflush(p, va, len) /* nothing */
-#define pmap_unuse_final(p) /* nothing */
+#define pmap_proc_iflush(p, va, len) do { /* nothing */ } while (0)
+#define pmap_unuse_final(p) do { /* nothing */ } while (0)
#define pmap_remove_holes(map) do { /* nothing */ } while (0)
/*
diff --git a/sys/arch/aviion/aviion/locore.S b/sys/arch/aviion/aviion/locore.S
index 3b9ef76b003..dae5f31e0c4 100644
--- a/sys/arch/aviion/aviion/locore.S
+++ b/sys/arch/aviion/aviion/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.3 2006/05/20 22:40:43 miod Exp $ */
+/* $OpenBSD: locore.S,v 1.4 2007/10/10 15:53:51 art Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat.
* Copyright (c) 1998 Steve Murphree, Jr.
@@ -343,14 +343,6 @@ GLOBAL(secondary_start)
bsr.n _C_LABEL(secondary_main)
addu r31, r3, USIZE /* switch to idle stack */
- /*
- * At this point, the CPU has been correctly initialized and has
- * identified itself on the console.
- * All it needs now is to jump to the idle loop and wait for work to
- * be offered.
- */
- br _ASM_LABEL(cpu_switch_idle)
-
#endif /* MULTIPROCESSOR */
/*
diff --git a/sys/arch/aviion/aviion/machdep.c b/sys/arch/aviion/aviion/machdep.c
index 4676e19f088..8464a564ee1 100644
--- a/sys/arch/aviion/aviion/machdep.c
+++ b/sys/arch/aviion/aviion/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.10 2007/06/06 17:15:11 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.11 2007/10/10 15:53:51 art Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -225,8 +225,6 @@ cpu_startup()
{
caddr_t v;
int sz, i;
- vsize_t size;
- int base, residual;
vaddr_t minaddr, maxaddr;
/*
@@ -605,19 +603,18 @@ void
secondary_main()
{
struct cpu_info *ci = curcpu();
+ int s;
cpu_configuration_print(0);
+ sched_init_cpu(ci);
ncpus++;
__cpu_simple_unlock(&cpu_mutex);
microuptime(&ci->ci_schedstate.spc_runtime);
ci->ci_curproc = NULL;
- /*
- * Upon return, the secondary cpu bootstrap code in locore will
- * enter the idle loop, waiting for some food to process on this
- * processor.
- */
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
#endif /* MULTIPROCESSOR */
diff --git a/sys/arch/hp300/hp300/locore.s b/sys/arch/hp300/hp300/locore.s
index 40a595f59c0..fe200e1a6fe 100644
--- a/sys/arch/hp300/hp300/locore.s
+++ b/sys/arch/hp300/hp300/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.60 2007/05/15 13:46:22 martin Exp $ */
+/* $OpenBSD: locore.s,v 1.61 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: locore.s,v 1.91 1998/11/11 06:41:25 thorpej Exp $ */
/*
@@ -1345,51 +1345,18 @@ Ldorte:
*/
#include <m68k/m68k/support.s>
-/*
- * Use common m68k process manipulation routines.
- */
-#include <m68k/m68k/proc_subr.s>
-
.data
GLOBAL(curpcb)
.long 0
ASBSS(nullpcb,SIZEOF_PCB)
-/*
- * At exit of a process, do a switch for the last time.
- * Switch to a safe stack and PCB, and deallocate the process's resources.
- */
-ENTRY(switch_exit)
- movl sp@(4),a0
- /* save state into garbage pcb */
- movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
- lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
-
- /* Schedule the vmspace and stack to be freed. */
- movl a0,sp@- | exit2(p)
- jbsr _C_LABEL(exit2)
- lea sp@(4),sp | pop args
-
- jra _C_LABEL(cpu_switch)
-
-/*
- * When no processes are on the runq, Swtch branches to Idle
- * to wait for something to come ready.
- */
-ASENTRY_NOPROFILE(Idle)
+ENTRY_NOPROFILE(cpu_idle_cycle)
stop #PSL_LOWIPL
- movw #PSL_HIGHIPL,sr
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
- jra Lsw1
-
-Lbadsw:
- PANIC("switch")
- /*NOTREACHED*/
+ rts
/*
- * cpu_switch()
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*
* NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
* entire ATC. The effort involved in selective flushing may not be
@@ -1399,55 +1366,15 @@ Lbadsw:
* user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
* bit). For now, we just always flush the full ATC.
*/
-ENTRY(cpu_switch)
- movl _C_LABEL(curpcb),a0 | current pcb
- movw sr,a0@(PCB_PS) | save sr before changing ipl
-#ifdef notyet
- movl CURPROC,sp@- | remember last proc running
-#endif
- clrl CURPROC
+ENTRY(cpu_switchto)
+ movl sp@(4), d0 | oldproc
+ beq Lswnofpsave | is NULL, don't save anything
/*
- * Find the highest-priority queue that isn't empty,
- * then take the first proc from that queue.
- */
- movw #PSL_HIGHIPL,sr | lock out interrupts
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
-Lsw1:
- movl d0,d1
- negl d0
- andl d1,d0
- bfffo d0{#0:#32},d1
- eorib #31,d1
-
- movl d1,d0
- lslb #3,d1 | convert queue number to index
- addl #_C_LABEL(qs),d1 | locate queue (q)
- movl d1,a1
- movl a1@(P_FORW),a0 | p = q->p_forw
- cmpal d1,a0 | anyone on queue?
- jeq Lbadsw | no, panic
- movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
- movl a0@(P_FORW),a1 | n = p->p_forw
- movl d1,a1@(P_BACK) | n->p_back = q
- cmpal d1,a1 | anyone left on queue?
- jne Lsw2 | yes, skip
- movl _C_LABEL(whichqs),d1
- bclr d0,d1 | no, clear bit
- movl d1,_C_LABEL(whichqs)
-Lsw2:
- movl a0,CURPROC
- clrl _C_LABEL(want_resched)
-#ifdef notyet
- movl sp@+,a1
- cmpl a0,a1 | switching to same proc?
- jeq Lswdone | yes, skip save and restore
-#endif
- /*
* Save state of previous process in its pcb.
*/
movl _C_LABEL(curpcb),a1
+ movw sr, a1@(PCB_PS) | save sr before switching context
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
@@ -1460,16 +1387,12 @@ Lsw2:
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(FPF_REGS) | save FP general registers
fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control registers
+
Lswnofpsave:
+ movl sp@(8), a0 | newproc
-#ifdef DIAGNOSTIC
- tstl a0@(P_WCHAN)
- jne Lbadsw
- cmpb #SRUN,a0@(P_STAT)
- jne Lbadsw
-#endif
+ movl a0, CURPROC
movb #SONPROC,a0@(P_STAT)
- clrl a0@(P_BACK) | clear back link
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
diff --git a/sys/arch/hp300/hp300/vm_machdep.c b/sys/arch/hp300/hp300/vm_machdep.c
index a23b64964ed..abc6af74605 100644
--- a/sys/arch/hp300/hp300/vm_machdep.c
+++ b/sys/arch/hp300/hp300/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.43 2007/05/27 20:59:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.44 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.60 2001/07/06 05:53:35 chs Exp $ */
/*
@@ -133,10 +133,10 @@ void
cpu_exit(p)
struct proc *p;
{
+ (void)splhigh();
- (void) splhigh();
- switch_exit(p);
- /* NOTREACHED */
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
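Every port's cpu_exit() now collapses to the same three steps shown in the hunk above: raise the interrupt level, deactivate the pmap, and hand the dying process to sched_exit(). The MI side of sched_exit() is not part of this patch; the following is only a hedged guess at its shape, built from interfaces that do appear elsewhere in the patch (exit2(), SCHED_LOCK(), sched_chooseproc(), cpu_switchto()):

/*
 * Hypothetical sketch of the MI sched_exit() that the MD cpu_exit()
 * hooks hand off to.  It is assumed to take over the exit2() call the
 * old MD switch_exit() routines made, then switch away for good.
 */
void
sched_exit(struct proc *p)
{
	int s;

	exit2(p);		/* schedule vmspace/kernel stack teardown */
	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
	/* NOTREACHED: the dead proc's context is never resumed */
}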
diff --git a/sys/arch/hppa/hppa/genassym.cf b/sys/arch/hppa/hppa/genassym.cf
index 8f4351a2877..dab3e08efc2 100644
--- a/sys/arch/hppa/hppa/genassym.cf
+++ b/sys/arch/hppa/hppa/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.30 2007/05/14 19:54:21 martin Exp $
+# $OpenBSD: genassym.cf,v 1.31 2007/10/10 15:53:51 art Exp $
#
# Copyright (c) 1982, 1990, 1993
@@ -130,8 +130,6 @@ member tf_cr30
# proc fields and values
struct proc
-member p_forw
-member p_back
member p_addr
member p_priority
member p_stat
diff --git a/sys/arch/hppa/hppa/locore.S b/sys/arch/hppa/hppa/locore.S
index a6e0e572e0c..fa875d8a35c 100644
--- a/sys/arch/hppa/hppa/locore.S
+++ b/sys/arch/hppa/hppa/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.155 2007/07/20 22:09:23 kettenis Exp $ */
+/* $OpenBSD: locore.S,v 1.156 2007/10/10 15:53:51 art Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -2771,171 +2771,23 @@ $spstrcpy_exit
copy r0, ret0
EXIT(spstrcpy)
- .import whichqs, data
- .import qs, data
/*
- * setrunqueue(struct proc *p);
- * Insert a process on the appropriate queue. Should be called at splclock().
+ * int cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from "old" proc to "new".
*/
.align 32
-ENTRY(setrunqueue,0)
-#ifdef DIAGNOSTIC
- ldw P_BACK(arg0), t1
- comb,<>,n r0, t1, Lsetrunqueue_panic
- ldw P_WCHAN(arg0), t1
- comb,<>,n r0, t1, Lsetrunqueue_panic
- ldb P_STAT(arg0), t1
- comib,=,n SRUN, t1, Lsetrunqueue_ok
-Lsetrunqueue_panic
- copy arg0, arg1
- ldil L%panic, r1
- ldil L%Lsrqpstr, arg0
- ldo R%panic(r1), r1
- ldo R%Lsrqpstr(arg0), arg0
- .call
- blr %r0, rp
- bv,n %r0(r1)
- nop
-Lsrqpstr
- .asciz "setrunqueue(%p)"
- .align 8
-Lsetrunqueue_ok
-#endif
-
- ldb P_PRIORITY(arg0), t2
- ldil L%qs, t4
- extru t2, 29, 5, t1
- ldo R%qs(t4), t4
- sh3add t1, t4, t4
- ldil L%whichqs, arg3
- ldw P_BACK(t4), t2
- stw t4, P_FORW(arg0)
- stw arg0, P_BACK(t4)
- ldw R%whichqs(arg3), t3
- stw arg0, P_FORW(t2)
- mtctl t1, sar
- stw t2, P_BACK(arg0)
- vdepi 1, 1, t3
- bv 0(rp)
- stw t3, R%whichqs(arg3)
-EXIT(setrunqueue)
-
-/*
- * remrunqueue(struct proc *p);
- * Remove a process from its queue. Should be called at splclock().
- */
- .align 32
-ENTRY(remrunqueue,0)
- ldb P_PRIORITY(arg0), t2
- extru t2, 29, 5, arg2
- ldil L%whichqs, t2
- mtsar arg2
- ldw R%whichqs(t2), t3
-
-#ifdef DIAGNOSTIC
- bvb,<,n t3, remrunqueue_ok
-
-Lremrunqueue_panic
- copy arg0, arg1
- ldil L%panic, r1
- ldil L%Lrrqpstr, arg0
- ldo R%panic(r1), r1
- ldo R%Lrrqpstr(arg0), arg0
- .call
- blr %r0, rp
- bv,n %r0(r1)
- nop
-
-Lrrqpstr
- .asciz "remrunqueue(%p), bit=%x"
- .align 8
-remrunqueue_ok
-#endif
- ldw P_BACK(arg0), t4
- stw r0, P_BACK(arg0)
- ldw P_FORW(arg0), arg0
- stw arg0, P_FORW(t4)
- vdepi 0, 1, t3
- sub,<> t4, arg0, r0
- stw t3, R%whichqs(t2)
- bv 0(rp)
- stw t4, P_BACK(arg0)
-EXIT(remrunqueue)
-
-/*
- * cpu_switch()
- * Find the highest priority process and resume it.
- */
- .align 32
-ENTRY(cpu_switch,128)
- ldil L%cpl, t1
- ldw R%cpl(t1), ret0
+ENTRY(cpu_switchto,128)
copy r3, r1
stw rp, HPPA_FRAME_CRP(sp)
copy sp, r3
- stw ret0, HPPA_FRAME_SL(sp)
stwm r1, HPPA_FRAME_SIZE+20*4(sp)
- /*
- * Clear curproc so that we don't accumulate system time while idle.
- */
- ldil L%curproc, t1
- ldw R%curproc(t1), arg2
- b switch_search
- stw r0, R%curproc(t1)
- /* remain on the old (curproc)'s stack until we have a better choice */
-
-cpu_idle
- copy r0, arg0
- break HPPA_BREAK_KERNEL, HPPA_BREAK_SPLLOWER
- .import uvm, data
- ldil L%(uvm + PAGE_IDLE_ZERO), t3
- ldw R%(uvm + PAGE_IDLE_ZERO)(t3), t4
- sub,<> r0, t4, r0
- b cpu_loop
-
- stw arg2, 4(r3)
- ldil L%uvm_pageidlezero, t1
- ldo R%uvm_pageidlezero(t1), t2
- .call
- ble 0(sr0, t2)
- copy r31, rp
-
- ldw HPPA_FRAME_SL(r3), ret0
- ldw 4(r3), arg2
-
-cpu_loop
- ldil L%cpl, arg0
- stw ret0, R%cpl(arg0)
-
-switch_search
- /*
- * t1: &whichqs
- * arg2: old curproc
- *
- */
- ldil L%whichqs, t1
- ldw R%whichqs(t1), t3
- comb,=,n r0, t3, cpu_idle
- copy r0, arg0
-
- ldi 0, t4
-getbit
- mtsar t4
- bvb,>=,n t3, getbit
- ldo 1(t4), t4
-
- ldil L%qs, t2
- ldo R%qs(t2), t2
- sh3add t4, t2, t2
-
- ldw P_FORW(t2), arg1
#ifdef DIAGNOSTIC
- comb,<> t2, arg1, link_ok
+ b kstack_check
nop
switch_error
- copy t4, arg1
- copy t2, arg2
+ copy arg1, arg2
+ copy arg0, arg1
ldil L%panic, r1
ldil L%Lcspstr, arg0
ldo R%panic(r1), r1
@@ -2945,61 +2797,48 @@ switch_error
bv,n %r0(r1)
nop
Lcspstr
- .asciz "cpu_switch: bit=%x, q/p=%p"
+ .asciz "cpu_switch:old=%p, new=%p"
.align 8
-link_ok
-#endif
- ldil L%want_resched, t4
- stw r0, R%want_resched(t4)
-
- ldw P_FORW(arg1), arg0
- stw arg0, P_FORW(t2)
- stw t2, P_BACK(arg0)
- stw r0, P_BACK(arg1)
-
- vdepi 0, 1, t3
- sub,<> t2, arg0, r0
- stw t3, R%whichqs(t1)
-
- /* don't need &whichqs (t1) starting here */
-#ifdef DIAGNOSTIC
+kstack_check
ldw P_WCHAN(arg1), t1
comb,<>,n r0, t1, switch_error
- copy arg1, t2
+ nop
ldb P_STAT(arg1), t1
comib,<>,n SRUN, t1, switch_error
- copy arg1, t2
+ nop
/*
- * Either we must be switching to the same process, or
- * the new process' kernel stack must be reasonable.
+ * The new process' kernel stack must be reasonable.
*/
- comb,=,n arg1, arg2, kstack_ok
- ldw P_ADDR(arg1), arg0
- ldw U_PCB+PCB_KSP(arg0), t1
- ldo NBPG(arg0), arg0
- comb,>>,n arg0, t1, switch_error
- copy arg1, t2
- sub t1, arg0, t1
- ldil L%USPACE, arg0
- ldo R%USPACE(arg0), arg0
- comb,<<=,n arg0, t1, switch_error
- copy arg1, t2
+ ldw P_ADDR(arg1), arg2
+ ldw U_PCB+PCB_KSP(arg2), t1
+ ldo NBPG(arg2), arg2
+ comb,>>,n arg2, t1, switch_error
+ nop
+ sub t1, arg2, t1
+ ldil L%USPACE, arg2
+ ldo R%USPACE(arg2), arg2
+ comb,<<=,n arg2, t1, switch_error
+ nop
kstack_ok
#endif
+ ldil L%want_resched, t4
+ stw r0, R%want_resched(t4)
+
+ /* Record new proc. */
ldi SONPROC, t1
stb t1, P_STAT(arg1)
- /* Skip context switch if same process. */
- comb,=,n arg1, arg2, switch_return
+ ldil L%curproc, t1
+ stw arg1, R%curproc(t1)
/* If old process exited, don't bother. */
- comb,=,n r0, arg2, switch_exited
+ comb,=,n r0, arg0, switch_exited
/*
* 2. save old proc context
*
- * arg2: old proc
+ * arg0: old proc
*/
- ldw P_ADDR(arg2), t1
+ ldw P_ADDR(arg0), t1
/* save callee-save registers */
stw r4, 1*4(r3)
stw sp, U_PCB+PCB_KSP(t1)
@@ -3021,13 +2860,14 @@ kstack_ok
stw r0, HPPA_FRAME_ARG(1)(sp) /* say no trampoline */
sync
- /* don't need old curproc (arg2) starting from here */
+ /* don't need old curproc (arg0) starting from here */
switch_exited
/*
* 3. restore new proc context
*
* arg1: new proc
*/
+ /* XXX disable interrupts? */
ldw P_ADDR(arg1), t2
ldw P_MD_REGS(arg1), t1
ldw U_PCB+PCB_KSP(t2), sp
@@ -3036,11 +2876,12 @@ switch_exited
ldw TF_CR9(t1), t3
mtctl t2, cr30
mtctl t3, pidr2
+ /* XXX enable interrupts? */
ldo -(HPPA_FRAME_SIZE+20*4)(sp), r3
ldw HPPA_FRAME_ARG(0)(sp), arg0
ldw HPPA_FRAME_ARG(1)(sp), t4 /* in case we're on trampoline */
sub,= r0, t4, r0
- b switch_gonnajump
+ b switch_return
ldw 1*4(r3), r4
ldw 2*4(r3), r5
ldw 3*4(r3), r6
@@ -3056,21 +2897,31 @@ switch_exited
ldw 13*4(r3), r16
ldw 14*4(r3), r17
ldw 15*4(r3), r18
-switch_gonnajump
- ldw HPPA_FRAME_SL(r3), ret0
- ldil L%cpl, t1
- stw ret0, R%cpl(t1)
- sync
switch_return
- ldil L%curproc, t1
- stw arg1, R%curproc(t1)
ldw HPPA_FRAME_CRP(r3), rp
bv 0(rp)
ldwm -(HPPA_FRAME_SIZE+20*4)(sp), r3
-EXIT(cpu_switch)
+EXIT(cpu_switchto)
+
+LEAF_ENTRY(cpu_idle_enter)
+ bv 0(rp)
+ nop
+EXIT(cpu_idle_enter)
+
+LEAF_ENTRY(cpu_idle_cycle)
+ bv 0(rp)
+ nop
+EXIT(cpu_idle_cycle)
+
+LEAF_ENTRY(cpu_idle_leave)
+ bv 0(rp)
+ nop
+EXIT(cpu_idle_leave)
ENTRY(switch_trampoline,0)
+ ldil L%cpl, t1
+ stw r0, R%cpl(t1)
.call
blr r0, rp
bv,n r0(t4)
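The hppa port reduces cpu_idle_enter(), cpu_idle_cycle() and cpu_idle_leave() to empty leaf routines above, while other ports in this patch use the same hooks to lower and raise the spl or halt the processor. The MI idle loop that calls them is not shown here; a rough, illustrative rendering of how a per-cpu idle proc would use the hooks (nothing_runnable() is a placeholder, not a real kernel symbol, and the real MI loop may be structured differently):

/*
 * Illustrative idle-proc body: spin on the MD hooks until work shows
 * up, then yield through the normal MI switch path.
 */
void
idle_loop(void)
{
	int s;

	for (;;) {
		cpu_idle_enter();		/* MD: e.g. lower spl, unmask interrupts */
		while (nothing_runnable())
			cpu_idle_cycle();	/* MD: hlt, stop, POW, or plain spin */
		cpu_idle_leave();		/* MD: e.g. raise spl back for scheduling */

		SCHED_LOCK(s);
		mi_switch();			/* pick another proc and switch to it */
	}
}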
diff --git a/sys/arch/hppa/hppa/vm_machdep.c b/sys/arch/hppa/hppa/vm_machdep.c
index a4b8e0f5d5a..528a1dba26c 100644
--- a/sys/arch/hppa/hppa/vm_machdep.c
+++ b/sys/arch/hppa/hppa/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.61 2007/06/20 17:29:34 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.62 2007/10/10 15:53:51 art Exp $ */
/*
* Copyright (c) 1999-2004 Michael Shalayeff
@@ -160,12 +160,11 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
tf->tf_sp = (register_t)stack;
/*
- * Build stack frames for the cpu_switch & co.
+ * Build stack frames for the cpu_switchto & co.
*/
osp = sp + HPPA_FRAME_SIZE;
*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
*(register_t*)(osp + HPPA_FRAME_CRP) = (register_t)&switch_trampoline;
- *(register_t*)(osp + HPPA_FRAME_SL) = 0; /* cpl */
*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);
 	sp = osp + HPPA_FRAME_SIZE + 20*4;	/* frame + callee-save registers */
@@ -187,8 +186,8 @@ cpu_exit(p)
fpu_curpcb = 0;
}
- exit2(p);
- cpu_switch(p);
+ pmap_deactivate(p);
+ sched_exit(p);
}
void
diff --git a/sys/arch/hppa/include/cpu.h b/sys/arch/hppa/include/cpu.h
index 58586ebc26a..ff9f6aa916c 100644
--- a/sys/arch/hppa/include/cpu.h
+++ b/sys/arch/hppa/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.51 2007/07/20 22:12:39 kettenis Exp $ */
+/* $OpenBSD: cpu.h,v 1.52 2007/10/10 15:53:51 art Exp $ */
/*
* Copyright (c) 2000-2004 Michael Shalayeff
@@ -64,6 +64,7 @@
#ifndef _LOCORE
#ifdef _KERNEL
+#include <sys/queue.h>
#include <sys/sched.h>
struct cpu_info {
diff --git a/sys/arch/i386/i386/cpu.c b/sys/arch/i386/i386/cpu.c
index 726f3e816bb..6e9202f114b 100644
--- a/sys/arch/i386/i386/cpu.c
+++ b/sys/arch/i386/i386/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.25 2007/05/29 18:18:20 tom Exp $ */
+/* $OpenBSD: cpu.c,v 1.26 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: cpu.c,v 1.1.2.7 2000/06/26 02:04:05 sommerfeld Exp $ */
/*-
@@ -295,6 +295,7 @@ cpu_attach(struct device *parent, struct device *self, void *aux)
cpu_alloc_ldt(ci);
ci->ci_flags |= CPUF_PRESENT | CPUF_AP;
identifycpu(ci);
+ sched_init_cpu(ci);
ci->ci_next = cpu_info_list->ci_next;
cpu_info_list->ci_next = ci;
ncpus++;
@@ -463,6 +464,9 @@ cpu_hatch(void *v)
ci->ci_dev.dv_xname, ci->ci_cpuid);
microuptime(&ci->ci_schedstate.spc_runtime);
splx(s);
+
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
void
diff --git a/sys/arch/i386/i386/genassym.cf b/sys/arch/i386/i386/genassym.cf
index 88c086d6526..211a677dfa5 100644
--- a/sys/arch/i386/i386/genassym.cf
+++ b/sys/arch/i386/i386/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.27 2007/10/03 07:51:26 kettenis Exp $
+# $OpenBSD: genassym.cf,v 1.28 2007/10/10 15:53:51 art Exp $
#
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@@ -88,8 +88,6 @@ export VM_MAXUSER_ADDRESS
# proc fields and values
struct proc
member p_addr
-member p_back
-member p_forw
member p_priority
member p_stat
member p_wchan
diff --git a/sys/arch/i386/i386/locore.s b/sys/arch/i386/i386/locore.s
index d8e379cb506..ff5a97e22ce 100644
--- a/sys/arch/i386/i386/locore.s
+++ b/sys/arch/i386/i386/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.115 2007/10/03 07:51:26 kettenis Exp $ */
+/* $OpenBSD: locore.s,v 1.116 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
/*-
@@ -1295,279 +1295,52 @@ ENTRY(longjmp)
ret
/*****************************************************************************/
-
-/*
- * The following primitives manipulate the run queues.
- * whichqs tells which of the 32 queues qs have processes in them.
- * Setrq puts processes into queues, Remrq removes them from queues.
- * The running process is on no queue, other processes are on a queue
- * related to p->p_pri, divided by 4 actually to shrink the 0-127 range
- * of priorities into the 32 available queues.
- */
- .globl _C_LABEL(whichqs),_C_LABEL(qs),_C_LABEL(uvmexp),_C_LABEL(panic)
-/*
- * setrunqueue(struct proc *p);
- * Insert a process on the appropriate queue. Should be called at splclock().
- */
-NENTRY(setrunqueue)
- movl 4(%esp),%eax
-#ifdef DIAGNOSTIC
- cmpl $0,P_BACK(%eax) # should not be on q already
- jne 1f
- cmpl $0,P_WCHAN(%eax)
- jne 1f
- cmpb $SRUN,P_STAT(%eax)
- jne 1f
-#endif /* DIAGNOSTIC */
- movzbl P_PRIORITY(%eax),%edx
- shrl $2,%edx
- btsl %edx,_C_LABEL(whichqs) # set q full bit
- leal _C_LABEL(qs)(,%edx,8),%edx # locate q hdr
- movl P_BACK(%edx),%ecx
- movl %edx,P_FORW(%eax) # link process on tail of q
- movl %eax,P_BACK(%edx)
- movl %eax,P_FORW(%ecx)
- movl %ecx,P_BACK(%eax)
- ret
-#ifdef DIAGNOSTIC
-1: pushl $2f
- call _C_LABEL(panic)
- /* NOTREACHED */
-2: .asciz "setrunqueue"
-#endif /* DIAGNOSTIC */
-
-/*
- * remrunqueue(struct proc *p);
- * Remove a process from its queue. Should be called at splclock().
- */
-NENTRY(remrunqueue)
- movl 4(%esp),%ecx
- movzbl P_PRIORITY(%ecx),%eax
-#ifdef DIAGNOSTIC
- shrl $2,%eax
- btl %eax,_C_LABEL(whichqs)
- jnc 1f
-#endif /* DIAGNOSTIC */
- movl P_BACK(%ecx),%edx # unlink process
- movl $0,P_BACK(%ecx) # zap reverse link to indicate off list
- movl P_FORW(%ecx),%ecx
- movl %ecx,P_FORW(%edx)
- movl %edx,P_BACK(%ecx)
- cmpl %ecx,%edx # q still has something?
- jne 2f
-#ifndef DIAGNOSTIC
- shrl $2,%eax
-#endif
- btrl %eax,_C_LABEL(whichqs) # no; clear bit
-2: ret
+
#ifdef DIAGNOSTIC
-1: pushl $3f
+NENTRY(switch_error1)
+ pushl %edi
+ pushl $1f
call _C_LABEL(panic)
/* NOTREACHED */
-3: .asciz "remrunqueue"
-#endif /* DIAGNOSTIC */
-
-#if NAPM > 0
- .globl _C_LABEL(apm_cpu_idle),_C_LABEL(apm_cpu_busy)
-#endif
-/*
- * When no processes are on the runq, cpu_switch() branches to here to wait for
- * something to come ready.
- */
-ENTRY(idle)
- /* Skip context saving if we have none. */
- testl %esi,%esi
- jz 1f
-
- /*
- * idling: save old context.
- *
- * Registers:
- * %eax, %ebx, %ecx - scratch
- * %esi - old proc, then old pcb
- * %edi - idle pcb
- * %edx - idle TSS selector
- */
-
- pushl %esi
- call _C_LABEL(pmap_deactivate) # pmap_deactivate(oldproc)
- addl $4,%esp
-
- movl P_ADDR(%esi),%esi
-
- /* Save stack pointers. */
- movl %esp,PCB_ESP(%esi)
- movl %ebp,PCB_EBP(%esi)
-
- /* Find idle PCB for this CPU */
-#ifndef MULTIPROCESSOR
- movl $_C_LABEL(proc0),%ebx
- movl P_ADDR(%ebx),%edi
- movl P_MD_TSS_SEL(%ebx),%edx
-#else
- movl CPUVAR(IDLE_PCB), %edi
- movl CPUVAR(IDLE_TSS_SEL), %edx
-#endif
-
- /* Restore the idle context (avoid interrupts) */
- cli
-
- /* Restore stack pointers. */
- movl PCB_ESP(%edi),%esp
- movl PCB_EBP(%edi),%ebp
-
-
- /* Switch address space. */
- movl PCB_CR3(%edi),%ecx
- movl %ecx,%cr3
-
- /* Switch TSS. Reset "task busy" flag before loading. */
- movl CPUVAR(GDT), %eax
- andl $~0x0200,4-SEL_KPL(%eax,%edx,1)
- ltr %dx
-
- /* We're always in the kernel, so we don't need the LDT. */
-
- /* Restore cr0 (including FPU state). */
- movl PCB_CR0(%edi),%ecx
- movl %ecx,%cr0
-
- /* Record new pcb. */
- SET_CURPCB(%edi)
-
- xorl %esi,%esi
- sti
-
-1:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_unlock_idle)
-#endif
-
- movl $IPL_NONE,CPL # spl0()
- call _C_LABEL(Xspllower) # process pending interrupts
- jmp _C_LABEL(idle_start)
-
-ENTRY(idle_loop)
-#if NAPM > 0
- call _C_LABEL(apm_cpu_idle)
-#else
-#if NPCTR > 0
- addl $1,_C_LABEL(pctr_idlcnt)
- adcl $0,_C_LABEL(pctr_idlcnt)+4
-#endif
- sti
- hlt
-#endif
-ENTRY(idle_start)
- cli
- cmpl $0,_C_LABEL(whichqs)
- jz _C_LABEL(idle_loop)
-
-ENTRY(idle_exit)
- movl $IPL_HIGH,CPL # splhigh
- sti
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_lock_idle)
-#endif
-#if NAPM > 0
- call _C_LABEL(apm_cpu_busy)
-#endif
- jmp switch_search
-
-#ifdef DIAGNOSTIC
-NENTRY(switch_error)
+1: .asciz "cpu_switch1 %p"
+NENTRY(switch_error2)
+ pushl %edi
pushl $1f
call _C_LABEL(panic)
/* NOTREACHED */
-1: .asciz "cpu_switch"
+1: .asciz "cpu_switch2 %p"
#endif /* DIAGNOSTIC */
/*
- * cpu_switch(void);
- * Find a runnable process and switch to it. Wait if necessary. If the new
- * process is the same as the old one, we short-circuit the context save and
- * restore.
+ * cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from the "old" proc to the "new" proc. If "old" is NULL, we
+ * don't need to bother saving old context.
*/
-ENTRY(cpu_switch)
+ENTRY(cpu_switchto)
pushl %ebx
pushl %esi
pushl %edi
- pushl CPL
-
- movl CPUVAR(CURPROC), %esi
-
- /*
- * Clear curproc so that we don't accumulate system time while idle.
- * This also insures that schedcpu() will move the old process to
- * the correct queue if it happens to get called from the spllower()
- * below and changes the priority. (See corresponding comment in
- * userret()).
- */
- movl $0, CPUVAR(CURPROC)
-
-switch_search:
- /*
- * First phase: find new process.
- *
- * Registers:
- * %eax - queue head, scratch, then zero
- * %ebx - queue number
- * %ecx - cached value of whichqs
- * %edx - next process in queue
- * %esi - old process
- * %edi - new process
- */
- /* Wait for new process. */
- movl _C_LABEL(whichqs),%ecx
- bsfl %ecx,%ebx # find a full q
- jz _C_LABEL(idle) # if none, idle
- leal _C_LABEL(qs)(,%ebx,8),%eax # select q
- movl P_FORW(%eax),%edi # unlink from front of process q
-#ifdef DIAGNOSTIC
- cmpl %edi,%eax # linked to self (i.e. nothing queued)?
- je _C_LABEL(switch_error) # not possible
-#endif /* DIAGNOSTIC */
- movl P_FORW(%edi),%edx
- movl %edx,P_FORW(%eax)
- movl %eax,P_BACK(%edx)
+ movl 16(%esp), %esi
+ movl 20(%esp), %edi
- cmpl %edx,%eax # q empty?
- jne 3f
-
- btrl %ebx,%ecx # yes, clear to indicate empty
- movl %ecx,_C_LABEL(whichqs) # update q status
+ xorl %eax, %eax
-3: xorl %eax, %eax
- /* We just did it. */
- movl $0, CPUVAR(RESCHED)
+ movl %eax, CPUVAR(RESCHED)
#ifdef DIAGNOSTIC
cmpl %eax,P_WCHAN(%edi) # Waiting for something?
- jne _C_LABEL(switch_error) # Yes; shouldn't be queued.
+ jne _C_LABEL(switch_error1) # Yes; shouldn't be queued.
cmpb $SRUN,P_STAT(%edi) # In run state?
- jne _C_LABEL(switch_error) # No; shouldn't be queued.
+ jne _C_LABEL(switch_error2) # No; shouldn't be queued.
#endif /* DIAGNOSTIC */
- /* Isolate process. XXX Is this necessary? */
- movl %eax,P_BACK(%edi)
-
- /* Record new process. */
- movb $SONPROC,P_STAT(%edi) # p->p_stat = SONPROC
- movl CPUVAR(SELF), %ecx
- movl %edi, CPUVAR(CURPROC)
- movl %ecx, P_CPU(%edi)
-
- /* Skip context switch if same process. */
- cmpl %edi,%esi
- je switch_return
-
/* If old process exited, don't bother. */
testl %esi,%esi
jz switch_exited
/*
- * Second phase: save old context.
+ * Save old context.
*
* Registers:
* %eax, %ecx - scratch
@@ -1597,18 +1370,19 @@ switch_exited:
/* No interrupts while loading new state. */
cli
+
+ /* Record new process. */
+ movl CPUVAR(SELF), %ebx
+ movl %edi, CPUVAR(CURPROC)
+ movb $SONPROC, P_STAT(%edi)
+ movl %ebx, P_CPU(%edi)
+
movl P_ADDR(%edi),%esi
/* Restore stack pointers. */
movl PCB_ESP(%esi),%esp
movl PCB_EBP(%esi),%ebp
-#if 0
- /* Don't bother with the rest if switching to a system process. */
- testl $P_SYSTEM,P_FLAG(%edi)
- jnz switch_restored
-#endif
-
/*
* Activate the address space. We're curproc, so %cr3 will
* be reloaded, but we're not yet curpcb, so the LDT won't
@@ -1639,7 +1413,6 @@ switch_exited:
lldt %dx
#endif /* USER_LDT */
-switch_restored:
/* Restore cr0 (including FPU state). */
movl PCB_CR0(%esi),%ecx
#ifdef MULTIPROCESSOR
@@ -1661,103 +1434,29 @@ switch_restored:
/* Interrupts are okay again. */
sti
-switch_return:
-#if 0
- pushl %edi
- movl CPUVAR(NAME), %ebx
- leal CPU_INFO_NAME(%ebx),%ebx
- pushl %ebx
- pushl $1f
- call _C_LABEL(printf)
- addl $0xc,%esp
-#endif
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_unlock_idle)
-#endif
- /*
- * Restore old cpl from stack. Note that this is always an increase,
- * due to the spl0() on entry.
- */
- popl CPL
-
- movl %edi,%eax # return (p);
popl %edi
popl %esi
popl %ebx
ret
-1: .asciz "%s: scheduled %x\n"
-/*
- * switch_exit(struct proc *p);
- * Switch to the appropriate idle context (proc0's if uniprocessor; the cpu's if
- * multiprocessor) and deallocate the address space and kernel stack for p.
- * Then jump into cpu_switch(), as if we were in the idle proc all along.
- */
-#ifndef MULTIPROCESSOR
- .globl _C_LABEL(proc0)
-#endif
-ENTRY(switch_exit)
- movl 4(%esp),%edi # old process
-#ifndef MULTIPROCESSOR
- movl $_C_LABEL(proc0),%ebx
- movl P_ADDR(%ebx),%esi
- movl P_MD_TSS_SEL(%ebx),%edx
-#else
- movl CPUVAR(IDLE_PCB), %esi
- movl CPUVAR(IDLE_TSS_SEL), %edx
-#endif
-
- /* In case we fault... */
- movl $0, CPUVAR(CURPROC)
-
- /* Restore the idle context. */
- cli
-
- /* Restore stack pointers. */
- movl PCB_ESP(%esi),%esp
- movl PCB_EBP(%esi),%ebp
-
- /* Load TSS info. */
- movl CPUVAR(GDT), %eax
-
- /* Switch address space. */
- movl PCB_CR3(%esi),%ecx
- movl %ecx,%cr3
-
- /* Switch TSS. */
- andl $~0x0200,4-SEL_KPL(%eax,%edx,1)
- ltr %dx
- /* We're always in the kernel, so we don't need the LDT. */
-
- /* Clear segment registers; always null in proc0. */
- xorl %ecx,%ecx
- movw %cx,%gs
-
- /* Point to cpu_info */
- movl $GSEL(GCPU_SEL, SEL_KPL),%ecx
- movw %cx,%fs
-
- /* Restore cr0 (including FPU state). */
- movl PCB_CR0(%esi),%ecx
- movl %ecx,%cr0
-
- /* Record new pcb. */
- SET_CURPCB(%esi)
+ENTRY(cpu_idle_enter)
+ ret
- /* Interrupts are okay again. */
+ENTRY(cpu_idle_cycle)
+#if NAPM > 0
+ call _C_LABEL(apm_cpu_idle)
+#else
+#if NPCTR > 0
+ addl $1,_C_LABEL(pctr_idlcnt)
+ adcl $0,_C_LABEL(pctr_idlcnt)+4
+#endif
sti
+ hlt
+#endif
+ ret
- /*
- * Schedule the dead process's vmspace and stack to be freed.
- */
- pushl %edi /* exit2(p) */
- call _C_LABEL(exit2)
- addl $4,%esp
-
- /* Jump into cpu_switch() with the right state. */
- xorl %esi,%esi
- movl $0, CPUVAR(CURPROC)
- jmp switch_search
+ENTRY(cpu_idle_leave)
+ ret
/*
* savectx(struct pcb *pcb);
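The i386 cpu_idle_cycle() above leans on the classic sti/hlt pairing: sti only takes effect after the following instruction has executed, so no interrupt can be delivered between re-enabling interrupts and halting, and the wakeup interrupt is guaranteed to break the hlt rather than be missed. The same idiom written as C with inline assembly, purely for illustration (the kernel keeps this in locore.s):

/*
 * Illustrative only: enable interrupts and halt as one sequence.
 * The interrupt that ends the hlt cannot sneak in before it.
 */
static inline void
idle_halt(void)
{
	__asm volatile("sti; hlt");
}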
diff --git a/sys/arch/i386/i386/mptramp.s b/sys/arch/i386/i386/mptramp.s
index b68ec50ed12..de120c4de55 100644
--- a/sys/arch/i386/i386/mptramp.s
+++ b/sys/arch/i386/i386/mptramp.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: mptramp.s,v 1.8 2007/02/20 21:15:01 tom Exp $ */
+/* $OpenBSD: mptramp.s,v 1.9 2007/10/10 15:53:51 art Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -249,9 +249,7 @@ mp_cont:
HALTT(0x30,%ecx)
pushl %ecx
call _C_LABEL(cpu_hatch)
- HALT(0x33)
- xorl %esi,%esi
- jmp _C_LABEL(idle_start)
+ /* NOTREACHED */
.data
_C_LABEL(mp_pdirpa):
diff --git a/sys/arch/i386/i386/vm_machdep.c b/sys/arch/i386/i386/vm_machdep.c
index 4f969a96aaa..a12b279cfb1 100644
--- a/sys/arch/i386/i386/vm_machdep.c
+++ b/sys/arch/i386/i386/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.52 2007/05/27 20:59:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.53 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $ */
/*-
@@ -126,7 +126,6 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
tf->tf_esp = (u_int)stack + stacksize;
sf = (struct switchframe *)tf - 1;
- sf->sf_ppl = 0;
sf->sf_esi = (int)func;
sf->sf_ebx = (int)arg;
sf->sf_eip = (int)proc_trampoline;
@@ -151,7 +150,7 @@ cpu_exit(struct proc *p)
#endif
pmap_deactivate(p);
- switch_exit(p);
+ sched_exit(p);
}
void
diff --git a/sys/arch/i386/include/frame.h b/sys/arch/i386/include/frame.h
index 8ce02c8753f..cc1263e57c8 100644
--- a/sys/arch/i386/include/frame.h
+++ b/sys/arch/i386/include/frame.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: frame.h,v 1.8 2006/11/27 18:04:28 gwk Exp $ */
+/* $OpenBSD: frame.h,v 1.9 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: frame.h,v 1.12 1995/10/11 04:20:08 mycroft Exp $ */
/*-
@@ -106,7 +106,6 @@ struct intrframe {
* Stack frame inside cpu_switch()
*/
struct switchframe {
- int sf_ppl;
int sf_edi;
int sf_esi;
int sf_ebx;
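With sf_ppl gone, the switch frame cpu_fork() builds for a new process carries only the callee-saved registers and a return address; cpu_switchto() pops them and "returns" into proc_trampoline. A hedged sketch of the frame setup, mirroring the vm_machdep.c hunk above (setup_switchframe() is a hypothetical helper, and sf_eip is the member that follows sf_ebx in the full struct):

extern void proc_trampoline(void);

static void
setup_switchframe(struct trapframe *tf, void (*func)(void *), void *arg)
{
	struct switchframe *sf = (struct switchframe *)tf - 1;

	sf->sf_esi = (int)func;			/* proc_trampoline calls func(arg) */
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)proc_trampoline;	/* where cpu_switchto "returns" */
}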
diff --git a/sys/arch/luna88k/luna88k/locore.S b/sys/arch/luna88k/luna88k/locore.S
index 81a5477308a..9f8e0ec9a2e 100644
--- a/sys/arch/luna88k/luna88k/locore.S
+++ b/sys/arch/luna88k/luna88k/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.13 2007/01/12 21:41:53 aoyama Exp $ */
+/* $OpenBSD: locore.S,v 1.14 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -150,8 +150,8 @@ ASLOCAL(main_start)
* Now we will compete with the other processors to see which one
* will be elected as the main one.
*/
- or.u r11, r0, hi16(_ASM_LABEL(cpu_mutex))
- or r11, r11, lo16(_ASM_LABEL(cpu_mutex))
+ or.u r11, r0, hi16(_C_LABEL(cpu_mutex))
+ or r11, r11, lo16(_C_LABEL(cpu_mutex))
1:
FLUSH_PIPELINE
or r22, r0, 1
@@ -282,15 +282,6 @@ ASLOCAL(secondary_init)
bsr.n _C_LABEL(secondary_pre_main) /* set cpu number */
or r31, r31, lo16(_ASM_LABEL(slavestack_end))
- /*
- * Release cpu_mutex; we have a race with other secondary CPUs here
- * because the stack has not been switched yet. However, since our
- * interrupts are disabled, the worst we can get is an NMI, and, oh
- * well, it means we're in deep trouble anyway.
- */
- or.u r10, r0, hi16(_ASM_LABEL(cpu_mutex))
- st r0, r10, lo16(_ASM_LABEL(cpu_mutex))
-
ldcr r2, CPU
1:
ld r3, r2, CI_CURPCB
@@ -299,13 +290,6 @@ ASLOCAL(secondary_init)
br.n _C_LABEL(secondary_main)
add r31, r3, USIZE /* switch to idle stack */
- /*
- * At this point, the CPU has been correctly initialized and has
- * identified itself on the console.
- * All it needs now is to jump to the idle loop and wait for work to
- * be offered.
- */
- br _ASM_LABEL(cpu_switch_search)
#else
/*
@@ -315,15 +299,6 @@ ASLOCAL(secondary_init)
#endif /* MULTIPROCESSOR */
- /*
- * Release the cpu_mutex; secondary processors will now have their
- * chance to initialize.
- */
-GLOBAL(cpu_boot_secondary_processors)
- or.u r2, r0, hi16(_ASM_LABEL(cpu_mutex))
- jmp.n r1
- st r0, r2, lo16(_ASM_LABEL(cpu_mutex))
-
/*
* void delay(int count)
*
@@ -384,10 +359,6 @@ GLOBAL(proc0paddr)
ASLOCAL(master_mpu)
word 0
-/* XMEM spin lock -- controls access to master_mpu */
-ASLOCAL(cpu_mutex)
- word 0
-
#if defined(DDB) || NKSYMS > 0
GLOBAL(esym)
word 0
diff --git a/sys/arch/luna88k/luna88k/machdep.c b/sys/arch/luna88k/luna88k/machdep.c
index 0a40e973bd7..bc12f69d4fb 100644
--- a/sys/arch/luna88k/luna88k/machdep.c
+++ b/sys/arch/luna88k/luna88k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.44 2007/06/06 17:15:12 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.45 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -85,6 +85,7 @@
#include <machine/cmmu.h>
#include <machine/cpu.h>
#include <machine/kcore.h>
+#include <machine/lock.h>
#include <machine/reg.h>
#include <machine/trap.h>
#include <machine/m88100.h>
@@ -105,6 +106,7 @@
caddr_t allocsys(caddr_t);
void consinit(void);
+void cpu_boot_secondary_processors(void);
void dumpconf(void);
void dumpsys(void);
int getcpuspeed(void);
@@ -183,6 +185,8 @@ int physmem; /* available physical memory, in pages */
struct vm_map *exec_map = NULL;
struct vm_map *phys_map = NULL;
+__cpu_simple_lock_t cpu_mutex = __SIMPLELOCK_UNLOCKED;
+
/*
* Declare these as initialized data so we can patch them.
*/
@@ -756,6 +760,16 @@ abort:
#ifdef MULTIPROCESSOR
/*
+ * Release the cpu_mutex; secondary processors will now have their
+ * chance to initialize.
+ */
+void
+cpu_boot_secondary_processors()
+{
+ __cpu_simple_unlock(&cpu_mutex);
+}
+
+/*
* Secondary CPU early initialization routine.
* Determine CPU number and set it, then allocate the idle pcb (and stack).
*
@@ -793,17 +807,18 @@ void
secondary_main()
{
struct cpu_info *ci = curcpu();
+ int s;
cpu_configuration_print(0);
+ sched_init_cpu(ci);
ncpus++;
+ __cpu_simple_unlock(&cpu_mutex);
microuptime(&ci->ci_schedstate.spc_runtime);
+ ci->ci_curproc = NULL;
- /*
- * Upon return, the secondary cpu bootstrap code in locore will
- * enter the idle loop, waiting for some food to process on this
- * processor.
- */
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
#endif /* MULTIPROCESSOR */
@@ -966,7 +981,6 @@ luna88k_bootstrap()
cpuid_t cpu;
extern void m8820x_initialize_cpu(cpuid_t);
extern void m8820x_set_sapr(cpuid_t, apr_t);
- extern void cpu_boot_secondary_processors(void);
cmmu = &cmmu8820x;
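cpu_mutex moves out of locore.S and becomes an ordinary __cpu_simple_lock_t, so the release side can be plain C in machdep.c: cpu_boot_secondary_processors() and secondary_main() above simply __cpu_simple_unlock() it. The acquire still happens in assembly during early bootstrap; the C-level pairing below is shown only to illustrate the <machine/lock.h> API in use here (the function name is made up for the example):

#include <machine/lock.h>

__cpu_simple_lock_t cpu_mutex = __SIMPLELOCK_UNLOCKED;

/* How a secondary cpu serializes its early bootstrap, in C terms. */
void
secondary_bootstrap_example(void)
{
	__cpu_simple_lock(&cpu_mutex);		/* spin until the primary lets us in */
	/* ... per-cpu setup that must not run concurrently ... */
	__cpu_simple_unlock(&cpu_mutex);	/* let the next secondary proceed */
}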
diff --git a/sys/arch/m68k/include/cpu.h b/sys/arch/m68k/include/cpu.h
index 9a88a3d80ea..9b3843e760e 100644
--- a/sys/arch/m68k/include/cpu.h
+++ b/sys/arch/m68k/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.17 2007/05/22 10:31:08 martin Exp $ */
+/* $OpenBSD: cpu.h,v 1.18 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: cpu.h,v 1.3 1997/02/02 06:56:57 thorpej Exp $ */
/*
@@ -66,6 +66,7 @@
#ifdef _KERNEL
#ifndef _LOCORE
+#include <sys/queue.h>
#include <sys/sched.h>
struct cpu_info {
@@ -269,6 +270,9 @@ int cachectl(struct proc *, int, vaddr_t, int);
*/
#define PROC_PC(p) (((struct trapframe *)((p)->p_md.md_regs))->tf_pc)
+#define cpu_idle_enter() do { /* nothing */ } while (0)
+#define cpu_idle_leave() do { /* nothing */ } while (0)
+
#endif /* _KERNEL */
#endif /* _M68K_CPU_H_ */
diff --git a/sys/arch/m68k/m68k/genassym.cf b/sys/arch/m68k/m68k/genassym.cf
index d608d7fa85b..05ce97c1a37 100644
--- a/sys/arch/m68k/m68k/genassym.cf
+++ b/sys/arch/m68k/m68k/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.12 2007/05/15 13:46:22 martin Exp $
+# $OpenBSD: genassym.cf,v 1.13 2007/10/10 15:53:52 art Exp $
#
# Copyright (c) 1995 Theo de Raadt
@@ -71,15 +71,10 @@ export NBPG
# proc fields and values
struct proc
-member p_forw
-member p_back
-member p_priority
member p_stat
-member p_wchan
member p_vmspace
member p_addr
-export SRUN
export SONPROC
# contexts
diff --git a/sys/arch/m68k/m68k/proc_subr.s b/sys/arch/m68k/m68k/proc_subr.s
index 23d3f4d61b8..e69de29bb2d 100644
--- a/sys/arch/m68k/m68k/proc_subr.s
+++ b/sys/arch/m68k/m68k/proc_subr.s
@@ -1,129 +0,0 @@
-/* $OpenBSD: proc_subr.s,v 1.2 2003/06/02 23:27:48 millert Exp $ */
-/* $NetBSD: proc_subr.s,v 1.2 1997/04/25 02:22:01 thorpej Exp $ */
-
-/*
- * Copyright (c) 1988 University of Utah.
- * Copyright (c) 1980, 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * the Systems Programming Group of the University of Utah Computer
- * Science Department.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from: Utah $Hdr: locore.s 1.66 92/12/22$
- *
- * @(#)locore.s 8.6 (Berkeley) 5/27/94
- */
-
-/*
- * Assembly routines related to process manipulation.
- */
-
-/*
- * NOTICE: This is not a standalone file. To use it, #include it in
- * your port's locore.s, like so:
- *
- * #include <m68k/m68k/proc_subr.s>
- */
-
-/*
- * The following primitives manipulate the run queues. _whichqs tells which
- * of the 32 queues _qs have processes in them. Setrunqueue puts processes
- * into queues, remrunqueue removes them from queues. The running process is
- * on no queue, other processes are on a queue related to p->p_priority,
- * divided by 4 actually to shrink the 0-127 range of priorities into the 32
- * available queues.
- */
-
-/*
- * Setrunqueue(p)
- *
- * Call should be made at spl6(), and p->p_stat should be SRUN
- */
-ENTRY(setrunqueue)
- movl sp@(4),a0
-#ifdef DIAGNOSTIC
- tstl a0@(P_BACK)
- jne Lset1
- tstl a0@(P_WCHAN)
- jne Lset1
- cmpb #SRUN,a0@(P_STAT)
- jne Lset1
-#endif
- clrl d0
- movb a0@(P_PRIORITY),d0
- lsrb #2,d0
- movl _C_LABEL(whichqs),d1
- bset d0,d1
- movl d1,_C_LABEL(whichqs)
- lslb #3,d0
- addl #_C_LABEL(qs),d0
- movl d0,a0@(P_FORW)
- movl d0,a1
- movl a1@(P_BACK),a0@(P_BACK)
- movl a0,a1@(P_BACK)
- movl a0@(P_BACK),a1
- movl a0,a1@(P_FORW)
- rts
-#ifdef DIAGNOSTIC
-Lset1:
- PANIC("setrunqueue")
-#endif
-
-/*
- * remrunqueue(p)
- *
- * Call should be made at spl6().
- */
-ENTRY(remrunqueue)
- movl sp@(4),a0
- movb a0@(P_PRIORITY),d0
-#ifdef DIAGNOSTIC
- lsrb #2,d0
- movl _C_LABEL(whichqs),d1
- btst d0,d1
- jeq Lrem2
-#endif
- movl a0@(P_BACK),a1
- clrl a0@(P_BACK)
- movl a0@(P_FORW),a0
- movl a0,a1@(P_FORW)
- movl a1,a0@(P_BACK)
- cmpal a0,a1
- jne Lrem1
-#ifndef DIAGNOSTIC
- lsrb #2,d0
- movl _C_LABEL(whichqs),d1
-#endif
- bclr d0,d1
- movl d1,_C_LABEL(whichqs)
-Lrem1:
- rts
-#ifdef DIAGNOSTIC
-Lrem2:
- PANIC("remrunqueue")
-#endif
diff --git a/sys/arch/m88k/include/cpu.h b/sys/arch/m88k/include/cpu.h
index 317cc61704f..ab3342c7734 100644
--- a/sys/arch/m88k/include/cpu.h
+++ b/sys/arch/m88k/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.22 2007/05/19 20:34:32 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.23 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* Copyright (c) 1992, 1993
@@ -58,6 +58,7 @@
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/intr.h>
+#include <sys/queue.h>
#include <sys/sched.h>
#if defined(MULTIPROCESSOR)
@@ -165,6 +166,8 @@ void set_cpu_number(cpuid_t);
#define cpu_exec(p) do { /* nothing */ } while (0)
#define cpu_wait(p) do { /* nothing */ } while (0)
+#define cpu_idle_cycle() do { /* nothing */ } while (0)
+
#if defined(MULTIPROCESSOR)
#include <sys/lock.h>
#include <sys/mplock.h>
diff --git a/sys/arch/m88k/m88k/genassym.cf b/sys/arch/m88k/m88k/genassym.cf
index 5738186c08a..2afe41589d1 100644
--- a/sys/arch/m88k/m88k/genassym.cf
+++ b/sys/arch/m88k/m88k/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.8 2007/05/12 19:59:52 miod Exp $
+# $OpenBSD: genassym.cf,v 1.9 2007/10/10 15:53:52 art Exp $
#
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@@ -28,7 +28,7 @@
# SUCH DAMAGE.
#
# @(#)genassym.c 7.8 (Berkeley) 5/7/91
-# $Id: genassym.cf,v 1.8 2007/05/12 19:59:52 miod Exp $
+# $Id: genassym.cf,v 1.9 2007/10/10 15:53:52 art Exp $
#
include <sys/param.h>
@@ -49,15 +49,11 @@ include <sys/user.h>
# proc fields and values
struct proc
-member p_forw
-member p_back
member p_addr
member p_stat
-member p_wchan
member p_cpu
member P_ASTPENDING p_md.md_astpending
-export SRUN
export SONPROC
# cpu fields
diff --git a/sys/arch/m88k/m88k/m88k_machdep.c b/sys/arch/m88k/m88k/m88k_machdep.c
index e9d7b9838ac..76e5329a1c8 100644
--- a/sys/arch/m88k/m88k/m88k_machdep.c
+++ b/sys/arch/m88k/m88k/m88k_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: m88k_machdep.c,v 1.21 2007/05/29 18:10:42 miod Exp $ */
+/* $OpenBSD: m88k_machdep.c,v 1.22 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -202,50 +202,6 @@ copystr(fromaddr, toaddr, maxlength, lencopied)
return (ENAMETOOLONG);
}
-void
-setrunqueue(p)
- struct proc *p;
-{
- struct prochd *q;
- struct proc *oldlast;
- int which = p->p_priority >> 2;
-
-#ifdef DIAGNOSTIC
- if (p->p_back != NULL)
- panic("setrunqueue %p", p);
-#endif
- q = &qs[which];
- whichqs |= 1 << which;
- p->p_forw = (struct proc *)q;
- p->p_back = oldlast = q->ph_rlink;
- q->ph_rlink = p;
- oldlast->p_forw = p;
-}
-
-/*
- * Remove process p from its run queue, which should be the one
- * indicated by its priority. Calls should be made at splstatclock().
- */
-void
-remrunqueue(vp)
- struct proc *vp;
-{
- struct proc *p = vp;
- int which = p->p_priority >> 2;
- struct prochd *q;
-
-#ifdef DIAGNOSTIC
- if ((whichqs & (1 << which)) == 0)
- panic("remrq %p", p);
-#endif
- p->p_forw->p_back = p->p_back;
- p->p_back->p_forw = p->p_forw;
- p->p_back = NULL;
- q = &qs[which];
- if (q->ph_link == (struct proc *)q)
- whichqs &= ~(1 << which);
-}
-
#ifdef DDB
int longformat = 1;
void
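With setrunqueue() and remrunqueue() deleted from the MD code of every port, and <sys/queue.h> now pulled into the cpu headers, the run queues are evidently maintained by MI code from here on. A hedged sketch of what TAILQ-based equivalents of the removed routines could look like (the names, the prochd layout and the p_runq member are assumptions for illustration, not taken from this patch):

#include <sys/queue.h>

TAILQ_HEAD(prochd, proc);
struct prochd qs[32];			/* one queue per priority band, as before */
volatile u_int32_t whichqs;		/* bitmask of non-empty queues */

void
setrunqueue(struct proc *p)		/* assumes TAILQ_ENTRY(proc) p_runq in struct proc */
{
	int which = p->p_priority >> 2;

	whichqs |= 1U << which;
	TAILQ_INSERT_TAIL(&qs[which], p, p_runq);
}

void
remrunqueue(struct proc *p)
{
	int which = p->p_priority >> 2;

	TAILQ_REMOVE(&qs[which], p, p_runq);
	if (TAILQ_EMPTY(&qs[which]))
		whichqs &= ~(1U << which);
}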
diff --git a/sys/arch/m88k/m88k/process.S b/sys/arch/m88k/m88k/process.S
index 70050662724..786b40d5c38 100644
--- a/sys/arch/m88k/m88k/process.S
+++ b/sys/arch/m88k/m88k/process.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: process.S,v 1.16 2007/05/12 19:59:52 miod Exp $ */
+/* $OpenBSD: process.S,v 1.17 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1996 Nivas Madhur
* All rights reserved.
@@ -35,242 +35,63 @@
#include <machine/psl.h>
#include <machine/intr.h>
-#ifdef DIAGNOSTIC
- data
- align 4
-ASLOCAL(swchanpanic)
- string "switch wchan %x\0"
- align 4
-ASLOCAL(swsrunpanic)
- string "switch SRUN %x\0"
-
- text
- align 8
-ASLOCAL(Lswchanpanic)
- or.u r2, r0, hi16(_ASM_LABEL(swchanpanic))
- or r2, r2, lo16(_ASM_LABEL(swchanpanic))
- bsr.n _C_LABEL(panic)
- or r3, r0, r9
-
-ASLOCAL(Lswsrunpanic)
- or.u r2, r0, hi16(_ASM_LABEL(swsrunpanic))
- or r2, r2, lo16(_ASM_LABEL(swsrunpanic))
- bsr.n _C_LABEL(panic)
- or r3, r0, r9
-#endif
-
/*
- * void switch_exit(struct proc *p)
+ * void cpu_idle_enter()
*
- * Do the final work to exit from a process. After switching to the
- * idle stack and pcb, invoke exit2() on behalf of the exiting process,
- * then continue into cpu_switch() to select another process to run.
+ * Enable all interrupts and lower spl. This actually enables interrupts
+ * in the psr; bootstrap of secondary processors depends on this.
*/
-
-ENTRY(switch_exit)
- /*
- * Disable interrupts since we are about to change the kernel
- * stack.
- */
- ldcr r3, PSR
- set r3, r3, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r3, PSR
+ENTRY(cpu_idle_enter)
+ ldcr r2, PSR
+ clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
+ stcr r2, PSR
FLUSH_PIPELINE
- /*
- * Change pcb to idle u. area, i.e., set r31 to top of stack
- * and set curpcb to point to the cpu's idle stack.
- * r2 contains proc *p.
- */
- ldcr r10, CPU
- ld r30, r10, CI_IDLE_PCB
- addu r31, r30, USIZE /* now on idle stack */
- st r30, r10, CI_CURPCB /* curpcb = idle_pcb */
-
- /* Schedule the vmspace and stack to be freed. */
- bsr.n _C_LABEL(exit2)
- st r0, r10, CI_CURPROC /* curproc = NULL */
-
- /*
- * exit2() has acquired the scheduler lock for us. Jump into
- * cpu_switch(), after the context save since we do not need
- * to save anything.
- */
- br _ASM_LABEL(cpu_switch_search)
+ br.n _C_LABEL(setipl)
+ or r2, r0, IPL_NONE
/*
- * void cpu_switch(struct proc *p)
+ * void cpu_idle_leave()
*
- * Find a runnable process and switch to it. On entry, the scheduler lock is
- * held; it has to be released before returning to the process.
+ * Raise ipl to IPL_SCHED, but do not attempt to disable interrupts.
+ */
+ENTRY(cpu_idle_leave)
+ br.n _C_LABEL(setipl)
+ or r2, r0, IPL_SCHED
+
+/*
+ * void cpu_switchto(struct proc *oldproc, struct proc *newproc)
*
- * Note that this code ignores its proc parameter and assumes it has the
- * same value as curproc. This may change in mi_switch() in the future,
- * be careful.
+ * Switch context from oldproc to newproc. oldproc may be NULL if there is
+ * no need to save the current context.
*/
-ENTRY(cpu_switch)
+ENTRY(cpu_switchto)
/*
- * Disable interrupts, we do not want to be disturbed while
- * saving context.
+ * Save state of previous process in its pcb if required.
*/
- ldcr r2, PSR
- set r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r2, PSR
- FLUSH_PIPELINE
+ bcnd eq0, r2, 1f
- /*
- * Save state of previous process in its pcb, and pmap_deactivate()
- * the process.
- */
ldcr r2, CPU
ld r2, r2, CI_CURPCB
st r1, r2, PCB_PC /* save return address */
bsr _ASM_LABEL(__savectx)
/* note that we don't need to recover r1 at this point */
+1:
ldcr r11, CPU
- ld r2, r11, CI_CURPROC
-
- /*
- * Note that we can still use curpcb as our stack after
- * pmap_deactivate() has been called, as it does not affect the u
- * area mappings.
- */
- bsr.n _C_LABEL(pmap_deactivate)
- st r0, r11, CI_CURPROC /* curproc = NULL */
-
-#ifdef MULTIPROCESSOR
- /*
- * We need to switch to the processor's idle stack now (in case the
- * process we are using the stack of gets scheduled on another
- * processor).
- */
- ldcr r10, CPU
- ld r30, r10, CI_IDLE_PCB
- addu r31, r30, USIZE /* now on idle stack */
- st r30, r10, CI_CURPCB /* curpcb = idle_pcb */
-#endif
-
-ASLOCAL(cpu_switch_search)
- /*
- * This is the start of the idle loop. Find the highest-priority
- * queue that isn't empty, then take the first proc from that queue.
- */
- or.u r7, r0, hi16(_C_LABEL(whichqs))
- ld r7, r7, lo16(_C_LABEL(whichqs))
- bcnd ne0, r7, _ASM_LABEL(cpu_switch_found)
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bsr _C_LABEL(sched_unlock_idle)
-#endif
-
-#ifdef MULTIPROCESSOR
-ASGLOBAL(cpu_switch_idle)
-#else
-ASLOCAL(cpu_switch_idle)
-#endif
- /*
- * There were no runnable processes. Enable all interrupts and
- * busy-wait for this to change.
- * Note that, besides doing setipl(IPL_NONE), this will actually enable
- * interrupts in the psr. Bootstrap of secondary processors
- * relies upon this.
- */
- ldcr r2, PSR
- clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r2, PSR
- FLUSH_PIPELINE
-
- bsr.n _C_LABEL(setipl)
- or r2, r0, IPL_NONE
-
- or.u r7, r0, hi16(_C_LABEL(whichqs))
- ld r7, r7, lo16(_C_LABEL(whichqs))
- bcnd eq0, r7, _ASM_LABEL(cpu_switch_idle)
- /* XXX run fancy things here, such as page zeroing... */
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bsr _C_LABEL(sched_lock_idle)
-#endif
-
-ASLOCAL(cpu_switch_found)
- /*
- * Disable interrupts.
- */
- ldcr r2, PSR
- set r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r2, PSR
- FLUSH_PIPELINE
-
- /*
- * An interrupt could have occured between the last whichqs check
- * and the call to setipl(). Check again that whichqs is nonzero.
- */
- or.u r7, r0, hi16(_C_LABEL(whichqs)) /* reload whichqs */
- ld r7, r7, lo16(_C_LABEL(whichqs))
- bcnd eq0, r7, _ASM_LABEL(cpu_switch_search)
-
- /* XXX use ff1, like powerpc... needs *runqueue() adjustments */
- xor r6, r6, r6 /* set r6 to 0 */
-1: bb1 0, r7, 2f /* if rightmost bit set, done */
- extu r7, r7, 0<1> /* else, right shift whichqs, */
- br.n 1b /* increment r6, and repeat */
- addu r6, r6, 1
-2:
- or.u r7, r0, hi16(_C_LABEL(qs))
- or r7, r7, lo16(_C_LABEL(qs))
-
- /*
- * Need to make
- * p->p_forw->p_back = p->p_back and
- * p->p_back->p_forw = p->p_forw where
- * p is q->p_forw.
- * Remember that q->p_forw == p and p->p_back == q.
- */
-
- lda.d r8, r7[r6] /* r8 = &qs[ff1(whichqs)] */
- ld r9, r8, P_FORW /* r8 is q, r9 is p */
-
- ld r12, r9, P_FORW /* r12 = p->p_forw */
- st r8, r12, P_BACK /* p->p_forw->p_back = q (p->p_back) */
- st r12, r8, P_FORW /* q->p_forw = p->p_forw */
- lda.d r8, r7[r6] /* reload r8 with qs[ff1(whichqs)] */
- ld r12, r8, P_FORW /* q->p_forw */
- cmp r12, r12, r8 /* q == q->p_forw; anyone left on queue? */
- bb1 ne, r12, 3f /* yes, skip clearing bit in whichqs */
-
- or r12, r0, 1
- mak r12, r12, r6
- or.u r7, r0, hi16(_C_LABEL(whichqs))
- ld r8, r7, lo16(_C_LABEL(whichqs))
- and.c r8, r8, r12 /* whichqs &= ~the bit */
- st r8, r7, lo16(_C_LABEL(whichqs))
-3:
-#ifdef DIAGNOSTIC
- ld r2, r9, P_WCHAN
- bcnd ne0, r2, _ASM_LABEL(Lswchanpanic)
- ld.b r2, r9, P_STAT
- cmp r2, r2, SRUN
- bb1 ne, r2, _ASM_LABEL(Lswsrunpanic)
-#endif
-
- ldcr r11, CPU
- st r0, r11, CI_WANT_RESCHED /* clear want_resched */
+ st r3, r11, CI_CURPROC /* curproc = newproc */
- st r9, r11, CI_CURPROC /* curproc = p */
or r2, r0, SONPROC
- st.b r2, r9, P_STAT
+ st.b r2, r3, P_STAT
#ifdef MULTIPROCESSOR
- st r11, r9, P_CPU /* p->p_cpu = curcpu */
+ st r11, r3, P_CPU /* p->p_cpu = curcpu */
#endif
-
- ld r3, r9, P_ADDR
- st r0, r9, P_BACK /* p->p_back = 0 */
- st r3, r11, CI_CURPCB /* curpcb = p->p_addr */
+ ld r2, r3, P_ADDR
+ st r2, r11, CI_CURPCB /* curpcb = p->p_addr */
/* pmap_activate() the process' pmap */
bsr.n _C_LABEL(pmap_activate)
- or r2, r0, r9
+ or r2, r0, r3
ldcr r10, CPU
ld r10, r10, CI_CURPCB
@@ -297,25 +118,9 @@ ASLOCAL(cpu_switch_found)
ld r29, r10, PCB_R29
ld r30, r10, PCB_R30 /* restore frame pointer & stack */
ld r31, r10, PCB_SP
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bsr.n _C_LABEL(sched_unlock_idle)
- or r14, r10, r0
- ld r1, r14, PCB_PC
- ld r14, r14, PCB_R14
-#else
ld r1, r10, PCB_PC
- ld r14, r10, PCB_R14
-#endif
-
- /*
- * Enable interrupts again.
- */
- ldcr r2, PSR
- clr r2, r2, 1<PSR_INTERRUPT_DISABLE_BIT>
- stcr r2, PSR
- FLUSH_PIPELINE
-
- jmp r1
+ jmp.n r1
+ ld r14, r10, PCB_R14
/*
* savectx(pcb)
diff --git a/sys/arch/m88k/m88k/vm_machdep.c b/sys/arch/m88k/m88k/vm_machdep.c
index f6283b85b56..e967b892cf8 100644
--- a/sys/arch/m88k/m88k/vm_machdep.c
+++ b/sys/arch/m88k/m88k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.15 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.16 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
@@ -136,20 +136,17 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
/*
* cpu_exit is called as the last action during exit.
- * We release the address space and machine-dependent resources,
- * including the memory for the user structure and kernel stack.
- * Once finished, we call switch_exit, which switches to a temporary
- * pcb and stack and never returns. We block memory allocation
- * until switch_exit has made things safe again.
+ *
+ * Block context switches and then call sched_exit(), which will
+ * switch to another process and thus never return.
*/
void
cpu_exit(struct proc *p)
{
- splhigh();
+ (void)splhigh();
pmap_deactivate(p);
- switch_exit(p);
- /* NOTREACHED */
+ sched_exit(p);
}
/*
diff --git a/sys/arch/mac68k/mac68k/locore.s b/sys/arch/mac68k/mac68k/locore.s
index 16809cabe7d..1c589abad89 100644
--- a/sys/arch/mac68k/mac68k/locore.s
+++ b/sys/arch/mac68k/mac68k/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.58 2007/05/15 13:46:22 martin Exp $ */
+/* $OpenBSD: locore.s,v 1.59 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: locore.s,v 1.103 1998/07/09 06:02:50 scottr Exp $ */
/*
@@ -954,52 +954,18 @@ Ldorte:
*/
#include <m68k/m68k/support.s>
-/*
- * Use common m68k process manipulation routines.
- */
-#include <m68k/m68k/proc_subr.s>
-
.data
GLOBAL(curpcb)
.long 0
ASBSS(nullpcb,SIZEOF_PCB)
-/*
- * At exit of a process, do a switch for the last time.
- * Switch to a safe stack and PCB, and select a new process to run. The
- * old stack and u-area will be freed by the reaper.
- */
-ENTRY(switch_exit)
- movl sp@(4),a0
- /* save state into garbage pcb */
- movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
- lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
-
- /* Schedule the vmspace and stack to be freed. */
- movl a0,sp@- | exit2(p)
- jbsr _C_LABEL(exit2)
- lea sp@(4),sp | pop args
-
- jra _C_LABEL(cpu_switch)
-
-/*
- * When no processes are on the runq, Swtch branches to Idle
- * to wait for something to come ready.
- */
-ASENTRY_NOPROFILE(Idle)
+ENTRY_NOPROFILE(cpu_idle_cycle)
stop #PSL_LOWIPL
- movw #PSL_HIGHIPL,sr
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
- jra Lsw1
-
-Lbadsw:
- PANIC("switch")
- /*NOTREACHED*/
+ rts
/*
- * cpu_switch()
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*
* NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
* entire ATC. The effort involved in selective flushing may not be
@@ -1009,55 +975,15 @@ Lbadsw:
* user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
* bit). For now, we just always flush the full ATC.
*/
-ENTRY(cpu_switch)
- movl _C_LABEL(curpcb),a0 | current pcb
- movw sr,a0@(PCB_PS) | save sr before changing ipl
-#ifdef notyet
- movl CURPROC,sp@- | remember last proc running
-#endif
- clrl CURPROC
+ENTRY(cpu_switchto)
+ movl sp@(4), d0 | oldproc
+ beq Lswnofpsave | is NULL, don't save anything
/*
- * Find the highest-priority queue that isn't empty,
- * then take the first proc from that queue.
- */
- movw #PSL_HIGHIPL,sr | lock out interrupts
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
-Lsw1:
- movl d0,d1
- negl d0
- andl d1,d0
- bfffo d0{#0:#32},d1
- eorib #31,d1
-
- movl d1,d0
- lslb #3,d1 | convert queue number to index
- addl #_C_LABEL(qs),d1 | locate queue (q)
- movl d1,a1
- movl a1@(P_FORW),a0 | p = q->p_forw
- cmpal d1,a0 | anyone on queue?
- jeq Lbadsw | no, panic
- movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
- movl a0@(P_FORW),a1 | n = p->p_forw
- movl d1,a1@(P_BACK) | n->p_back = q
- cmpal d1,a1 | anyone left on queue?
- jne Lsw2 | yes, skip
- movl _C_LABEL(whichqs),d1
- bclr d0,d1 | no, clear bit
- movl d1,_C_LABEL(whichqs)
-Lsw2:
- movl a0,CURPROC
- clrl _C_LABEL(want_resched)
-#ifdef notyet
- movl sp@+,a1
- cmpl a0,a1 | switching to same proc?
- jeq Lswdone | yes, skip save and restore
-#endif
- /*
* Save state of previous process in its pcb.
*/
movl _C_LABEL(curpcb),a1
+ movw sr, a1@(PCB_PS) | save sr before switching context
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
@@ -1070,16 +996,12 @@ Lsw2:
jeq Lswnofpsave | yes, all done
fmovem fp0-fp7,a2@(FPF_REGS) | save FP general registers
fmovem fpcr/fpsr/fpi,a2@(FPF_FPCR) | save FP control registers
+
Lswnofpsave:
+ movl sp@(8), a0 | newproc
-#ifdef DIAGNOSTIC
- tstl a0@(P_WCHAN)
- jne Lbadsw
- cmpb #SRUN,a0@(P_STAT)
- jne Lbadsw
-#endif
+ movl a0, CURPROC
movb #SONPROC,a0@(P_STAT)
- clrl a0@(P_BACK) | clear back link
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
diff --git a/sys/arch/mac68k/mac68k/vm_machdep.c b/sys/arch/mac68k/mac68k/vm_machdep.c
index 00bb0bce0d7..6ff6156056b 100644
--- a/sys/arch/mac68k/mac68k/vm_machdep.c
+++ b/sys/arch/mac68k/mac68k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.39 2007/05/27 20:59:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.40 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.29 1998/07/28 18:34:55 thorpej Exp $ */
/*
@@ -119,19 +119,18 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
/*
* cpu_exit is called as the last action during exit.
- * We release the address space and machine-dependent resources,
- * block context switches and then call switch_exit() which will
- * free our stack and user area and switch to another process.
- * Thus, we never return.
+ *
+ * Block context switches and then call sched_exit(), which will
+ * switch to another process and thus never return.
*/
void
cpu_exit(p)
struct proc *p;
{
-
(void)splhigh();
- switch_exit(p);
- /* NOTREACHED */
+
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/macppc/macppc/cpu.c b/sys/arch/macppc/macppc/cpu.c
index 9838e1d9c9c..c3ca93afd75 100644
--- a/sys/arch/macppc/macppc/cpu.c
+++ b/sys/arch/macppc/macppc/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.43 2007/05/23 23:40:21 kettenis Exp $ */
+/* $OpenBSD: cpu.c,v 1.44 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1997 Per Fogelstrom
@@ -676,7 +676,7 @@ void
cpu_hatch(void)
{
volatile struct cpu_hatch_data *h = cpu_hatch_data;
- int scratch, i;
+ int scratch, i, s;
/* Initialize timebase. */
__asm ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0));
@@ -759,5 +759,12 @@ cpu_hatch(void)
curcpu()->ci_ipending = 0;
curcpu()->ci_cpl = 0;
+
+ s = splhigh();
+ microuptime(&curcpu()->ci_schedstate.spc_runtime);
+ splx(s);
+
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
#endif
diff --git a/sys/arch/macppc/macppc/genassym.cf b/sys/arch/macppc/macppc/genassym.cf
index 4249820a8c6..2efd74a41b6 100644
--- a/sys/arch/macppc/macppc/genassym.cf
+++ b/sys/arch/macppc/macppc/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.14 2007/03/22 19:26:28 kettenis Exp $
+# $OpenBSD: genassym.cf,v 1.15 2007/10/10 15:53:52 art Exp $
#
# Copyright (c) 1982, 1990 The Regents of the University of California.
# All rights reserved.
@@ -61,7 +61,6 @@ define SFRAMELEN roundup(sizeof(struct switchframe), 16)
struct pcb
member PCB_PMR pcb_pmreal
member pcb_sp
-member pcb_spl
member PCB_FAULT pcb_onfault
struct pmap
@@ -70,8 +69,6 @@ member PM_USRSR pm_sr[PPC_USER_SR]
member PM_KERNELSR pm_sr[PPC_KERNEL_SR]
struct proc
-member p_forw
-member p_back
member p_addr
member p_stat
member p_cpu
diff --git a/sys/arch/macppc/macppc/locore.S b/sys/arch/macppc/macppc/locore.S
index b2658a7fd48..b840919182d 100644
--- a/sys/arch/macppc/macppc/locore.S
+++ b/sys/arch/macppc/macppc/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.32 2007/03/20 20:59:54 kettenis Exp $ */
+/* $OpenBSD: locore.S,v 1.33 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
@@ -133,150 +133,41 @@ _ENTRY(_C_LABEL(cpu_spinup_trampoline))
lis %r3,_C_LABEL(cpu_hatch_stack)@ha
lwz %r1,_C_LABEL(cpu_hatch_stack)@l(%r3)
- bl _C_LABEL(cpu_hatch)
- bl _C_LABEL(sched_lock_idle)
- li %r30,0
- b _C_LABEL(idle)
+ b _C_LABEL(cpu_hatch)
+ /* NOTREACHED */
#endif
-/*
- * No processes are runnable, so loop waiting for one.
- * Separate label here for accounting purposes.
- */
- .globl _C_LABEL(idle)
- .type _C_LABEL(idle),@function
-_C_LABEL(idle):
- mfmsr %r3
- /* disable interrupts while manipulating runque */
- andi. %r3,%r3,~PSL_EE@l
- mtmsr %r3
-
- lis %r8,_C_LABEL(whichqs)@ha
- lwz %r9,_C_LABEL(whichqs)@l(%r8)
-
- or. %r9,%r9,%r9
- bne- _C_LABEL(sw1) /* at least one queue non-empty */
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(sched_unlock_idle)
-#endif
-
- ori %r3,%r3,PSL_EE /* reenable ints again */
- mtmsr %r3
- isync
- sync
- /* low power mode */
- mfmsr %r3
- oris %r3, %r3, PSL_POW@h
- mtmsr %r3
- isync
-
-/* May do some power saving here? */
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(sched_lock_idle)
-#endif
- b _C_LABEL(idle)
-
-/*
- * switchexit gets called from cpu_exit to free the user structure
- * and kernel stack of the current process.
- */
-_ENTRY(_C_LABEL(switchexit))
-/* First switch to the idle pcb/kernel stack */
- GET_CPUINFO(%r7)
- lwz %r6,CI_IDLE_PCB(%r7)
- stw %r6,CI_CURPCB(%r7)
- addi %r1,%r6,USPACE-16 /* 16 bytes are reserved at stack top */
- /*
- * Schedule the vmspace and stack to be freed (the proc arg is
- * already in r3).
- */
- bl _C_LABEL(exit2)
-
- /* Fall through to cpu_switch to actually select another proc */
- li %r3,0 /* indicate exited process */
-
-/* Fall through to cpu_switch to actually select another proc */
/*
- * void cpu_switch(struct proc *p)
- * Find a runnable process and switch to it.
+ * void cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from "old" proc to "new".
*/
-_ENTRY(_C_LABEL(cpu_switch))
+_ENTRY(_C_LABEL(cpu_switchto))
mflr %r0 /* save lr */
stw %r0,4(%r1)
stwu %r1,-16(%r1)
stw %r31,12(%r1)
stw %r30,8(%r1)
- mr %r30,%r3
- GET_CPUINFO(%r3)
+/*
+ * r3 - old proc
+ * r4 - new proc
+ * r5 - cpuinfo
+ */
+ GET_CPUINFO(%r5)
li %r31,0
- /* Zero to not accumulate cpu time */
- stw %r31,CI_CURPROC(%r3)
- lwz %r31,CI_CURPCB(%r3)
-
- li %r3,0
- bl _C_LABEL(lcsplx)
- stw %r3,PCB_SPL(%r31) /* save spl */
-
-/* Find a new process */
- mfmsr %r3
- andi. %r3,%r3,~PSL_EE@l /* disable interrupts while
- manipulating runque */
- mtmsr %r3
- isync
-
- lis %r8,_C_LABEL(whichqs)@ha
- lwz %r9,_C_LABEL(whichqs)@l(%r8)
-
- or. %r9,%r9,%r9
- beq- _C_LABEL(idle) /* all queues empty */
-_C_LABEL(sw1):
- cntlzw %r10,%r9
- lis %r4,_C_LABEL(qs)@ha
- addi %r4,%r4,_C_LABEL(qs)@l
- slwi %r3,%r10,3
- add %r3,%r3,%r4 /* select queue */
-
- lwz %r31,P_FORW(%r3) /* unlink first proc from queue */
- lwz %r4,P_FORW(%r31)
- stw %r4,P_FORW(%r3)
- stw %r3,P_BACK(%r4)
-
- cmpl 0,%r3,%r4 /* queue empty? */
- bne 1f
-
- lis %r3,0x80000000@ha
- srw %r3,%r3,%r10
- andc %r9,%r9,%r3
- stw %r9,_C_LABEL(whichqs)@l(%r8) /* mark it empty */
-
-1:
/* just did this resched thing, clear resched */
- li %r3,0
- GET_CPUINFO(%r4)
- stw %r3,CI_WANT_RESCHED(%r4)
-
- stw %r3,P_BACK(%r31) /* probably superfluous */
+ stw %r31,CI_WANT_RESCHED(%r5)
#ifdef MULTIPROCESSOR
- stw %r4,P_CPU(%r31)
+ stw %r5,P_CPU(%r4)
#endif
- stw %r31,CI_CURPROC(%r4) /* record new process */
-
- li %r3,SONPROC
- stb %r3,P_STAT(%r31)
-
- mfmsr %r3
- ori %r3,%r3,PSL_EE /* Now we can interrupt again */
- mtmsr %r3
+ stw %r4,CI_CURPROC(%r5) /* record new process */
- cmpl 0,%r31,%r30 /* is it the same process? */
- beq switch_return
+ li %r31,SONPROC
+ stb %r31,P_STAT(%r4)
- or. %r30,%r30,%r30 /* old process was exiting? */
+ or. %r3,%r3,%r3 /* old process was exiting? */
beq switch_exited
mfsr %r10,PPC_USER_SR /* save PPC_USER_SR for copyin/copyout*/
@@ -284,33 +175,31 @@ _C_LABEL(sw1):
mr %r12,%r2 /* save r2 */
stwu %r1,-SFRAMELEN(%r1) /* still running on old stack */
stmw %r10,8(%r1)
- lwz %r3,P_ADDR(%r30)
- stw %r1,PCB_SP(%r3) /* save SP */
+ lwz %r31,P_ADDR(%r3)
+ stw %r1,PCB_SP(%r31) /* save SP */
switch_exited:
/* disable interrupts while actually switching */
- mfmsr %r3
- andi. %r3,%r3,~PSL_EE@l
- mtmsr %r3
+ mfmsr %r30
+ andi. %r30,%r30,~PSL_EE@l
+ mtmsr %r30
- lwz %r4,P_ADDR(%r31)
- GET_CPUINFO(%r5)
- stw %r4,CI_CURPCB(%r5) /* indicate new pcb */
+ lwz %r31,P_ADDR(%r4)
+ stw %r31,CI_CURPCB(%r5) /* indicate new pcb */
- lwz %r5,PCB_PMR(%r4)
+ lwz %r6,PCB_PMR(%r31)
/* save real pmap pointer for spill fill */
- GET_CPUINFO(%r6)
- stwu %r5,CI_CURPM(%r6)
- stwcx. %r5,%r0,%r6 /* clear possible reservation */
+ stwu %r6,CI_CURPM(%r5)
+ stwcx. %r6,%r0,%r5 /* clear possible reservation */
- addic. %r5,%r5,64
- li %r6,0
+ addic. %r6,%r6,64
+ li %r5,0
- lwz %r1,PCB_SP(%r4) /* get new procs SP */
+ lwz %r1,PCB_SP(%r31) /* get new procs SP */
- ori %r3,%r3,PSL_EE /* interrupts are okay again */
- mtmsr %r3
+ ori %r30,%r30,PSL_EE /* interrupts are okay again */
+ mtmsr %r30
lmw %r10,8(%r1) /* get other regs */
lwz %r1,0(%r1) /* get saved SP */
@@ -320,17 +209,6 @@ switch_exited:
mtsr PPC_USER_SR,%r10 /* get saved PPC_USER_SR */
isync
-switch_return:
- mr %r30,%r7 /* save proc pointer */
- lwz %r3,PCB_SPL(%r4)
- bl _C_LABEL(lcsplx)
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- bl _C_LABEL(sched_unlock_idle)
-#endif
-
- mr %r3,%r30 /* curproc for special fork returns */
-
lwz %r31,12(%r1)
lwz %r30,8(%r1)
addi %r1,%r1,16
@@ -338,6 +216,19 @@ switch_return:
mtlr %r0
blr
+_ENTRY(_C_LABEL(cpu_idle_enter))
+ blr
+
+_ENTRY(_C_LABEL(cpu_idle_cycle))
+ /* low power mode */
+ mfmsr %r3
+ oris %r3, %r3, PSL_POW@h
+ mtmsr %r3
+ isync
+ blr
+
+_ENTRY(_C_LABEL(cpu_idle_leave))
+ blr
/*
* Data used during primary/secondary traps/interrupts
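
The powerpc hunks above delete the in-locore idle loop and the run-queue scan, leaving only the context switch itself plus three small hooks: cpu_idle_enter(), cpu_idle_cycle() (which sets PSL_POW to drop the CPU into low-power mode) and cpu_idle_leave(). A rough C sketch of how a machine-independent idle loop might drive these hooks follows; sched_idle() and sched_runnable() are placeholder names, since the MI side lives outside sys/arch and is not shown in this diff.

	/*
	 * Sketch only: one possible shape for an MI idle loop calling the
	 * MD hooks added above.  sched_idle() and sched_runnable() are
	 * assumed names, not taken from this diff.
	 */
	void cpu_idle_enter(void);	/* MD: prepare for idling */
	void cpu_idle_cycle(void);	/* MD: wait for work (PSL_POW on powerpc) */
	void cpu_idle_leave(void);	/* MD: undo whatever enter did */
	int  sched_runnable(void);	/* assumed MI helper: any proc runnable? */

	void
	sched_idle(void)		/* assumed per-CPU idle proc body */
	{
		for (;;) {
			cpu_idle_enter();
			while (!sched_runnable())
				cpu_idle_cycle();
			cpu_idle_leave();
			/* choose a proc and cpu_switchto() it -- omitted */
		}
	}
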
diff --git a/sys/arch/mips64/mips64/context.S b/sys/arch/mips64/mips64/context.S
index 79f9869a438..24deaf3b2ec 100644
--- a/sys/arch/mips64/mips64/context.S
+++ b/sys/arch/mips64/mips64/context.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: context.S,v 1.13 2007/07/16 20:22:18 miod Exp $ */
+/* $OpenBSD: context.S,v 1.14 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -69,92 +69,11 @@ LEAF(savectx, 0)
move v0, zero
END(savectx)
-/*
- * The following primitives manipulate the run queues. _whichqs tells which
- * of the 32 queues _qs have processes in them. Setrunqueue puts processes
- * into queues, Remrq removes them from queues. The running process is on
- * no queue, other processes are on a queue related to p->p_priority, divided
- * by 4 actually to shrink the 0-127 range of priorities into the 32 available
- * queues.
- */
-/*
- * setrunqueue(p)
- * proc *p;
- *
- * Call should be made at splclock(), and p->p_stat should be SRUN.
- */
-NON_LEAF(setrunqueue, FRAMESZ(CF_SZ), ra)
- .mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
- PTR_L t0, P_BACK(a0) ## firewall: p->p_back must be 0
- bne t0, zero, 1f ##
- lbu t0, P_PRIORITY(a0) # put on p->p_priority / 4 queue
- li t1, 1 # compute corresponding bit
- srl t0, t0, 2 # compute index into 'whichqs'
- sll t1, t1, t0
- lw t2, whichqs # set corresponding bit
- sll t0, t0, LOGREGSZ+1 # compute index into 'qs'
- or t2, t2, t1
- sw t2, whichqs
- LA t1, qs
- PTR_ADDU t0, t0, t1 # t0 = qp = &qs[pri >> 2]
- PTR_L t1, P_BACK(t0) # t1 = qp->ph_rlink
- PTR_S t0, P_FORW(a0) # p->p_forw = qp
- PTR_S t1, P_BACK(a0) # p->p_back = qp->ph_rlink
- PTR_S a0, P_FORW(t1) # p->p_back->p_forw = p;
- j ra
- PTR_S a0, P_BACK(t0) # qp->ph_rlink = p
-
-1:
+NON_LEAF(cpu_idle_enter, FRAMESZ(CF_SZ), ra)
PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
PTR_S ra, CF_RA_OFFS(sp)
- PANIC("setrunqueue")
- jr ra
- nop
-END(setrunqueue)
-
-/*
- * Remrq(p)
- *
- * Call should be made at splclock().
- */
-NON_LEAF(remrunqueue, FRAMESZ(CF_SZ), ra)
.mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
- lbu t0, P_PRIORITY(a0) # get from p->p_priority / 4 queue
- li t1, 1 # compute corresponding bit
- srl t0, t0, 2 # compute index into 'whichqs'
- lw t2, whichqs # check corresponding bit
- sll t1, t1, t0
- and v0, t2, t1
- beqz v0, 2f # oops! queue is empty!
- PTR_L v0, P_BACK(a0) # v0 = p->p_back
-
- PTR_L v1, P_FORW(a0) # v1 = p->p_forw
- PTR_SLL t0, t0, LOGREGSZ+1 # compute index into 'qs'
- PTR_S v1, P_FORW(v0) # p->p_back->p_forw = p->p_forw;
- PTR_S v0, P_BACK(v1) # p->p_forw->p_back = p->r_rlink
- LA v0, qs
- PTR_ADDU t0, t0, v0 # t0 = qp = &qs[pri >> 2]
- PTR_L v0, P_FORW(t0) # check if queue empty
- bne v0, t0, 1f # No. qp->ph_link != qp
- xor t2, t2, t1 # clear corresponding bit in 'whichqs'
- sw t2, whichqs
-1:
- j ra
- PTR_S zero, P_BACK(a0) # for firewall checking
-2:
- PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
- PTR_S ra, CF_RA_OFFS(sp)
- PANIC("remrunqueue empty")
- jr ra
- nop
-END(remrunqueue)
-
-/*
- * Idle, this is where we spend time when nothing to do.
- */
-LEAF(idle, 0)
-_idle:
sw zero, cpl # lower to spl0
lw t0, ipending
beqz t0, 1f
@@ -175,63 +94,57 @@ _idle:
jal updateimask # Make sure SR imask is updated
xor a0, a0
- li t1,1
#if defined(TGT_CP7000) || defined(TGT_CP7000G)
PTR_L t2, misc_h # if non zero, do Ocelot LEDs.
beqz t2, 1f
li t0, 0x40
sb t0, 0x0d(t2)
-#endif
1:
- lw t0, whichqs # look for non-empty queue
- beq t0, zero, 1b
- nop
+#endif
+
+ PTR_L ra, CF_RA_OFFS(sp)
+ j ra
+ PTR_ADDU sp, sp, FRAMESZ(CF_SZ)
+END(cpu_idle_enter)
+
+LEAF(cpu_idle_leave, 0)
#if defined(TGT_CP7000) || defined(TGT_CP7000G)
- beqz t2, sw1
+ PTR_L t2, misc_h # if non zero, do Ocelot LEDs.
+ beqz t2, 1f
li t0, 0x40
sb t0, 0x0c(t2)
+1:
#endif
- b sw1 # Hey, time to do some work!
- nop
- jr ra # DDB trace
- nop
- .globl e_idle
-e_idle:
-END(idle)
-/*
- * switch_exit(p)
- *
- * At exit of a process, do a cpu_switch for the last time.
- * All interrupts should be blocked at this point.
- */
-LEAF(switch_exit, 0)
- mfc0 v0, COP_0_STATUS_REG
- li v1, ~SR_INT_ENAB
- and v0, v0, v1
- mtc0 v0, COP_0_STATUS_REG
+ mfc0 a0, COP_0_STATUS_REG # disable interrupts
+ li a1, ~SR_INT_ENAB
+ and a0, a0, a1
+ mtc0 a0, COP_0_STATUS_REG
ITLBNOPFIX
- LA sp, idle_stack - FRAMESZ(CF_SZ)
- jal exit2
- nop
- PTR_S zero, curproc
- b sw0
+ j ra
nop
- jr ra # DDB trace
+END(cpu_idle_leave)
+
+LEAF(cpu_idle_cycle, 0)
+ j ra
nop
-END(switch_exit)
+END(cpu_idle_cycle)
/*
- * cpu_switch()
- * Find the highest priority process and resume it.
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*/
-NON_LEAF(cpu_switch, FRAMESZ(CF_SZ), ra)
+NON_LEAF(cpu_switchto, FRAMESZ(CF_SZ), ra)
PTR_L t3, curprocpaddr
REG_S sp, PCB_CONTEXT+8*REGSZ(t3) # save old sp
+
PTR_SUBU sp, sp, FRAMESZ(CF_SZ)
REG_S ra, CF_RA_OFFS(sp)
.mask 0x80000000, (CF_RA_OFFS - FRAMESZ(CF_SZ))
+
+ beqz a0, 1f
+ nop
+
lw t0, cpl
REG_S t0, PCB_CONTEXT+13*REGSZ(t3)
REG_S s0, PCB_CONTEXT+0*REGSZ(t3) # do a 'savectx()'
@@ -249,59 +162,23 @@ NON_LEAF(cpu_switch, FRAMESZ(CF_SZ), ra)
REG_S t0, PCB_CONTEXT+11*REGSZ(t3)
REG_S t1, PCB_CONTEXT+12*REGSZ(t3)
-sw0:
-# lw t2, cnt+V_SWTCH # for statistics
- lw t1, whichqs # look for non-empty queue
-# addu t2, t2, 1
-# sw t2, cnt+V_SWTCH
- beq t1, zero, _idle # if none, idle
- nop
-sw1:
- mfc0 v0, COP_0_STATUS_REG
+1:
+ /*
+ * Disable interrupts
+ */
+ mfc0 v0, COP_0_STATUS_REG # disable interrupts
li v1, ~SR_INT_ENAB
and v0, v0, v1
mtc0 v0, COP_0_STATUS_REG
ITLBNOPFIX
- lw t0, whichqs # look for non-empty queue
- li t2, -1 # t2 = lowest bit set
- beq t0, zero, _idle # if none, idle
- move t3, t0 # t3 = saved whichqs
-1:
- addu t2, t2, 1
- and t1, t0, 1 # bit set?
- beq t1, zero, 1b
- srl t0, t0, 1 # try next bit
-/*
- * Remove process from queue.
- */
- PTR_SLL t0, t2, LOGREGSZ+1
- LA t1, qs
- PTR_ADDU t0, t0, t1 # t0 = qp = &qs[highbit]
- PTR_L a0, P_FORW(t0) # a0 = p = highest pri process
- PTR_L v0, P_FORW(a0) # v0 = p->p_forw
- beq t0, a0, 4f # make sure something in queue
- PTR_S v0, P_FORW(t0) # qp->ph_link = p->p_forw;
- PTR_S t0, P_BACK(v0) # p->p_forw->p_back = qp
- bne v0, t0, 2f # queue still not empty
- PTR_S zero, P_BACK(a0) ## for firewall checking
- li v1, 1 # compute bit in 'whichqs'
- sll v1, v1, t2
- xor t3, t3, v1 # clear bit in 'whichqs'
- sw t3, whichqs
-2:
-/*
- * Switch to new context.
- */
- sw zero, want_resched
- jal pmap_activate # v0 = TLB PID
- move s0, a0 # BDSLOT: save p
-/*
- * We need to wire the process kernel stack mapping so there
- * will be no tlb misses in exception handlers. This is done
- * by invalidating any tlb entries mapping the U-area and
- * put valid mappings in tlb entries 0 and 1.
- */
+ /*
+ * Switch to new context
+ */
+ move s0, a1 # save p
+ sw zero, want_resched
+ jal pmap_activate
+ move a0, s0
PTR_L t3, P_ADDR(s0) # get uarea pointer.
PTR_S s0, curproc # set curproc
@@ -310,6 +187,13 @@ sw1:
li t1, SONPROC
sb t1, P_STAT(s0) # set to onproc.
+ /*
+ * We need to wire the process kernel stack mapping so there
+ * will be no tlb misses in exception handlers. This is done
+ * by invalidating any tlb entries mapping the U-area and
+ * put valid mappings in tlb entries 0 and 1.
+ */
+
or v0, t3
dmtc0 v0, COP_0_TLB_HI # init high entry (tlbid)
LA t1, (VM_MIN_KERNEL_ADDRESS)
@@ -399,9 +283,10 @@ ctx2:
ctx3:
-/*
- * Restore registers and return.
- */
+ /*
+ * Restore registers and return.
+ */
+
REG_L a0, PCB_CONTEXT+13*REGSZ(t3)
REG_L s0, PCB_CONTEXT+0*REGSZ(t3)
REG_L s1, PCB_CONTEXT+1*REGSZ(t3)
@@ -428,6 +313,4 @@ ctx3:
ITLBNOPFIX
j ra
nop
-4:
- PANIC("cpu_switch") # nothing in queue
-END(cpu_switch)
+END(cpu_switchto)
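
On mips64 the converted routine keeps the savectx-style register save but guards it with a test of a0: cpu_switchto() takes the outgoing proc in a0 and the incoming proc in a1, and a NULL outgoing proc means there is nothing worth saving. The snippet below only illustrates that calling contract; the helper names are invented.

	struct proc;
	void cpu_switchto(struct proc *old, struct proc *new);

	/* Ordinary switch: save "p"'s context, then resume "next". */
	static void
	switch_between(struct proc *p, struct proc *next)
	{
		cpu_switchto(p, next);
	}

	/*
	 * First switch on a CPU, or switch away from an exiting proc:
	 * no context to keep, so pass NULL and only the load half runs.
	 */
	static void
	switch_in_only(struct proc *next)
	{
		cpu_switchto(NULL, next);
	}
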
diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c
index 86f6a435b1a..4e2ee91e71f 100644
--- a/sys/arch/mips64/mips64/db_machdep.c
+++ b/sys/arch/mips64/mips64/db_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_machdep.c,v 1.12 2007/09/03 01:15:50 krw Exp $ */
+/* $OpenBSD: db_machdep.c,v 1.13 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
@@ -221,7 +221,6 @@ db_stack_trace_print(addr, have_addr, count, modif, pr)
extern char edata[];
extern char k_intr[];
extern char k_general[];
- extern char idle[];
struct trap_frame *regs = &ddb_regs;
/* get initial values from the exception frame */
@@ -396,14 +395,10 @@ loop:
}
done:
- if (symname == NULL) {
- if (subr == (long)idle)
- (*pr)("idle ");
- else
- (*pr)("%p ", subr);
- } else {
+ if (symname == NULL)
+ (*pr)("%p ", subr);
+ else
(*pr)("%s+%p ", symname, diff);
- }
(*pr)("(%llx,%llx,%llx,%llx) sp %llx ra %llx, sz %d\n", a0, a1, a2, a3, sp, ra, stksize);
if (subr == (long)k_intr || subr == (long)k_general) {
diff --git a/sys/arch/mips64/mips64/vm_machdep.c b/sys/arch/mips64/mips64/vm_machdep.c
index 30f280cc396..d6517d55ffd 100644
--- a/sys/arch/mips64/mips64/vm_machdep.c
+++ b/sys/arch/mips64/mips64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.15 2007/09/03 17:35:51 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.16 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1992, 1993
@@ -134,9 +134,9 @@ cpu_exit(p)
if (machFPCurProcPtr == p)
machFPCurProcPtr = (struct proc *)0;
- (void) splhigh();
- switch_exit(p);
- /* NOTREACHED */
+ (void)splhigh();
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
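
The same cpu_exit() rewrite recurs in the vm_machdep.c hunks for the other ports below: the MD switch_exit()/switchexit() path that used to free the dying proc's stack from assembly is gone, and cpu_exit() now just blocks switches, drops the pmap and calls sched_exit(). A hedged sketch of that shape, with sched_exit() assumed to be the MI routine that never returns:

	struct proc;
	int  splhigh(void);
	void pmap_deactivate(struct proc *);
	void sched_exit(struct proc *);	/* assumed MI; frees stack/vmspace, switches away */

	void
	cpu_exit_sketch(struct proc *p)
	{
		(void)splhigh();	/* block context switches from here on */
		pmap_deactivate(p);	/* detach the dying proc's address space */
		sched_exit(p);		/* never returns */
		/* NOTREACHED */
	}
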
diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s
index 38015370541..c92d4fb2978 100644
--- a/sys/arch/mvme68k/mvme68k/locore.s
+++ b/sys/arch/mvme68k/mvme68k/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.54 2007/05/15 13:46:22 martin Exp $ */
+/* $OpenBSD: locore.s,v 1.55 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -1165,51 +1165,18 @@ Ldorte:
*/
#include <m68k/m68k/support.s>
-/*
- * Use common m68k process manipulation routines.
- */
-#include <m68k/m68k/proc_subr.s>
-
.data
GLOBAL(curpcb)
.long 0
ASBSS(nullpcb,SIZEOF_PCB)
-/*
- * At exit of a process, do a switch for the last time.
- * Switch to a safe stack and PCB, and deallocate the process's resources.
- */
-ENTRY(switch_exit)
- movl sp@(4),a0
- | save state into garbage pcb
- movl #_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
- lea _ASM_LABEL(tmpstk),sp | goto a tmp stack
-
- /* Schedule the vmspace and stack to be freed. */
- movl a0,sp@- | exit2(p)
- jbsr _C_LABEL(exit2)
- lea sp@(4),sp | pop args
-
- jra _C_LABEL(cpu_switch)
-
-/*
- * When no processes are on the runq, Swtch branches to Idle
- * to wait for something to come ready.
- */
-ASENTRY_NOPROFILE(Idle)
+ENTRY_NOPROFILE(cpu_idle_cycle)
stop #PSL_LOWIPL
- movw #PSL_HIGHIPL,sr
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
- jra Lsw1
-
-Lbadsw:
- PANIC("switch")
- /*NOTREACHED*/
+ rts
/*
- * cpu_switch()
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*
* NOTE: On the mc68851 we attempt to avoid flushing the
* entire ATC. The effort involved in selective flushing may not be
@@ -1219,55 +1186,15 @@ Lbadsw:
* user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
* bit). For now, we just always flush the full ATC.
*/
-ENTRY(cpu_switch)
- movl _C_LABEL(curpcb),a0 | current pcb
- movw sr,a0@(PCB_PS) | save sr before changing ipl
-#ifdef notyet
- movl CURPROC,sp@- | remember last proc running
-#endif
- clrl CURPROC
+ENTRY(cpu_switchto)
+ movl sp@(4), d0 | oldproc
+ beq Lswnofpsave | is NULL, don't save anything
/*
- * Find the highest-priority queue that isn't empty,
- * then take the first proc from that queue.
- */
- movw #PSL_HIGHIPL,sr | lock out interrupts
- movl _C_LABEL(whichqs),d0
- jeq _ASM_LABEL(Idle)
-Lsw1:
- movl d0,d1
- negl d0
- andl d1,d0
- bfffo d0{#0:#32},d1
- eorib #31,d1
-
- movl d1,d0
- lslb #3,d1 | convert queue number to index
- addl #_C_LABEL(qs),d1 | locate queue (q)
- movl d1,a1
- movl a1@(P_FORW),a0 | p = q->p_forw
- cmpal d1,a0 | anyone on queue?
- jeq Lbadsw | no, panic
- movl a0@(P_FORW),a1@(P_FORW) | q->p_forw = p->p_forw
- movl a0@(P_FORW),a1 | n = p->p_forw
- movl d1,a1@(P_BACK) | n->p_back = q
- cmpal d1,a1 | anyone left on queue?
- jne Lsw2 | yes, skip
- movl _C_LABEL(whichqs),d1
- bclr d0,d1 | no, clear bit
- movl d1,_C_LABEL(whichqs)
-Lsw2:
- movl a0,CURPROC
- clrl _C_LABEL(want_resched)
-#ifdef notyet
- movl sp@+,a1
- cmpl a0,a1 | switching to same proc?
- jeq Lswdone | yes, skip save and restore
-#endif
- /*
* Save state of previous process in its pcb.
*/
movl _C_LABEL(curpcb),a1
+ movw sr, a1@(PCB_PS) | save sr before switching context
moveml #0xFCFC,a1@(PCB_REGS) | save non-scratch registers
movl usp,a2 | grab USP (a2 has been saved)
movl a2,a1@(PCB_USP) | and save it
@@ -1295,15 +1222,12 @@ Lsavfp60:
fmovem fpsr,a2@(FPF_FPSR)
fmovem fpi,a2@(FPF_FPI)
#endif /* M68060 */
+
Lswnofpsave:
-#ifdef DIAGNOSTIC
- tstl a0@(P_WCHAN)
- jne Lbadsw
- cmpb #SRUN,a0@(P_STAT)
- jne Lbadsw
-#endif
+ movl sp@(8), a0 | newproc
+
+ movl a0, CURPROC
movb #SONPROC,a0@(P_STAT)
- clrl a0@(P_BACK) | clear back link
movl a0@(P_ADDR),a1 | get p_addr
movl a1,_C_LABEL(curpcb)
diff --git a/sys/arch/mvme68k/mvme68k/vm_machdep.c b/sys/arch/mvme68k/mvme68k/vm_machdep.c
index e8ac2262a05..df44535cfda 100644
--- a/sys/arch/mvme68k/mvme68k/vm_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.45 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.46 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -117,20 +117,18 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
/*
* cpu_exit is called as the last action during exit.
- * We release the address space and machine-dependent resources,
- * including the memory for the user structure and kernel stack.
- * Once finished, we call switch_exit, which switches to a temporary
- * pcb and stack and never returns. We block memory allocation
- * until switch_exit has made things safe again.
+ *
+ * Block context switches and then call switch_exit() which will
+ * switch to another process thus we never return.
*/
void
cpu_exit(p)
struct proc *p;
{
+ (void)splhigh();
- splhigh();
- switch_exit(p);
- /* NOTREACHED */
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/mvme88k/mvme88k/locore.S b/sys/arch/mvme88k/mvme88k/locore.S
index 980804454bc..355d0dae81a 100644
--- a/sys/arch/mvme88k/mvme88k/locore.S
+++ b/sys/arch/mvme88k/mvme88k/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.49 2006/05/08 14:03:35 miod Exp $ */
+/* $OpenBSD: locore.S,v 1.50 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat.
* Copyright (c) 1998 Steve Murphree, Jr.
@@ -346,14 +346,6 @@ GLOBAL(secondary_start)
bsr.n _C_LABEL(secondary_main)
addu r31, r3, USIZE /* switch to idle stack */
- /*
- * At this point, the CPU has been correctly initialized and has
- * identified itself on the console.
- * All it needs now is to jump to the idle loop and wait for work to
- * be offered.
- */
- br _ASM_LABEL(cpu_switch_idle)
-
#endif /* MULTIPROCESSOR */
/*
diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c
index c743056aafa..6c94e8288aa 100644
--- a/sys/arch/mvme88k/mvme88k/machdep.c
+++ b/sys/arch/mvme88k/mvme88k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.192 2007/06/06 17:15:12 deraadt Exp $ */
+/* $OpenBSD: machdep.c,v 1.193 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 1998, 1999, 2000, 2001 Steve Murphree, Jr.
* Copyright (c) 1996 Nivas Madhur
@@ -719,19 +719,18 @@ void
secondary_main()
{
struct cpu_info *ci = curcpu();
+ int s;
cpu_configuration_print(0);
+ sched_init_cpu(ci);
ncpus++;
__cpu_simple_unlock(&cpu_mutex);
microuptime(&ci->ci_schedstate.spc_runtime);
ci->ci_curproc = NULL;
- /*
- * Upon return, the secondary cpu bootstrap code in locore will
- * enter the idle loop, waiting for some food to process on this
- * processor.
- */
+ SCHED_LOCK(s);
+ cpu_switchto(NULL, sched_chooseproc());
}
#endif /* MULTIPROCESSOR */
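
With the branch to cpu_switch_idle gone from the mvme88k locore, a secondary CPU now enters the scheduler from C at the end of secondary_main(): it registers itself with sched_init_cpu(), takes the scheduler lock and performs its first cpu_switchto() with no previous context. The sketch below only restates that sequence; SCHED_LOCK is stubbed out here and the function name is invented.

	struct cpu_info;
	struct proc;
	void sched_init_cpu(struct cpu_info *);
	struct proc *sched_chooseproc(void);
	void cpu_switchto(struct proc *, struct proc *);
	/* Stand-in for the kernel macro; the real one raises spl and locks the scheduler. */
	#define SCHED_LOCK(s)	((s) = 0)

	void
	secondary_enter_scheduler(struct cpu_info *ci)	/* invented name */
	{
		int s;

		sched_init_cpu(ci);	/* set up this CPU's MI scheduler state */
		SCHED_LOCK(s);		/* cpu_switchto() is entered with the lock held */
		cpu_switchto(NULL, sched_chooseproc());
		/* not reached: execution continues in the chosen proc */
	}
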
diff --git a/sys/arch/powerpc/conf/files.powerpc b/sys/arch/powerpc/conf/files.powerpc
index 9bfb6960e27..d3effdc83a4 100644
--- a/sys/arch/powerpc/conf/files.powerpc
+++ b/sys/arch/powerpc/conf/files.powerpc
@@ -1,8 +1,7 @@
-# $OpenBSD: files.powerpc,v 1.42 2007/03/22 19:26:27 kettenis Exp $
+# $OpenBSD: files.powerpc,v 1.43 2007/10/10 15:53:52 art Exp $
#
file arch/powerpc/powerpc/setjmp.S ddb
-file arch/powerpc/powerpc/Locore.c
file arch/powerpc/powerpc/bcopy.c
file arch/powerpc/powerpc/copystr.c
file arch/powerpc/powerpc/cpu_subr.c
diff --git a/sys/arch/powerpc/include/pcb.h b/sys/arch/powerpc/include/pcb.h
index a49d03f1488..7560dee7b17 100644
--- a/sys/arch/powerpc/include/pcb.h
+++ b/sys/arch/powerpc/include/pcb.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pcb.h,v 1.10 2007/03/20 20:59:53 kettenis Exp $ */
+/* $OpenBSD: pcb.h,v 1.11 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: pcb.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -49,7 +49,6 @@ struct pcb {
struct pmap *pcb_pm; /* pmap of our vmspace */
struct pmap *pcb_pmreal; /* real address of above */
register_t pcb_sp; /* saved SP */
- int pcb_spl; /* saved SPL */
faultbuf *pcb_onfault; /* For use during copyin/copyout */
int pcb_flags;
#define PCB_FPU 1 /* Process had FPU initialized */
diff --git a/sys/arch/powerpc/powerpc/vm_machdep.c b/sys/arch/powerpc/powerpc/vm_machdep.c
index 5a7d92622de..89755b7c774 100644
--- a/sys/arch/powerpc/powerpc/vm_machdep.c
+++ b/sys/arch/powerpc/powerpc/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.44 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.45 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.1 1996/09/30 16:34:57 ws Exp $ */
/*
@@ -123,7 +123,6 @@ cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
sf->sp = (int)cf;
sf->user_sr = pmap_kernel()->pm_sr[PPC_USER_SR]; /* just in case */
pcb->pcb_sp = (int)stktop2;
- pcb->pcb_spl = 0;
}
/*
@@ -155,8 +154,8 @@ cpu_exit(struct proc *p)
pool_put(&ppc_vecpl, pcb->pcb_vr);
#endif /* ALTIVEC */
- (void)splsched();
- switchexit(p);
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/sgi/localbus/macebus.c b/sys/arch/sgi/localbus/macebus.c
index f02bafba377..4e981dac5ef 100644
--- a/sys/arch/sgi/localbus/macebus.c
+++ b/sys/arch/sgi/localbus/macebus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: macebus.c,v 1.26 2007/07/09 21:40:24 jasper Exp $ */
+/* $OpenBSD: macebus.c,v 1.27 2007/10/10 15:53:52 art Exp $ */
/*
* Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se)
@@ -780,7 +780,6 @@ macebus_iointr(intrmask_t hwpend, struct trap_frame *cf)
intrmask_t
macebus_aux(intrmask_t hwpend, struct trap_frame *cf)
{
- extern char idle[], e_idle[];
u_int64_t mask;
mask = bus_space_read_8(&macebus_tag, mace_h, MACE_ISA_MISC_REG);
@@ -791,7 +790,8 @@ macebus_aux(intrmask_t hwpend, struct trap_frame *cf)
/* RED - User Mode */
if (cf->sr & SR_KSU_USER) {
mask &= ~MACE_ISA_MISC_RLED_OFF;
- } else if (cf->pc >= (long)idle && cf->pc < (long)e_idle) {
+ } else if (curproc == NULL ||
+ curproc == curcpu()->ci_schedstate.spc_idleproc) {
mask &= ~MACE_ISA_MISC_GLED_OFF;
} else {
mask &= ~(MACE_ISA_MISC_RLED_OFF | MACE_ISA_MISC_GLED_OFF);
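
The macebus LED hack can no longer compare the trap PC against an idle[]..e_idle[] text range, because there is no dedicated idle code in locore anymore; whether the CPU is idle is now a property of which proc is current. A tiny sketch of that test, with the structs trimmed to the one field used and the helper name invented:

	struct proc;
	struct schedstate_percpu { struct proc *spc_idleproc; /* ...more fields... */ };
	struct cpu_info { struct schedstate_percpu ci_schedstate; /* ...more fields... */ };
	extern struct proc *curproc;
	struct cpu_info *curcpu(void);

	static int
	cpu_is_idle(void)	/* invented helper; the diff open-codes this test */
	{
		return (curproc == NULL ||
		    curproc == curcpu()->ci_schedstate.spc_idleproc);
	}
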
diff --git a/sys/arch/sgi/sgi/genassym.cf b/sys/arch/sgi/sgi/genassym.cf
index a57571199d8..2e5ac9202f8 100644
--- a/sys/arch/sgi/sgi/genassym.cf
+++ b/sys/arch/sgi/sgi/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.9 2007/06/18 20:25:55 miod Exp $
+# $OpenBSD: genassym.cf,v 1.10 2007/10/10 15:53:52 art Exp $
#
# Copyright (c) 1997 Per Fogelstrom / Opsycon AB
#
@@ -38,9 +38,7 @@ include <machine/cpu.h>
export SONPROC
struct proc
-member p_forw
-member p_back
-member p_priority
+#member p_priority
member p_stat
member p_addr
#member P_UPTE p_md.md_upte
diff --git a/sys/arch/sh/include/cpu.h b/sys/arch/sh/include/cpu.h
index b29d7247ce0..8b43e78767b 100644
--- a/sys/arch/sh/include/cpu.h
+++ b/sys/arch/sh/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.10 2007/06/06 17:15:12 deraadt Exp $ */
+/* $OpenBSD: cpu.h,v 1.11 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: cpu.h,v 1.41 2006/01/21 04:24:12 uwe Exp $ */
/*-
@@ -125,6 +125,11 @@ extern int want_resched; /* need_resched() was called */
* We need a machine-independent name for this.
*/
#define DELAY(x) delay(x)
+
+#define cpu_idle_enter() do { /* nothing */ } while (0)
+#define cpu_idle_cycle() __asm volatile("sleep")
+#define cpu_idle_leave() do { /* nothing */ } while (0)
+
#endif /* _KERNEL */
/*
diff --git a/sys/arch/sh/sh/genassym.cf b/sys/arch/sh/sh/genassym.cf
index 9227338a9a3..d864f1397ec 100644
--- a/sys/arch/sh/sh/genassym.cf
+++ b/sys/arch/sh/sh/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.3 2007/05/14 07:05:49 art Exp $
+# $OpenBSD: genassym.cf,v 1.4 2007/10/10 15:53:52 art Exp $
# $NetBSD: genassym.cf,v 1.10 2005/12/11 12:19:00 christos Exp $
#-
@@ -54,8 +54,6 @@ member tf_spc
struct proc
member p_addr
-member p_back
-member p_forw
member p_stat
member p_wchan
member P_MD_UPTE p_md.md_upte
diff --git a/sys/arch/sh/sh/locore_c.c b/sys/arch/sh/sh/locore_c.c
index ede226184c7..d1df8aeef7b 100644
--- a/sys/arch/sh/sh/locore_c.c
+++ b/sys/arch/sh/sh/locore_c.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_c.c,v 1.5 2007/09/09 11:57:55 miod Exp $ */
+/* $OpenBSD: locore_c.c,v 1.6 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: locore_c.c,v 1.13 2006/03/04 01:13:35 uwe Exp $ */
/*-
@@ -127,26 +127,14 @@
#include <sh/ubcreg.h>
void (*__sh_switch_resume)(struct proc *);
-struct proc *cpu_switch_search(struct proc *);
-struct proc *cpu_switch_prepare(struct proc *, struct proc *);
-void switch_exit(struct proc *, void (*)(struct proc *));
-void idle(void);
+void cpu_switch_prepare(struct proc *, struct proc *);
int want_resched;
-#ifdef LOCKDEBUG
-#define SCHED_LOCK_IDLE() sched_lock_idle()
-#define SCHED_UNLOCK_IDLE() sched_unlock_idle()
-#else
-#define SCHED_LOCK_IDLE() do {} while (/* CONSTCOND */ 0)
-#define SCHED_UNLOCK_IDLE() do {} while (/* CONSTCOND */ 0)
-#endif
-
-
/*
* Prepare context switch from oproc to nproc.
- * This code is shared by cpu_switch and cpu_switchto.
+ * This code is used by cpu_switchto.
*/
-struct proc *
+void
cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
{
nproc->p_stat = SONPROC;
@@ -154,10 +142,8 @@ cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
if (oproc && (oproc->p_md.md_flags & MDP_STEP))
_reg_write_2(SH_(BBRB), 0);
- if (nproc != oproc) {
- curpcb = nproc->p_md.md_pcb;
- pmap_activate(nproc);
- }
+ curpcb = nproc->p_md.md_pcb;
+ pmap_activate(nproc);
if (nproc->p_md.md_flags & MDP_STEP) {
int pm_asid = nproc->p_vmspace->vm_map.pmap->pm_asid;
@@ -171,34 +157,6 @@ cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
}
curproc = nproc;
- return (nproc);
-}
-
-/*
- * Find the highest priority proc and prepare to switching to it.
- */
-struct proc *
-cpu_switch_search(struct proc *oproc)
-{
- struct prochd *q;
- struct proc *p;
-
- curproc = NULL;
-
- SCHED_LOCK_IDLE();
- while (sched_is_idle()) {
- SCHED_UNLOCK_IDLE();
- idle();
- SCHED_LOCK_IDLE();
- }
-
- q = &qs[ffs(whichqs) - 1];
- p = q->ph_link;
- remrunqueue(p);
- want_resched = 0;
- SCHED_UNLOCK_IDLE();
-
- return (cpu_switch_prepare(oproc, p));
}
void
@@ -207,24 +165,8 @@ cpu_exit(struct proc *p)
if (p->p_md.md_flags & MDP_STEP)
_reg_write_2(SH_(BBRB), 0);
- switch_exit(p, exit2);
-}
-
-/*
- * void idle(void):
- * When no processes are on the run queue, wait for something to come
- * ready. Separated function for profiling.
- */
-void
-idle()
-{
- spl0();
-#if 0
- if (uvm.page_idle_zero)
- uvm_pageidlezero();
-#endif
- __asm volatile("sleep");
- splsched();
+ pmap_deactivate(p);
+ sched_exit(p);
}
#ifndef P1_STACK
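
On sh the C half keeps its name but shrinks: cpu_switch_prepare() no longer returns the chosen proc or special-cases nproc == oproc, since cpu_switchto() is expected to receive a non-NULL new proc picked by MI code. In C terms the assembly entry point now reads roughly as below; steps 1 and 3 remain assembly in locore_subr.S, and the outline name is invented.

	struct proc;
	void cpu_switch_prepare(struct proc *oproc, struct proc *nproc);

	/* Outline of cpu_switchto() on sh, expressed in C for readability. */
	static void
	cpu_switchto_outline(struct proc *old, struct proc *new)
	{
		/* 1. if (old != NULL): save old's switchframe (assembly) */
		cpu_switch_prepare(old, new);	/* 2. set curpcb/curproc, activate pmap */
		/* 3. restore new's switchframe and continue on its stack (assembly) */
	}
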
diff --git a/sys/arch/sh/sh/locore_subr.S b/sys/arch/sh/sh/locore_subr.S
index de57709a5d5..5543b4870c6 100644
--- a/sys/arch/sh/sh/locore_subr.S
+++ b/sys/arch/sh/sh/locore_subr.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore_subr.S,v 1.6 2007/03/02 06:11:54 miod Exp $ */
+/* $OpenBSD: locore_subr.S,v 1.7 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: locore_subr.S,v 1.28 2006/01/23 22:52:09 uwe Exp $ */
/*
@@ -180,27 +180,26 @@
.text
.align 5 /* align cache line size (32B) */
/*
- * LINTSTUB: Func: void cpu_switch(struct proc *p, struct proc *XXX_IGNORED)
- * Find a runnable proc and switch to it. Wait if necessary.
+ * LINTSTUB: Func: void cpu_switchto(struct proc *old, struct proc *new)
+ * Switch proc contexts.
*/
-ENTRY(cpu_switch)
- /* Save current proc's context to switchframe */
+ENTRY(cpu_switchto)
+ mov r4, r0
+ cmp/eq #0, r0
+ bt 1f
+
+ /* Save old proc's context to switchframe */
mov.l .L_SF, r0
mov.l @(r0, r4), r1
SAVEPCB(r1)
add #PCB_FP, r1
SAVEFP(r1, r8, r9)
-.L_find_and_switch:
- /* Search next proc. cpu_switch_search may or may not sleep. */
- mov.l .L_cpu_switch_search, r0
+1:
+ mov.l .L_cpu_switch_prepare, r0
jsr @r0
- mov r4, r8 /* save old proc */
-
- /* Skip context switch if same proc. */
- cmp/eq r8, r0
- bt/s 1f
- mov r0, r4 /* new proc */
+ mov r5, r8 /* save new proc */
+ mov r8, r4
/* Setup kernel stack */
mov.l .L_SF, r0
@@ -224,7 +223,7 @@ ENTRY(cpu_switch)
/* Now OK to use kernel stack. */
/* Restore new proc's context from switchframe */
-1: mov.l .L_SF, r0
+ mov.l .L_SF, r0
mov.l @(r0, r4), r1
add #4, r1 /* r15 already restored */
mov.l @r1+, r14
@@ -248,49 +247,9 @@ ENTRY(cpu_switch)
nop
.align 2
.L_SF: .long (P_MD_PCB)
-.L_cpu_switch_search: .long _C_LABEL(cpu_switch_search)
+.L_cpu_switch_prepare: .long _C_LABEL(cpu_switch_prepare)
FUNC_SYMBOL(switch_resume)
-
-/*
- * LINTSTUB: Func: void switch_exit(struct proc *p, void (*exit_func)(struct proc *))
- * Called only from cpu_exit(p). Before we call exit_func to
- * free proc's resources (including kernel stack) we need to
- * switch to the proc0's kernel stack. Then we jump into the
- * middle of cpu_switch to find and switch to a new proc.
- */
-ALTENTRY(switch_exit)
- mov.l .L_switch_exit_proc0_pcb, r1
- mov.l .L_switch_exit_curpcb, r0
- mov.l @r1, r1
- mov.l r1, @r0 /* curpcb = proc0.p_md.md_pcb */
-
- mov.l @(SF_R7_BANK, r1), r0 /* stack top */
- mov.l @(SF_R6_BANK, r1), r2 /* current frame */
- mov.l @(SF_R15, r1), r3 /* current stack */
-
- /* switch to proc0's kernel stack */
- __EXCEPTION_BLOCK(r1, r6)
- ldc r0, r7_bank
- ldc r2, r6_bank
- mov r3, r15
- __EXCEPTION_UNBLOCK(r0, r1)
-
- /* safe to call (*exit_func)(p); now */
- jsr @r5
- nop /* proc is already in r4 */
-
- /* proceed to cpu_switch */
- bra .L_find_and_switch
- mov #0, r4 /* no "previous" proc */
-
- .align 2
-.L_switch_exit_proc0_pcb:
- .long _C_LABEL(proc0) + P_MD_PCB
-.L_switch_exit_curpcb:
- .long _C_LABEL(curpcb)
-
-
#ifdef SH3
/*
* LINTSTUB: Func: void sh3_switch_resume(struct proc *p)
diff --git a/sys/arch/sh/sh/sh_machdep.c b/sys/arch/sh/sh/sh_machdep.c
index d81017ac0f1..0cfcd801825 100644
--- a/sys/arch/sh/sh/sh_machdep.c
+++ b/sys/arch/sh/sh/sh_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sh_machdep.c,v 1.16 2007/06/06 17:15:12 deraadt Exp $ */
+/* $OpenBSD: sh_machdep.c,v 1.17 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: sh3_machdep.c,v 1.59 2006/03/04 01:13:36 uwe Exp $ */
/*
@@ -716,45 +716,6 @@ setregs(struct proc *p, struct exec_package *pack, u_long stack,
rval[1] = 0;
}
-void
-setrunqueue(struct proc *p)
-{
- int whichq = p->p_priority / PPQ;
- struct prochd *q;
- struct proc *prev;
-
-#ifdef DIAGNOSTIC
- if (p->p_back != NULL || p->p_wchan != NULL || p->p_stat != SRUN)
- panic("setrunqueue");
-#endif
- q = &qs[whichq];
- prev = q->ph_rlink;
- p->p_forw = (struct proc *)q;
- q->ph_rlink = p;
- prev->p_forw = p;
- p->p_back = prev;
- whichqs |= 1 << whichq;
-}
-
-void
-remrunqueue(struct proc *p)
-{
- struct proc *prev, *next;
- int whichq = p->p_priority / PPQ;
-
-#ifdef DIAGNOSTIC
- if (((whichqs & (1 << whichq)) == 0))
- panic("remrunqueue: bit %d not set", whichq);
-#endif
- prev = p->p_back;
- p->p_back = NULL;
- next = p->p_forw;
- prev->p_forw = next;
- next->p_back = prev;
- if (prev == next)
- whichqs &= ~(1 << whichq);
-}
-
/*
* Jump to reset vector.
*/
diff --git a/sys/arch/solbourne/solbourne/locore.s b/sys/arch/solbourne/solbourne/locore.s
index 8ff5842d209..0c8666cb599 100644
--- a/sys/arch/solbourne/solbourne/locore.s
+++ b/sys/arch/solbourne/solbourne/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.8 2006/04/15 17:36:47 miod Exp $ */
+/* $OpenBSD: locore.s,v 1.9 2007/10/10 15:53:52 art Exp $ */
/* OpenBSD: locore.s,v 1.64 2005/04/17 18:47:50 miod Exp */
/*
@@ -3233,291 +3233,52 @@ ENTRY(write_user_windows)
.comm _C_LABEL(masterpaddr), 4
/*
- * Switch statistics (for later tweaking):
- * nswitchdiff = p1 => p2 (i.e., chose different process)
- * nswitchexit = number of calls to switchexit()
- * _cnt.v_swtch = total calls to swtch+swtchexit
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*/
- .comm _C_LABEL(nswitchdiff), 4
- .comm _C_LABEL(nswitchexit), 4
-
-/*
- * REGISTER USAGE IN cpu_switch AND switchexit:
- * This is split into two phases, more or less
- * `before we locate a new proc' and `after'.
- * Some values are the same in both phases.
- * Note that the %o0-registers are not preserved across
- * the psr change when entering a new process, since this
- * usually changes the CWP field (hence heavy usage of %g's).
- *
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs)); newpsr
- * %g3 = p
- * %g4 = lastproc
- * %g5 = <free>; newpcb
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4; whichqs; vm
- * %o4 = tmp 4; which; sswap
- * %o5 = tmp 5; q; <free>
- */
-
-/*
- * switchexit is called only from cpu_exit() before the current process
- * has freed its kernel stack; we must free it. (curproc is already NULL.)
- *
- * We lay the process to rest by changing to the `idle' kernel stack,
- * and note that the `last loaded process' is nonexistent.
- */
-ENTRY(switchexit)
- mov %o0, %g2 ! save proc for exit2() call
-
- /*
- * Change pcb to idle u. area, i.e., set %sp to top of stack
- * and %psr to PSR_S|PSR_ET, and set cpcb to point to _idle_u.
- * Once we have left the old stack, we can call kmem_free to
- * destroy it. Call it any sooner and the register windows
- * go bye-bye.
- */
- set _C_LABEL(idle_u), %g5
- sethi %hi(_C_LABEL(cpcb)), %g6
- mov 1, %g7
- wr %g0, PSR_S, %psr ! change to window 0, traps off
- nop; nop; nop
- wr %g0, 2, %wim ! and make window 1 the trap window
- st %g5, [%g6 + %lo(_C_LABEL(cpcb))] ! cpcb = &idle_u
- st %g7, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
- set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
-#ifdef DEBUG
- set _C_LABEL(idle_u), %l6
- SET_SP_REDZONE(%l6, %l5)
-#endif
- wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
- call _C_LABEL(exit2) ! exit2(p)
- mov %g2, %o0
-
- /*
- * Now fall through to `the last switch'. %g6 was set to
- * %hi(_C_LABEL(cpcb)), but may have been clobbered in kmem_free,
- * so all the registers described below will be set here.
- *
- * REGISTER USAGE AT THIS POINT:
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs))
- * %g4 = lastproc
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o3 = whichqs
- */
-
- INCR(_C_LABEL(nswitchexit)) ! nswitchexit++;
- INCR(_C_LABEL(uvmexp)+V_SWTCH) ! cnt.v_switch++;
-
- mov PSR_S|PSR_ET, %g1 ! oldpsr = PSR_S | PSR_ET;
- sethi %hi(_C_LABEL(whichqs)), %g2
- clr %g4 ! lastproc = NULL;
+ENTRY(cpu_switchto)
sethi %hi(_C_LABEL(cpcb)), %g6
sethi %hi(_C_LABEL(curproc)), %g7
- /* FALLTHROUGH */
-
-/*
- * When no processes are on the runq, switch
- * idles here waiting for something to come ready.
- * The registers are set up as noted above.
- */
- .globl idle
-idle:
- st %g0, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = NULL;
- wr %g1, 0, %psr ! (void) spl0();
-1: ! spin reading _whichqs until nonzero
- ld [%g2 + %lo(_C_LABEL(whichqs))], %o3
- tst %o3
- bnz,a Lsw_scan
- wr %g1, IPL_CLOCK << 8, %psr ! (void) splclock();
- b,a 1b
-
-Lsw_panic_rq:
- sethi %hi(1f), %o0
- call _C_LABEL(panic)
- or %lo(1f), %o0, %o0
-Lsw_panic_wchan:
- sethi %hi(2f), %o0
- call _C_LABEL(panic)
- or %lo(2f), %o0, %o0
-Lsw_panic_srun:
- sethi %hi(3f), %o0
- call _C_LABEL(panic)
- or %lo(3f), %o0, %o0
-1: .asciz "switch rq"
-2: .asciz "switch wchan"
-3: .asciz "switch SRUN"
- _ALIGN
-
-/*
- * cpu_switch() picks a process to run and runs it, saving the current
- * one away. On the assumption that (since most workstations are
- * single user machines) the chances are quite good that the new
- * process will turn out to be the current process, we defer saving
- * it here until we have found someone to load. If that someone
- * is the current process we avoid both store and load.
- *
- * cpu_switch() is always entered at splstatclock or splhigh.
- *
- * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
- * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
- */
- .globl _C_LABEL(time)
-ENTRY(cpu_switch)
- /*
- * REGISTER USAGE AT THIS POINT:
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs))
- * %g3 = p
- * %g4 = lastproc
- * %g5 = tmp 0
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4, then at Lsw_scan, whichqs
- * %o4 = tmp 5, then at Lsw_scan, which
- * %o5 = tmp 6, then at Lsw_scan, q
- */
- sethi %hi(_C_LABEL(whichqs)), %g2 ! set up addr regs
- sethi %hi(_C_LABEL(cpcb)), %g6
- ld [%g6 + %lo(_C_LABEL(cpcb))], %o0
- std %o6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <sp,pc>;
+ ld [%g6 + %lo(_C_LABEL(cpcb))], %o2
+ std %o6, [%o2 + PCB_SP] ! cpcb->pcb_<sp,pc> = <sp,pc>;
rd %psr, %g1 ! oldpsr = %psr;
- sethi %hi(_C_LABEL(curproc)), %g7
- ld [%g7 + %lo(_C_LABEL(curproc))], %g4 ! lastproc = curproc;
- st %g1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
+ st %g1, [%o2 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
andn %g1, PSR_PIL, %g1 ! oldpsr &= ~PSR_PIL;
/*
- * In all the fiddling we did to get this far, the thing we are
- * waiting for might have come ready, so let interrupts in briefly
- * before checking for other processes. Note that we still have
- * curproc set---we have to fix this or we can get in trouble with
- * the run queues below.
- */
- st %g0, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = NULL;
- wr %g1, 0, %psr ! (void) spl0();
- nop; nop; nop ! paranoia
- wr %g1, IPL_CLOCK << 8 , %psr ! (void) splclock();
-
-Lsw_scan:
- nop; nop; nop ! paranoia
- ld [%g2 + %lo(_C_LABEL(whichqs))], %o3
-
- /*
- * Optimized inline expansion of `which = ffs(whichqs) - 1';
- * branches to idle if ffs(whichqs) was 0.
- */
- set ffstab, %o2
- andcc %o3, 0xff, %o1 ! byte 0 zero?
- bz,a 1f ! yes, try byte 1
- srl %o3, 8, %o0
- b 2f ! ffs = ffstab[byte0]; which = ffs - 1;
- ldsb [%o2 + %o1], %o0
-1: andcc %o0, 0xff, %o1 ! byte 1 zero?
- bz,a 1f ! yes, try byte 2
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte1] + 7;
- b 3f
- add %o0, 7, %o4
-1: andcc %o0, 0xff, %o1 ! byte 2 zero?
- bz,a 1f ! yes, try byte 3
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte2] + 15;
- b 3f
- add %o0, 15, %o4
-1: ldsb [%o2 + %o0], %o0 ! ffs = ffstab[byte3] + 24
- addcc %o0, 24, %o0 ! (note that ffstab[0] == -24)
- bz idle ! if answer was 0, go idle
- EMPTY
-2: sub %o0, 1, %o4 ! which = ffs(whichqs) - 1
-3: /* end optimized inline expansion */
-
- /*
- * We found a nonempty run queue. Take its first process.
- */
- set _C_LABEL(qs), %o5 ! q = &qs[which];
- sll %o4, 3, %o0
- add %o0, %o5, %o5
- ld [%o5], %g3 ! p = q->ph_link;
- cmp %g3, %o5 ! if (p == q)
- be Lsw_panic_rq ! panic("switch rq");
- EMPTY
- ld [%g3], %o0 ! tmp0 = p->p_forw;
- st %o0, [%o5] ! q->ph_link = tmp0;
- st %o5, [%o0 + 4] ! tmp0->p_back = q;
- cmp %o0, %o5 ! if (tmp0 == q)
- bne 1f
- EMPTY
- mov 1, %o1 ! whichqs &= ~(1 << which);
- sll %o1, %o4, %o1
- andn %o3, %o1, %o3
- st %o3, [%g2 + %lo(_C_LABEL(whichqs))]
-1:
- /*
- * PHASE TWO: NEW REGISTER USAGE:
+ * REGISTER USAGE:
* %g1 = oldpsr (excluding ipl bits)
* %g2 = newpsr
- * %g3 = p
- * %g4 = lastproc
* %g5 = newpcb
* %g6 = %hi(_C_LABEL(cpcb))
* %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
+ * %o0 = oldproc
+ * %o1 = newproc
* %o2 = tmp 3
* %o3 = vm
* %o4 = sswap
* %o5 = <free>
*/
- /* firewalls */
- ld [%g3 + P_WCHAN], %o0 ! if (p->p_wchan)
- tst %o0
- bne Lsw_panic_wchan ! panic("switch wchan");
- EMPTY
- ldsb [%g3 + P_STAT], %o0 ! if (p->p_stat != SRUN)
- cmp %o0, SRUN
- bne Lsw_panic_srun ! panic("switch SRUN");
- EMPTY
-
/*
- * Committed to running process p.
- * It may be the same as the one we were running before.
+ * Committed to running process p (in o1).
*/
+ mov %o1, %g3
+
mov SONPROC, %o0 ! p->p_stat = SONPROC
stb %o0, [%g3 + P_STAT]
sethi %hi(_C_LABEL(want_resched)), %o0
st %g0, [%o0 + %lo(_C_LABEL(want_resched))] ! want_resched = 0;
ld [%g3 + P_ADDR], %g5 ! newpcb = p->p_addr;
- st %g0, [%g3 + 4] ! p->p_back = NULL;
ld [%g5 + PCB_PSR], %g2 ! newpsr = newpcb->pcb_psr;
st %g3, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = p;
- cmp %g3, %g4 ! p == lastproc?
- be,a Lsw_sameproc ! yes, go return 0
- wr %g2, 0, %psr ! (after restoring ipl)
-
/*
- * Not the old process. Save the old process, if any;
- * then load p.
+ * Save the old process, if any; then load p.
*/
- tst %g4
+ tst %o0
be,a Lsw_load ! if no old process, go load
wr %g1, (IPL_CLOCK << 8) | PSR_ET, %psr
- INCR(_C_LABEL(nswitchdiff)) ! clobbers %o0,%o1
/*
* save: write back all windows (including the current one).
* XXX crude; knows nwindows <= 8
@@ -3659,16 +3420,17 @@ badstack:
*/
retl
- EMPTY
-1:
+ nop
-Lsw_sameproc:
- /*
- * We are resuming the process that was running at the
- * call to switch(). Just set psr ipl and return.
- */
-! wr %g2, 0 %psr ! %psr = newpsr; (done earlier)
- nop
+ENTRY(cpu_idle_enter)
+ retl
+ nop
+
+ENTRY(cpu_idle_cycle)
+ retl
+ nop
+
+ENTRY(cpu_idle_leave)
retl
nop
diff --git a/sys/arch/sparc/conf/files.sparc b/sys/arch/sparc/conf/files.sparc
index 00110dace96..1e4de8736a5 100644
--- a/sys/arch/sparc/conf/files.sparc
+++ b/sys/arch/sparc/conf/files.sparc
@@ -1,4 +1,4 @@
-# $OpenBSD: files.sparc,v 1.78 2007/05/29 09:54:03 sobrado Exp $
+# $OpenBSD: files.sparc,v 1.79 2007/10/10 15:53:52 art Exp $
# $NetBSD: files.sparc,v 1.44 1997/08/31 21:29:16 pk Exp $
# @(#)files.sparc 8.1 (Berkeley) 7/19/93
@@ -291,7 +291,6 @@ file arch/sparc/sparc/emul.c
file arch/sparc/sparc/in_cksum.c inet
file arch/sparc/sparc/intr.c
file arch/sparc/sparc/kgdb_machdep.c kgdb
-file arch/sparc/sparc/locore2.c
file arch/sparc/sparc/machdep.c !solbourne
file arch/sparc/sparc/mem.c !solbourne
file arch/sparc/sparc/mutex.c
diff --git a/sys/arch/sparc/sparc/locore.s b/sys/arch/sparc/sparc/locore.s
index 580d51bf563..ac109ebffeb 100644
--- a/sys/arch/sparc/sparc/locore.s
+++ b/sys/arch/sparc/sparc/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.72 2007/05/29 09:54:01 sobrado Exp $ */
+/* $OpenBSD: locore.s,v 1.73 2007/10/10 15:53:52 art Exp $ */
/* $NetBSD: locore.s,v 1.73 1997/09/13 20:36:48 pk Exp $ */
/*
@@ -4383,290 +4383,52 @@ ENTRY(write_user_windows)
.comm _C_LABEL(masterpaddr), 4
/*
- * Switch statistics (for later tweaking):
- * nswitchdiff = p1 => p2 (i.e., chose different process)
- * nswitchexit = number of calls to switchexit()
- * _cnt.v_swtch = total calls to swtch+swtchexit
+ * cpu_switchto(struct proc *oldproc, struct proc *newproc)
*/
- .comm _C_LABEL(nswitchdiff), 4
- .comm _C_LABEL(nswitchexit), 4
-
-/*
- * REGISTER USAGE IN cpu_switch AND switchexit:
- * This is split into two phases, more or less
- * `before we locate a new proc' and `after'.
- * Some values are the same in both phases.
- * Note that the %o0-registers are not preserved across
- * the psr change when entering a new process, since this
- * usually changes the CWP field (hence heavy usage of %g's).
- *
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs)); newpsr
- * %g3 = p
- * %g4 = lastproc
- * %g5 = <free>; newpcb
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4; whichqs; vm
- * %o4 = tmp 4; which; sswap
- * %o5 = tmp 5; q; <free>
- */
-
-/*
- * switchexit is called only from cpu_exit() before the current process
- * has freed its kernel stack; we must free it. (curproc is already NULL.)
- *
- * We lay the process to rest by changing to the `idle' kernel stack,
- * and note that the `last loaded process' is nonexistent.
- */
-ENTRY(switchexit)
- mov %o0, %g2 ! save proc for exit2() call
-
- /*
- * Change pcb to idle u. area, i.e., set %sp to top of stack
- * and %psr to PSR_S|PSR_ET, and set cpcb to point to _idle_u.
- * Once we have left the old stack, we can call kmem_free to
- * destroy it. Call it any sooner and the register windows
- * go bye-bye.
- */
- set _C_LABEL(idle_u), %g5
- sethi %hi(_C_LABEL(cpcb)), %g6
- mov 1, %g7
- wr %g0, PSR_S, %psr ! change to window 0, traps off
- wr %g0, 2, %wim ! and make window 1 the trap window
- st %g5, [%g6 + %lo(_C_LABEL(cpcb))] ! cpcb = &idle_u
- st %g7, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
- set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
-#ifdef DEBUG
- set _C_LABEL(idle_u), %l6
- SET_SP_REDZONE(%l6, %l5)
-#endif
- wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
- call _C_LABEL(exit2) ! exit2(p)
- mov %g2, %o0
-
- /*
- * Now fall through to `the last switch'. %g6 was set to
- * %hi(_C_LABEL(cpcb)), but may have been clobbered in kmem_free,
- * so all the registers described below will be set here.
- *
- * REGISTER USAGE AT THIS POINT:
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs))
- * %g4 = lastproc
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o3 = whichqs
- */
-
- INCR(_C_LABEL(nswitchexit)) ! nswitchexit++;
- INCR(_C_LABEL(uvmexp)+V_SWTCH) ! cnt.v_switch++;
-
- mov PSR_S|PSR_ET, %g1 ! oldpsr = PSR_S | PSR_ET;
- sethi %hi(_C_LABEL(whichqs)), %g2
- clr %g4 ! lastproc = NULL;
+ENTRY(cpu_switchto)
sethi %hi(_C_LABEL(cpcb)), %g6
sethi %hi(_C_LABEL(curproc)), %g7
- /* FALLTHROUGH */
-
-/*
- * When no processes are on the runq, switch
- * idles here waiting for something to come ready.
- * The registers are set up as noted above.
- */
- .globl idle
-idle:
- st %g0, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = NULL;
- wr %g1, 0, %psr ! (void) spl0();
-1: ! spin reading _whichqs until nonzero
- ld [%g2 + %lo(_C_LABEL(whichqs))], %o3
- tst %o3
- bnz,a Lsw_scan
- wr %g1, IPL_CLOCK << 8, %psr ! (void) splclock();
- b,a 1b
-
-Lsw_panic_rq:
- sethi %hi(1f), %o0
- call _C_LABEL(panic)
- or %lo(1f), %o0, %o0
-Lsw_panic_wchan:
- sethi %hi(2f), %o0
- call _C_LABEL(panic)
- or %lo(2f), %o0, %o0
-Lsw_panic_srun:
- sethi %hi(3f), %o0
- call _C_LABEL(panic)
- or %lo(3f), %o0, %o0
-1: .asciz "switch rq"
-2: .asciz "switch wchan"
-3: .asciz "switch SRUN"
- _ALIGN
-
-/*
- * cpu_switch() picks a process to run and runs it, saving the current
- * one away. On the assumption that (since most workstations are
- * single user machines) the chances are quite good that the new
- * process will turn out to be the current process, we defer saving
- * it here until we have found someone to load. If that someone
- * is the current process we avoid both store and load.
- *
- * cpu_switch() is always entered at splstatclock or splhigh.
- *
- * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
- * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
- */
- .globl _C_LABEL(time)
-ENTRY(cpu_switch)
- /*
- * REGISTER USAGE AT THIS POINT:
- * %g1 = oldpsr (excluding ipl bits)
- * %g2 = %hi(_C_LABEL(whichqs))
- * %g3 = p
- * %g4 = lastproc
- * %g5 = tmp 0
- * %g6 = %hi(_C_LABEL(cpcb))
- * %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4, then at Lsw_scan, whichqs
- * %o4 = tmp 5, then at Lsw_scan, which
- * %o5 = tmp 6, then at Lsw_scan, q
- */
- sethi %hi(_C_LABEL(whichqs)), %g2 ! set up addr regs
- sethi %hi(_C_LABEL(cpcb)), %g6
- ld [%g6 + %lo(_C_LABEL(cpcb))], %o0
- std %o6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <sp,pc>;
+ ld [%g6 + %lo(_C_LABEL(cpcb))], %o2
+ std %o6, [%o2 + PCB_SP] ! cpcb->pcb_<sp,pc> = <sp,pc>;
rd %psr, %g1 ! oldpsr = %psr;
- sethi %hi(_C_LABEL(curproc)), %g7
- ld [%g7 + %lo(_C_LABEL(curproc))], %g4 ! lastproc = curproc;
- st %g1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
+ st %g1, [%o2 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
andn %g1, PSR_PIL, %g1 ! oldpsr &= ~PSR_PIL;
/*
- * In all the fiddling we did to get this far, the thing we are
- * waiting for might have come ready, so let interrupts in briefly
- * before checking for other processes. Note that we still have
- * curproc set---we have to fix this or we can get in trouble with
- * the run queues below.
- */
- st %g0, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = NULL;
- wr %g1, 0, %psr ! (void) spl0();
- nop; nop; nop ! paranoia
- wr %g1, IPL_CLOCK << 8 , %psr ! (void) splclock();
-
-Lsw_scan:
- nop; nop; nop ! paranoia
- ld [%g2 + %lo(_C_LABEL(whichqs))], %o3
-
- /*
- * Optimized inline expansion of `which = ffs(whichqs) - 1';
- * branches to idle if ffs(whichqs) was 0.
- */
- set ffstab, %o2
- andcc %o3, 0xff, %o1 ! byte 0 zero?
- bz,a 1f ! yes, try byte 1
- srl %o3, 8, %o0
- b 2f ! ffs = ffstab[byte0]; which = ffs - 1;
- ldsb [%o2 + %o1], %o0
-1: andcc %o0, 0xff, %o1 ! byte 1 zero?
- bz,a 1f ! yes, try byte 2
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte1] + 7;
- b 3f
- add %o0, 7, %o4
-1: andcc %o0, 0xff, %o1 ! byte 2 zero?
- bz,a 1f ! yes, try byte 3
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte2] + 15;
- b 3f
- add %o0, 15, %o4
-1: ldsb [%o2 + %o0], %o0 ! ffs = ffstab[byte3] + 24
- addcc %o0, 24, %o0 ! (note that ffstab[0] == -24)
- bz idle ! if answer was 0, go idle
- EMPTY
-2: sub %o0, 1, %o4 ! which = ffs(whichqs) - 1
-3: /* end optimized inline expansion */
-
- /*
- * We found a nonempty run queue. Take its first process.
- */
- set _C_LABEL(qs), %o5 ! q = &qs[which];
- sll %o4, 3, %o0
- add %o0, %o5, %o5
- ld [%o5], %g3 ! p = q->ph_link;
- cmp %g3, %o5 ! if (p == q)
- be Lsw_panic_rq ! panic("switch rq");
- EMPTY
- ld [%g3], %o0 ! tmp0 = p->p_forw;
- st %o0, [%o5] ! q->ph_link = tmp0;
- st %o5, [%o0 + 4] ! tmp0->p_back = q;
- cmp %o0, %o5 ! if (tmp0 == q)
- bne 1f
- EMPTY
- mov 1, %o1 ! whichqs &= ~(1 << which);
- sll %o1, %o4, %o1
- andn %o3, %o1, %o3
- st %o3, [%g2 + %lo(_C_LABEL(whichqs))]
-1:
- /*
- * PHASE TWO: NEW REGISTER USAGE:
+ * REGISTER USAGE:
* %g1 = oldpsr (excluding ipl bits)
* %g2 = newpsr
- * %g3 = p
- * %g4 = lastproc
* %g5 = newpcb
* %g6 = %hi(_C_LABEL(cpcb))
* %g7 = %hi(_C_LABEL(curproc))
- * %o0 = tmp 1
- * %o1 = tmp 2
+ * %o0 = oldproc
+ * %o1 = newproc
* %o2 = tmp 3
* %o3 = vm
* %o4 = sswap
* %o5 = <free>
*/
- /* firewalls */
- ld [%g3 + P_WCHAN], %o0 ! if (p->p_wchan)
- tst %o0
- bne Lsw_panic_wchan ! panic("switch wchan");
- EMPTY
- ldsb [%g3 + P_STAT], %o0 ! if (p->p_stat != SRUN)
- cmp %o0, SRUN
- bne Lsw_panic_srun ! panic("switch SRUN");
- EMPTY
-
/*
- * Committed to running process p.
- * It may be the same as the one we were running before.
+ * Committed to running process p (in o1).
*/
+ mov %o1, %g3
+
mov SONPROC, %o0 ! p->p_stat = SONPROC
stb %o0, [%g3 + P_STAT]
sethi %hi(_C_LABEL(want_resched)), %o0
st %g0, [%o0 + %lo(_C_LABEL(want_resched))] ! want_resched = 0;
ld [%g3 + P_ADDR], %g5 ! newpcb = p->p_addr;
- st %g0, [%g3 + 4] ! p->p_back = NULL;
ld [%g5 + PCB_PSR], %g2 ! newpsr = newpcb->pcb_psr;
st %g3, [%g7 + %lo(_C_LABEL(curproc))] ! curproc = p;
- cmp %g3, %g4 ! p == lastproc?
- be,a Lsw_sameproc ! yes, go return 0
- wr %g2, 0, %psr ! (after restoring ipl)
-
/*
- * Not the old process. Save the old process, if any;
- * then load p.
+ * Save the old process, if any; then load p.
*/
- tst %g4
+ tst %o0
be,a Lsw_load ! if no old process, go load
wr %g1, (IPL_CLOCK << 8) | PSR_ET, %psr
- INCR(_C_LABEL(nswitchdiff)) ! clobbers %o0,%o1
/*
* save: write back all windows (including the current one).
* XXX crude; knows nwindows <= 8
@@ -4773,16 +4535,17 @@ Lsw_havectx:
sta %o0, [%o1] ASI_SRMMU ! setcontext(vm->vm_map.pmap->pm_ctxnum);
#endif
-Lsw_sameproc:
- /*
- * We are resuming the process that was running at the
- * call to switch(). Just set psr ipl and return.
- */
-! wr %g2, 0 %psr ! %psr = newpsr; (done earlier)
- nop
+ENTRY(cpu_idle_enter)
retl
nop
+ENTRY(cpu_idle_cycle)
+ retl
+ nop
+
+ENTRY(cpu_idle_leave)
+ retl
+ nop
/*
* Snapshot the current process so that stack frames are up to date.
diff --git a/sys/arch/sparc/sparc/locore2.c b/sys/arch/sparc/sparc/locore2.c
index f2b90ed0e4a..e69de29bb2d 100644
--- a/sys/arch/sparc/sparc/locore2.c
+++ b/sys/arch/sparc/sparc/locore2.c
@@ -1,99 +0,0 @@
-/* $OpenBSD: locore2.c,v 1.5 2003/06/02 23:27:55 millert Exp $ */
-/* $NetBSD: locore2.c,v 1.7 1996/11/06 20:19:53 cgd Exp $ */
-
-/*
- * Copyright (c) 1992, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This software was developed by the Computer Systems Engineering group
- * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
- * contributed to Berkeley.
- *
- * All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by the University of
- * California, Lawrence Berkeley Laboratory.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * @(#)locore2.c 8.4 (Berkeley) 12/10/93
- */
-
-/*
- * Primitives which are in locore.s on other machines,
- * but which have no reason to be assembly-coded on SPARC.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
-#include <sys/resourcevar.h>
-
-#include <machine/cpu.h>
-
-int whichqs;
-
-/*
- * Put process p on the run queue indicated by its priority.
- * Calls should be made at splstatclock(), and p->p_stat should be SRUN.
- */
-void
-setrunqueue(p)
- register struct proc *p;
-{
- register struct prochd *q;
- register struct proc *oldlast;
- register int which = p->p_priority >> 2;
-
- if (p->p_back != NULL)
- panic("setrunqueue");
- q = &qs[which];
- whichqs |= 1 << which;
- p->p_forw = (struct proc *)q;
- p->p_back = oldlast = q->ph_rlink;
- q->ph_rlink = p;
- oldlast->p_forw = p;
-}
-
-/*
- * Remove process p from its run queue, which should be the one
- * indicated by its priority. Calls should be made at splstatclock().
- */
-void
-remrunqueue(p)
- register struct proc *p;
-{
- register int which = p->p_priority >> 2;
- register struct prochd *q;
-
- if ((whichqs & (1 << which)) == 0)
- panic("remrunqueue");
- p->p_forw->p_back = p->p_back;
- p->p_back->p_forw = p->p_forw;
- p->p_back = NULL;
- q = &qs[which];
- if (q->ph_link == (struct proc *)q)
- whichqs &= ~(1 << which);
-}
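
locore2.c is removed outright on sparc, and the matching routines disappear from sh_machdep.c and the m68k/m88k ports above: per-port setrunqueue()/remrunqueue() built on p_forw/p_back are gone, with run-queue handling left to MI code. Purely as an illustration of the direction, a TAILQ-based equivalent of the deleted pair might look like this; none of these names or details are taken from the actual MI implementation, which is not part of this diff.

	#include <sys/queue.h>

	struct proc {
		TAILQ_ENTRY(proc)	p_runq;
		unsigned char		p_priority;	/* 0..127 */
	};
	TAILQ_HEAD(prochead, proc);

	static struct prochead	qs[32];
	static unsigned int	whichqs;

	static void
	runqueues_init(void)
	{
		int i;

		for (i = 0; i < 32; i++)
			TAILQ_INIT(&qs[i]);
	}

	static void
	setrunqueue_sketch(struct proc *p)
	{
		int queue = p->p_priority >> 2;	/* 32 queues for 128 priorities */

		TAILQ_INSERT_TAIL(&qs[queue], p, p_runq);
		whichqs |= 1U << queue;
	}

	static void
	remrunqueue_sketch(struct proc *p)
	{
		int queue = p->p_priority >> 2;

		TAILQ_REMOVE(&qs[queue], p, p_runq);
		if (TAILQ_EMPTY(&qs[queue]))
			whichqs &= ~(1U << queue);
	}
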
diff --git a/sys/arch/sparc/sparc/vm_machdep.c b/sys/arch/sparc/sparc/vm_machdep.c
index 73bbdfb1b45..97f5ee4adc1 100644
--- a/sys/arch/sparc/sparc/vm_machdep.c
+++ b/sys/arch/sparc/sparc/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.49 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.50 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.30 1997/03/10 23:55:40 pk Exp $ */
/*
@@ -449,10 +449,9 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
/*
* cpu_exit is called as the last action during exit.
*
- * We clean up a little and then call switchexit() with the old proc
- * as an argument. switchexit() switches to the idle context, schedules
- * the old vmspace and stack to be freed, then selects a new process to
- * run.
+ * We clean up a little and then call sched_exit() with the old proc
+ * as an argument. sched_exit() schedules the old vmspace and stack
+ * to be freed, then selects a new process to run.
*/
void
cpu_exit(p)
@@ -468,8 +467,8 @@ cpu_exit(p)
free((void *)fs, M_SUBPROC);
}
- switchexit(p);
- /* NOTREACHED */
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/sparc64/conf/files.sparc64 b/sys/arch/sparc64/conf/files.sparc64
index f3463fa8aa8..c9150465f13 100644
--- a/sys/arch/sparc64/conf/files.sparc64
+++ b/sys/arch/sparc64/conf/files.sparc64
@@ -1,4 +1,4 @@
-# $OpenBSD: files.sparc64,v 1.89 2007/09/10 21:33:16 kettenis Exp $
+# $OpenBSD: files.sparc64,v 1.90 2007/10/10 15:53:53 art Exp $
# $NetBSD: files.sparc64,v 1.50 2001/08/10 20:53:50 eeh Exp $
# maxpartitions must be first item in files.${ARCH}
@@ -243,7 +243,6 @@ file arch/sparc64/sparc64/ipifuncs.c multiprocessor
file arch/sparc64/sparc64/kgdb_machdep.c kgdb
# sparc64/sparc64/locore.s is handled specially in the makefile,
# because it must come first in the "ld" command line.
-file arch/sparc64/sparc64/locore2.c
file arch/sparc64/sparc64/machdep.c
file arch/sparc64/sparc64/mem.c
file arch/sparc64/sparc64/mutex.S
diff --git a/sys/arch/sparc64/sparc64/locore.s b/sys/arch/sparc64/sparc64/locore.s
index 07767d70bc5..2456810258e 100644
--- a/sys/arch/sparc64/sparc64/locore.s
+++ b/sys/arch/sparc64/sparc64/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.83 2007/09/30 21:34:20 kettenis Exp $ */
+/* $OpenBSD: locore.s,v 1.84 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: locore.s,v 1.137 2001/08/13 06:10:10 jdolecek Exp $ */
/*
@@ -5573,350 +5573,39 @@ Lcopyfault:
retl
mov EFAULT, %o0
-
- .data
- _ALIGN
-/*
- * Switch statistics (for later tweaking):
- * nswitchdiff = p1 => p2 (i.e., chose different process)
- * nswitchexit = number of calls to switchexit()
- * _cnt.v_swtch = total calls to swtch+swtchexit
- */
- .comm _C_LABEL(nswitchdiff), 4
- .comm _C_LABEL(nswitchexit), 4
- .text
-/*
- * REGISTER USAGE IN cpu_switch AND switchexit:
- * This is split into two phases, more or less
- * `before we locate a new proc' and `after'.
- * Some values are the same in both phases.
- * Note that the %o0-registers are not preserved across
- * the psr change when entering a new process, since this
- * usually changes the CWP field (hence heavy usage of %g's).
- *
- * %l1 = <free>; newpcb
- * %l2 = %hi(_whichqs); newpsr
- * %l3 = p
- * %l4 = lastproc
- * %l5 = oldpsr (excluding ipl bits)
- * %l6 = %hi(cpcb)
- * %l7 = %hi(curproc)
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4; whichqs; vm
- * %o4 = tmp 4; which; sswap
- * %o5 = tmp 5; q; <free>
- */
-
-/*
- * switchexit is called only from cpu_exit() before the current process
- * has freed its vmspace and kernel stack; we must schedule them to be
- * freed. (curproc is already NULL.)
- *
- * We lay the process to rest by changing to the `idle' kernel stack,
- * and note that the `last loaded process' is nonexistent.
- */
-ENTRY(switchexit)
- /*
- * Since we're exiting we don't need to save locals or ins, so
- * we won't need the next instruction.
- */
-! save %sp, -CC64FSZ, %sp
- flushw ! We don't have anything else to run, so why not
-#ifdef DEBUG
- save %sp, -CC64FSZ, %sp
- flushw
- restore
-#endif /* DEBUG */
- wrpr %g0, PSTATE_KERN, %pstate ! Make sure we're on the right globals
- mov %o0, %l2 ! save proc arg for exit2() call XXXXX
-
- /*
- * Change pcb to idle u. area, i.e., set %sp to top of stack
- * and %psr to PSR_S|PSR_ET, and set cpcb to point to _idle_u.
- * Once we have left the old stack, we can call kmem_free to
- * destroy it. Call it any sooner and the register windows
- * go bye-bye.
- */
- set _C_LABEL(idle_u), %l1
- sethi %hi(CPCB), %l6
-#if 0
- /* Get rid of the stack */
- rdpr %ver, %o0
- wrpr %g0, 0, %canrestore ! Fixup window state regs
- and %o0, 0x0f, %o0
- wrpr %g0, 0, %otherwin
- wrpr %g0, %o0, %cleanwin ! kernel don't care, but user does
- dec 1, %o0 ! What happens if we don't subtract 2?
- wrpr %g0, %o0, %cansave
- flushw ! DEBUG
-#endif /* 0 */
-
- stx %l1, [%l6 + %lo(CPCB)] ! cpcb = &idle_u
- set _C_LABEL(idle_u) + USPACE - CC64FSZ, %o0 ! set new %sp
- sub %o0, BIAS, %sp ! Maybe this should be a save?
- wrpr %g0, 0, %canrestore
- wrpr %g0, 0, %otherwin
- rdpr %ver, %l7
- and %l7, CWP, %l7
- wrpr %l7, 0, %cleanwin
- dec 1, %l7 ! NWINDOWS-1-1
- wrpr %l7, %cansave
- clr %fp ! End of stack.
-#ifdef DEBUG
- flushw ! DEBUG
- set _C_LABEL(idle_u), %l6
- SET_SP_REDZONE %l6, %l5
-#endif /* DEBUG */
- wrpr %g0, PSTATE_INTR, %pstate ! and then enable traps
- call _C_LABEL(exit2) ! exit2(p)
- mov %l2, %o0
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_lock_idle) ! Acquire sched_lock
-#endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
- wrpr %g0, PIL_SCHED, %pil ! Set splsched()
-
- /*
- * Now fall through to `the last switch'. %g6 was set to
- * %hi(cpcb), but may have been clobbered in kmem_free,
- * so all the registers described below will be set here.
- *
- * Since the process has exited we can blow its context
- * out of the MMUs now to free up those TLB entries rather
- * than have more useful ones replaced.
- *
- * REGISTER USAGE AT THIS POINT:
- * %l2 = %hi(_whichqs)
- * %l4 = lastproc
- * %l5 = oldpsr (excluding ipl bits)
- * %l6 = %hi(cpcb)
- * %l7 = %hi(curproc)
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o3 = whichqs
- */
-
- INCR _C_LABEL(nswitchexit) ! nswitchexit++;
- INCR _C_LABEL(uvmexp)+V_SWTCH ! cnt.v_switch++;
-
- mov CTX_SECONDARY, %o0
- sethi %hi(_C_LABEL(whichqs)), %l2
- sethi %hi(CPCB), %l6
- sethi %hi(CURPROC), %l7
- ldxa [%o0] ASI_DMMU, %l1 ! Don't demap the kernel
- ldx [%l6 + %lo(CPCB)], %l5
- clr %l4 ! lastproc = NULL;
- brz,pn %l1, 1f
- set DEMAP_CTX_SECONDARY, %l1 ! Demap secondary context
- stxa %g1, [%l1] ASI_DMMU_DEMAP
- stxa %g1, [%l1] ASI_IMMU_DEMAP
- membar #Sync
-1:
- stxa %g0, [%o0] ASI_DMMU ! Clear out our context
- membar #Sync
- /* FALLTHROUGH */
-
-/*
- * When no processes are on the runq, switch
- * idles here waiting for something to come ready.
- * The registers are set up as noted above.
- */
- .globl idle
-idle:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_unlock_idle) ! Release sched_lock
-#endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
- stx %g0, [%l7 + %lo(CURPROC)] ! curproc = NULL;
-1: ! spin reading _whichqs until nonzero
- wrpr %g0, PSTATE_INTR, %pstate ! Make sure interrupts are enabled
- wrpr %g0, 0, %pil ! (void) spl0();
- ld [%l2 + %lo(_C_LABEL(whichqs))], %o3
- brnz,pt %o3, notidle ! Something to run
- nop
-#ifdef UVM_PAGE_IDLE_ZERO
- ! Check uvm.page_idle_zero
- sethi %hi(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO), %o3
- ld [%o3 + %lo(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO)], %o3
- brz,pn %o3, 1b
- nop
-
- ! zero some pages
- call _C_LABEL(uvm_pageidlezero)
- nop
-#endif /* UVM_PAGE_IDLE_ZERO */
- ba,a,pt %xcc, 1b
- nop ! spitfire bug
-notidle:
- wrpr %g0, PIL_SCHED, %pil ! (void) splhigh();
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- call _C_LABEL(sched_lock_idle) ! Grab sched_lock
- add %o7, (Lsw_scan-.-4), %o7 ! Return to Lsw_scan directly
-#endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
- ba,a,pt %xcc, Lsw_scan
- nop ! spitfire bug
-
-Lsw_panic_rq:
- sethi %hi(1f), %o0
- call _C_LABEL(panic)
- or %lo(1f), %o0, %o0
Lsw_panic_wchan:
- sethi %hi(2f), %o0
- call _C_LABEL(panic)
- or %lo(2f), %o0, %o0
+ sethi %hi(1f), %o0
+ call _C_LABEL(panic)
+ or %lo(1f), %o0, %o0
Lsw_panic_srun:
- sethi %hi(3f), %o0
- call _C_LABEL(panic)
- or %lo(3f), %o0, %o0
- .data
-1: .asciz "switch rq"
-2: .asciz "switch wchan"
-3: .asciz "switch SRUN"
-idlemsg: .asciz "idle %x %x %x %x"
-idlemsg1: .asciz " %x %x %x\r\n"
- _ALIGN
+ sethi %hi(2f), %o0
+ call _C_LABEL(panic)
+ or %lo(2f), %o0, %o0
+ .data
+1: .asciz "switch wchan"
+2: .asciz "switch SRUN"
+
.text
/*
- * cpu_switch() picks a process to run and runs it, saving the current
- * one away. On the assumption that (since most workstations are
- * single user machines) the chances are quite good that the new
- * process will turn out to be the current process, we defer saving
- * it here until we have found someone to load. If that someone
- * is the current process we avoid both store and load.
+ * cpu_switchto(struct proc *old, struct proc *new)
*
- * cpu_switch() is always entered at splstatclock or splhigh.
- *
- * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
- * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
- *
- * Apparently cpu_switch() is called with curproc as the first argument,
- * but no port seems to make use of that parameter.
+ * Save the context of "old" and switch to "new".
*/
- .globl _C_LABEL(time)
-ENTRY(cpu_switch)
+ENTRY(cpu_switchto)
save %sp, -CC64FSZ, %sp
- /*
- * REGISTER USAGE AT THIS POINT:
- * %l1 = tmp 0
- * %l2 = %hi(_C_LABEL(whichqs))
- * %l3 = p
- * %l4 = lastproc
- * %l5 = cpcb
- * %l6 = %hi(CPCB)
- * %l7 = %hi(CURPROC)
- * %o0 = tmp 1
- * %o1 = tmp 2
- * %o2 = tmp 3
- * %o3 = tmp 4, then at Lsw_scan, whichqs
- * %o4 = tmp 5, then at Lsw_scan, which
- * %o5 = tmp 6, then at Lsw_scan, q
- */
-#ifdef DEBUG
- set swdebug, %o1
- ld [%o1], %o1
- brz,pt %o1, 2f
- set 1f, %o0
- call printf
- nop
- .data
-1: .asciz "s"
- _ALIGN
- .globl swdebug
-swdebug: .word 0
- .text
-2:
-#endif /* DEBUG */
flushw ! We don't have anything else to run, so why not flush
-#ifdef DEBUG
- save %sp, -CC64FSZ, %sp
- flushw
- restore
-#endif /* DEBUG */
rdpr %pstate, %o1 ! oldpstate = %pstate;
wrpr %g0, PSTATE_INTR, %pstate ! make sure we're on normal globals
+
+ mov %i0, %l4 ! oldproc
+ mov %i1, %l3 ! newproc
+
sethi %hi(CPCB), %l6
- sethi %hi(_C_LABEL(whichqs)), %l2 ! set up addr regs
ldx [%l6 + %lo(CPCB)], %l5
- sethi %hi(CURPROC), %l7
+ sethi %hi(CURPROC), %l7
stx %o7, [%l5 + PCB_PC] ! cpcb->pcb_pc = pc;
- ldx [%l7 + %lo(CURPROC)], %l4 ! lastproc = curproc;
sth %o1, [%l5 + PCB_PSTATE] ! cpcb->pcb_pstate = oldpstate;
- stx %g0, [%l7 + %lo(CURPROC)] ! curproc = NULL;
-
-Lsw_scan:
- ld [%l2 + %lo(_C_LABEL(whichqs))], %o3
-
-#ifndef POPC
- .globl _C_LABEL(__ffstab)
- /*
- * Optimized inline expansion of `which = ffs(whichqs) - 1';
- * branches to idle if ffs(whichqs) was 0.
- */
- set _C_LABEL(__ffstab), %o2
- andcc %o3, 0xff, %o1 ! byte 0 zero?
- bz,a,pn %icc, 1f ! yes, try byte 1
- srl %o3, 8, %o0
- ba,pt %icc, 2f ! ffs = ffstab[byte0]; which = ffs - 1;
- ldsb [%o2 + %o1], %o0
-1: andcc %o0, 0xff, %o1 ! byte 1 zero?
- bz,a,pn %icc, 1f ! yes, try byte 2
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte1] + 7;
- ba,pt %icc, 3f
- add %o0, 7, %o4
-1: andcc %o0, 0xff, %o1 ! byte 2 zero?
- bz,a,pn %icc, 1f ! yes, try byte 3
- srl %o0, 8, %o0
- ldsb [%o2 + %o1], %o0 ! which = ffstab[byte2] + 15;
- ba,pt %icc, 3f
- add %o0, 15, %o4
-1: ldsb [%o2 + %o0], %o0 ! ffs = ffstab[byte3] + 24
- addcc %o0, 24, %o0 ! (note that ffstab[0] == -24)
- bz,pn %icc, idle ! if answer was 0, go idle
-! XXX check no delay slot
-2: sub %o0, 1, %o4
-3: /* end optimized inline expansion */
-
-#else /* POPC */
- /*
- * Optimized inline expansion of `which = ffs(whichqs) - 1';
- * branches to idle if ffs(whichqs) was 0.
- *
- * This version uses popc.
- *
- * XXXX spitfires and blackbirds don't implement popc.
- *
- */
- brz,pn %o3, idle ! Don't bother if queues are empty
- neg %o3, %o1 ! %o1 = -zz
- xnor %o3, %o1, %o2 ! %o2 = zz ^ ~ -zz
- popc %o2, %o4 ! which = popc(whichqs)
- dec %o4 ! which = ffs(whichqs) - 1
-
-#endif /* POPC */
- /*
- * We found a nonempty run queue. Take its first process.
- */
- set _C_LABEL(qs), %o5 ! q = &qs[which];
- sll %o4, 3+1, %o0
- add %o0, %o5, %o5
- ldx [%o5], %l3 ! p = q->ph_link;
- cmp %l3, %o5 ! if (p == q)
- be,pn %icc, Lsw_panic_rq ! panic("switch rq");
-! XXX check no delay slot
- ldx [%l3], %o0 ! tmp0 = p->p_forw;
- stx %o0, [%o5] ! q->ph_link = tmp0;
- stx %o5, [%o0 + 8] ! tmp0->p_back = q;
- cmp %o0, %o5 ! if (tmp0 == q)
- bne 1f
-! XXX check no delay slot
- mov 1, %o1 ! whichqs &= ~(1 << which);
- sll %o1, %o4, %o1
- andn %o3, %o1, %o3
- st %o3, [%l2 + %lo(_C_LABEL(whichqs))]
-1:
/*
* PHASE TWO: NEW REGISTER USAGE:
* %l1 = newpcb
@@ -5945,7 +5634,6 @@ Lsw_scan:
/*
* Committed to running process p.
- * It may be the same as the one we were running before.
*/
#if defined(MULTIPROCESSOR)
/*
@@ -5958,19 +5646,7 @@ Lsw_scan:
sethi %hi(CPUINFO_VA+CI_WANT_RESCHED), %o0
st %g0, [%o0 + %lo(CPUINFO_VA+CI_WANT_RESCHED)] ! want_resched = 0;
ldx [%l3 + P_ADDR], %l1 ! newpcb = p->p_addr;
- stx %g0, [%l3 + 8] ! p->p_back = NULL;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- /*
- * Done mucking with the run queues, release the
- * scheduler lock, but keep interrupts out.
- */
- call _C_LABEL(sched_unlock_idle)
-#endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
- stx %l4, [%l7 + %lo(CURPROC)] ! restore old proc so we can save it
-
- cmp %l3, %l4 ! p == lastproc?
- be,pt %xcc, Lsw_sameproc ! yes, go return 0
- nop
+ stx %l4, [%l7 + %lo(CURPROC)] ! restore old proc so we can save it
/*
* Not the old process. Save the old process, if any;
@@ -5980,7 +5656,6 @@ Lsw_scan:
brz,pn %l4, Lsw_load ! if no old process, go load
wrpr %g0, PSTATE_KERN, %pstate
- INCR _C_LABEL(nswitchdiff) ! clobbers %o0,%o1,%o2
wb1:
flushw ! save all register windows except this one
stx %i7, [%l5 + PCB_PC] ! Save rpc
@@ -6060,11 +5735,6 @@ Lsw_havectx:
membar #Sync ! Maybe we should use flush here?
flush %sp
-Lsw_sameproc:
- /*
- * We are resuming the process that was running at the
- * call to switch(). Just set psr ipl and return.
- */
! wrpr %g0, 0, %cleanwin ! DEBUG
clr %g4 ! This needs to point to the base of the data segment
wr %g0, ASI_PRIMARY_NOFAULT, %asi ! Restore default ASI
@@ -6072,6 +5742,18 @@ Lsw_sameproc:
ret
restore
+ENTRY(cpu_idle_enter)
+ retl
+ nop
+
+ENTRY(cpu_idle_cycle)
+ retl
+ nop
+
+ENTRY(cpu_idle_leave)
+ retl
+ nop
+
/*
* Snapshot the current process so that stack frames are up to date.
* Only used just before a crash dump.
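With the run-queue scan gone from locore.s, choosing the next process is a purely MI decision and the assembler only has to honour the cpu_switchto(old, new) contract documented above: save "old", load "new", and never return to "old" until some later switch picks it again. A minimal sketch of such a caller follows; apart from cpu_switchto() itself, the names (sched_chooseproc(), the placement of the statistics bump) are assumptions, not the MI scheduler from this commit.

/*
 * Rough sketch of an MI switch path built on top of cpu_switchto().
 */
void
mi_switch_sketch(void)
{
	struct proc *old = curproc;
	struct proc *new;

	/* pick the next runnable proc, or the per-cpu idle proc */
	new = sched_chooseproc();		/* assumed MI helper */

	if (old != new) {
		uvmexp.swtch++;
		cpu_switchto(old, new);		/* MD: save old, run new */
	}
	/* execution resumes here when "old" is switched back in */
}

The cpu_idle_enter/cycle/leave stubs added at the end of the file give the MI idle loop its hooks; on sparc64 they currently do nothing, so an idle cpu effectively busy-waits in the MI loop until work shows up.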
diff --git a/sys/arch/sparc64/sparc64/vm_machdep.c b/sys/arch/sparc64/sparc64/vm_machdep.c
index 5d46c9eb715..88ba247878a 100644
--- a/sys/arch/sparc64/sparc64/vm_machdep.c
+++ b/sys/arch/sparc64/sparc64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.15 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.16 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.38 2001/06/30 00:02:20 eeh Exp $ */
/*
@@ -316,8 +316,7 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
* run.
*/
void
-cpu_exit(p)
- struct proc *p;
+cpu_exit(struct proc *p)
{
register struct fpstate64 *fs;
@@ -328,8 +327,9 @@ cpu_exit(p)
}
free((void *)fs, M_SUBPROC);
}
- switchexit(p);
- /* NOTREACHED */
+
+ pmap_deactivate(p);
+ sched_exit(p);
}
/*
diff --git a/sys/arch/vax/include/cpu.h b/sys/arch/vax/include/cpu.h
index 80b05d5d185..dc6ec203a16 100644
--- a/sys/arch/vax/include/cpu.h
+++ b/sys/arch/vax/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.25 2007/05/16 05:19:13 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.26 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: cpu.h,v 1.41 1999/10/21 20:01:36 ragge Exp $ */
/*
@@ -129,6 +129,10 @@ extern int want_resched; /* resched() was called */
*/
#define need_proftick(p) mtpr(AST_OK,PR_ASTLVL)
+#define cpu_idle_enter() do { /* nothing */ } while (0)
+#define cpu_idle_cycle() do { /* nothing */ } while (0)
+#define cpu_idle_leave() do { /* nothing */ } while (0)
+
/*
* This defines the I/O device register space size in pages.
*/
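vax gets away with defining all three idle hooks as no-ops because the idle loop itself is now MI; the hooks only mark the points where a port may save power or synchronise. A hedged sketch of how such a loop would drive them (only the cpu_idle_*() names come from this diff, the rest is assumed):

/*
 * Sketch of a per-cpu MI idle loop.  On vax every hook expands to
 * nothing; a port with a "wait for interrupt" instruction would put
 * it in cpu_idle_cycle().
 */
void
sched_idle_sketch(struct cpu_info *ci)
{
	for (;;) {
		cpu_idle_enter();
		while (!cpu_is_queued(ci))	/* assumed "work pending?" check */
			cpu_idle_cycle();
		cpu_idle_leave();
		mi_switch_sketch();		/* hand the cpu to the chosen proc */
	}
}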
diff --git a/sys/arch/vax/include/macros.h b/sys/arch/vax/include/macros.h
index 0c514ff5aa7..87bd942c43a 100644
--- a/sys/arch/vax/include/macros.h
+++ b/sys/arch/vax/include/macros.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: macros.h,v 1.14 2006/11/06 21:31:36 miod Exp $ */
+/* $OpenBSD: macros.h,v 1.15 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: macros.h,v 1.20 2000/07/19 01:02:52 matt Exp $ */
/*
@@ -232,15 +232,10 @@ skpc(int mask, size_t size, u_char *cp)
return ret;
}
-#define setrunqueue(p) \
- __asm__ __volatile("movl %0,r0;jsb Setrq":: "g"(p):"r0","r1","r2");
-
-#define remrunqueue(p) \
- __asm__ __volatile("movl %0,r0;jsb Remrq":: "g"(p):"r0","r1","r2");
-
-#define cpu_switch(p) \
- __asm__ __volatile("movl %0,r0;movpsl -(sp);jsb Swtch" \
- ::"g"(p):"r0","r1","r2","r3");
+#define cpu_switchto(o, n) \
+ __asm__ __volatile__( \
+ "movl %0,r0; movl %1, r1; movpsl -(sp); jsb __cpu_switchto" \
+ :: "g"(o), "g"(n) : "r0", "r1");
/*
* Interlock instructions. Used both in multiprocessor environments to
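On vax, cpu_switchto() stays a macro: it loads the old proc into r0 and the new proc into r1, pushes the PSL with movpsl, and enters the assembler with jsb, so __cpu_switchto starts with pc and psl already stacked and can finish with rei. From C the call site looks the same as on the ports where cpu_switchto() is a real function; a small hypothetical example:

	struct proc *old = curproc;
	struct proc *new = sched_chooseproc();	/* assumed MI helper */

	cpu_switchto(old, new);			/* expands to the inline asm above */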
diff --git a/sys/arch/vax/vax/subr.s b/sys/arch/vax/vax/subr.s
index 6c669bd47af..b0ca031e3d2 100644
--- a/sys/arch/vax/vax/subr.s
+++ b/sys/arch/vax/vax/subr.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr.s,v 1.26 2007/05/16 05:19:15 miod Exp $ */
+/* $OpenBSD: subr.s,v 1.27 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: subr.s,v 1.32 1999/03/25 00:41:48 mrg Exp $ */
/*
@@ -240,114 +240,37 @@ ENTRY(longjmp, 0)
#endif
#
-# setrunqueue/remrunqueue fast variants.
+# void
+# cpu_switchto(struct proc *oldproc = r0, struct proc *newproc = r1);
#
-JSBENTRY(Setrq)
-#ifdef DIAGNOSTIC
- tstl 4(r0) # Check that process actually are off the queue
- beql 1f
- pushab setrq
- calls $1,_panic
-setrq: .asciz "setrunqueue"
-#endif
-1: extzv $2,$6,P_PRIORITY(r0),r1 # get priority
- movaq _qs[r1],r2 # get address of queue
- insque (r0),*4(r2) # put proc last in queue
- bbss r1,_whichqs,1f # set queue bit.
-1: rsb
-
-JSBENTRY(Remrq)
- extzv $2,$6,P_PRIORITY(r0),r1
-#ifdef DIAGNOSTIC
- bbs r1,_whichqs,1f
- pushab remrq
- calls $1,_panic
-remrq: .asciz "remrunqueue"
-#endif
-1: remque (r0),r2
- bneq 1f # Not last process on queue
- bbsc r1,_whichqs,1f
-1: clrl 4(r0) # saftey belt
- rsb
-
-#
-# Idle loop. Here we could do something fun, maybe, like calculating
-# pi or something.
-#
-idle: mtpr $0,$PR_IPL # Enable all types of interrupts
-1: tstl _whichqs # Anything ready to run?
- beql 1b # no, continue to loop
- brb Swtch # Yes, goto switch again.
+#define CURPROC _cpu_info_store + CI_CURPROC
-#
-# cpu_switch, cpu_exit and the idle loop implemented in assembler
-# for efficiency. r0 contains pointer to last process.
-#
+JSBENTRY(__cpu_switchto)
+ svpctx
-#define CURPROC _cpu_info_store + CI_CURPROC
+ movb $SONPROC,P_STAT(r1) # p->p_stat = SONPROC
+ movl r1, CURPROC # set new process running
-JSBENTRY(Swtch)
- clrl CURPROC # Stop process accounting
-#bpt
- mtpr $0x1f,$PR_IPL # block all interrupts
- ffs $0,$32,_whichqs,r3 # Search for bit set
- beql idle # no bit set, go to idle loop
-
- movaq _qs[r3],r1 # get address of queue head
- remque *(r1),r2 # remove proc pointed to by queue head
-#ifdef DIAGNOSTIC
- bvc 1f # check if something on queue
- pushab noque
- calls $1,_panic
-noque: .asciz "swtch"
-#endif
-1: bneq 2f # more processes on queue?
- bbsc r3,_whichqs,2f # no, clear bit in whichqs
-2: clrl 4(r2) # clear proc backpointer
- clrl _want_resched # we are now changing process
- movb $SONPROC,P_STAT(r2) # p->p_stat = SONPROC
- movl r2,CURPROC # set new process running
- cmpl r0,r2 # Same process?
- bneq 1f # No, continue
- rsb
-xxd:
-1: movl P_ADDR(r2),r0 # Get pointer to new pcb.
+ movl P_ADDR(r1),r0 # Get pointer to new pcb.
addl3 r0,$IFTRAP,pcbtrap # Save for copy* functions.
-#
-# Nice routine to get physical from virtual addresses.
-#
+ # inline kvtophys
extzv $9,$21,r0,r1 # extract offset
movl *_Sysmap[r1],r2 # get pte
ashl $9,r2,r3 # shift to get phys address.
#
# Do the actual process switch. pc + psl are already on stack, from
-# the calling routine.
+# the beginning of this routine.
#
- svpctx
mtpr r3,$PR_PCBB
- ldpctx
- rei
-
-#
-# the last routine called by a process.
-#
-
-ENTRY(cpu_exit,0)
- movl 4(ap),r6 # Process pointer in r6
- mtpr $0x18,$PR_IPL # Block almost everything
- addl3 $512,_scratch,sp # Change stack, and schedule it to be freed
- pushl r6
- calls $1,_exit2
-
- clrl r0 # No process to switch from
- bicl3 $0xc0000000,_scratch,r1
- mtpr r1,$PR_PCBB
- brw Swtch
+ pushl CURPROC
+ calls $1, _C_LABEL(pmap_activate)
+ ldpctx
+ rei
#
# copy/fetch/store routines.
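The "inline kvtophys" in __cpu_switchto above turns the new pcb's kernel virtual address into a physical address for PR_PCBB: extzv pulls the virtual page number out of bits 9..29, the system page table is indexed with it, and the PFN from the PTE is shifted back up by the 512-byte VAX page size (the pcb sits at the start of the page-aligned u-area, so no page offset is needed). A hedged C restatement, assuming Sysmap is the usual array of 32-bit PTEs and PG_FRAME masks the PFN bits:

/*
 * Illustrative only -- the assembler version works on the raw PTE and
 * skips the PG_FRAME mask.
 */
paddr_t
kvtophys_sketch(vaddr_t va)
{
	u_int vpn = (va >> 9) & 0x1fffff;	/* extzv $9,$21,r0,r1 */
	u_int pte = Sysmap[vpn];		/* movl *_Sysmap[r1],r2 */

	return ((paddr_t)(pte & PG_FRAME) << 9); /* ashl $9: PFN * 512 */
}

The pmap_activate(CURPROC) call just before ldpctx takes over from the pmap_activate() that used to be done in cpu_fork() (removed from vm_machdep.c below), so the new process's page table state is presumably set up when it is switched to rather than once at fork time.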
diff --git a/sys/arch/vax/vax/vm_machdep.c b/sys/arch/vax/vax/vm_machdep.c
index 7205101225d..603bb17e3d3 100644
--- a/sys/arch/vax/vax/vm_machdep.c
+++ b/sys/arch/vax/vax/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.34 2007/06/20 17:29:36 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.35 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.67 2000/06/29 07:14:34 mrg Exp $ */
/*
@@ -58,6 +58,16 @@
#include <sys/syscallargs.h>
+void
+cpu_exit(struct proc *p)
+{
+ int s;
+ s = splhigh(); /* splclock(); */
+
+ pmap_deactivate(p);
+ sched_exit(p);
+}
+
/*
* Finish a fork operation, with process p2 nearly set up.
* Copy and update the pcb and trap frame, making the child ready to run.
@@ -110,13 +120,6 @@ cpu_fork(p1, p2, stack, stacksize, func, arg)
p2->p_addr->u_pcb.framep = tf;
bcopy(p1->p_addr->u_pcb.framep, tf, sizeof(*tf));
- /*
- * Activate address space for the new process. The PTEs have
- * already been allocated by way of pmap_create().
- * This writes the page table registers to the PCB.
- */
- pmap_activate(p2);
-
/* Mark guard page invalid in kernel stack */
*kvtopte((u_int)p2->p_addr + REDZONEADDR) &= ~PG_V;