summaryrefslogtreecommitdiff
path: root/sys/arch/alpha
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>2007-10-10 15:53:54 +0000
committerArtur Grabowski <art@cvs.openbsd.org>2007-10-10 15:53:54 +0000
commite51062c8cca21a333603b567563e3b84f74ddac0 (patch)
treedccf12b7d5ef806260203fe60b2bcaf94260c651 /sys/arch/alpha
parent34c540de32da6090afdcdd6fee481f9a2df345fd (diff)
Make context switching much more MI:
- Move the functionality of choosing a process from cpu_switch into a much simpler function: cpu_switchto. Instead of having the locore code walk the run queues, let the MI code choose the process we want to run and only implement the context switching itself in MD code. - Let MD context switching run without worrying about spls or locks. - Instead of having the idle loop implemented with special contexts in MD code, implement one idle proc for each cpu. Make the idle loop MI with MD hooks. - Change the proc lists from the old style vax queues to TAILQs. - Change the sleep queue from vax queues to TAILQs. This makes wakeup() go from O(n^2) to O(n). There will be some MD fallout, but it will be fixed shortly. There are also a few cleanups to be done after this. deraadt@, kettenis@ ok
Diffstat (limited to 'sys/arch/alpha')
-rw-r--r--sys/arch/alpha/alpha/genassym.cf7
-rw-r--r--sys/arch/alpha/alpha/locore.s195
-rw-r--r--sys/arch/alpha/alpha/machdep.c62
-rw-r--r--sys/arch/alpha/alpha/vm_machdep.c6
4 files changed, 45 insertions, 225 deletions
diff --git a/sys/arch/alpha/alpha/genassym.cf b/sys/arch/alpha/alpha/genassym.cf
index cddc3760215..d8393934035 100644
--- a/sys/arch/alpha/alpha/genassym.cf
+++ b/sys/arch/alpha/alpha/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.10 2007/05/28 23:10:10 beck Exp $
+# $OpenBSD: genassym.cf,v 1.11 2007/10/10 15:53:51 art Exp $
# Copyright (c) 1994, 1995 Gordon W. Ross
# Copyright (c) 1993 Adam Glass
@@ -103,17 +103,12 @@ export ALPHA_PTE_KW
# Important offsets into the proc struct & associated constants
struct proc
-member p_forw
-member p_back
member p_addr
member p_vmspace
member p_stat
member P_MD_FLAGS p_md.md_flags
member P_MD_PCBPADDR p_md.md_pcbpaddr
member p_cpu
-struct prochd
-member ph_link
-member ph_rlink
export SONPROC
diff --git a/sys/arch/alpha/alpha/locore.s b/sys/arch/alpha/alpha/locore.s
index 6b9f238138f..fe7453cec66 100644
--- a/sys/arch/alpha/alpha/locore.s
+++ b/sys/arch/alpha/alpha/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.30 2007/05/28 23:10:10 beck Exp $ */
+/* $OpenBSD: locore.s,v 1.31 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: locore.s,v 1.94 2001/04/26 03:10:44 ross Exp $ */
/*-
@@ -716,123 +716,38 @@ LEAF(savectx, 1)
/**************************************************************************/
-IMPORT(whichqs, 4)
-
-/*
- * When no processes are on the runq, cpu_switch branches to idle
- * to wait for something to come ready.
- * Note: this is really a part of cpu_switch() but defined here for kernel
- * profiling.
- */
-LEAF(idle, 0)
- br pv, 1f
-1: LDGP(pv)
- /* Note: GET_CURPROC clobbers v0, t0, t8...t11. */
- GET_CURPROC
- stq zero, 0(v0) /* curproc <- NULL for stats */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_unlock_idle) /* release sched_lock */
-#endif
- mov zero, a0 /* enable all interrupts */
- call_pal PAL_OSF1_swpipl
-2: ldl t0, whichqs /* look for non-empty queue */
- beq t0, 2b
- ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
- call_pal PAL_OSF1_swpipl
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_lock_idle) /* acquire sched_lock */
-#endif
- jmp zero, cpu_switch_queuescan /* jump back into the fire */
- END(idle)
-
/*
- * cpu_switch()
- * Find the highest priority process and resume it.
+ * cpu_switchto(struct proc *old, struct proc *new)
+ * Switch from "old" proc to "new".
*/
-LEAF(cpu_switch, 0)
+LEAF(cpu_switchto, 2)
LDGP(pv)
- /*
- * do an inline savectx(), to save old context
- * Note: GET_CURPROC clobbers v0, t0, t8...t11.
- */
- GET_CURPROC
- ldq a0, 0(v0)
- ldq a1, P_ADDR(a0)
- /* NOTE: ksp is stored by the swpctx */
- stq s0, U_PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
- stq s1, U_PCB_CONTEXT+(1 * 8)(a1)
- stq s2, U_PCB_CONTEXT+(2 * 8)(a1)
- stq s3, U_PCB_CONTEXT+(3 * 8)(a1)
- stq s4, U_PCB_CONTEXT+(4 * 8)(a1)
- stq s5, U_PCB_CONTEXT+(5 * 8)(a1)
- stq s6, U_PCB_CONTEXT+(6 * 8)(a1)
- stq ra, U_PCB_CONTEXT+(7 * 8)(a1) /* store ra */
- call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
- stq v0, U_PCB_CONTEXT+(8 * 8)(a1) /* store ps, for ipl */
- mov a0, s0 /* save old curproc */
- mov a1, s1 /* save old U-area */
-
-cpu_switch_queuescan:
- br pv, 1f
-1: LDGP(pv)
- ldl t0, whichqs /* look for non-empty queue */
- beq t0, idle /* and if none, go idle */
- mov t0, t3 /* t3 = saved whichqs */
- mov zero, t2 /* t2 = lowest bit set */
- blbs t0, 3f /* if low bit set, done! */
-
-2: srl t0, 1, t0 /* try next bit */
- addq t2, 1, t2
- blbc t0, 2b /* if clear, try again */
-
-3: /*
- * Remove process from queue
- */
- lda t1, qs /* get queues */
- sll t2, 4, t0 /* queue head is 16 bytes */
- addq t1, t0, t0 /* t0 = qp = &qs[firstbit] */
-
- ldq t4, PH_LINK(t0) /* t4 = p = highest pri proc */
- bne t4, 4f /* make sure p != NULL */
- PANIC("cpu_switch",Lcpu_switch_pmsg) /* nothing in queue! */
-
-4:
- ldq t5, P_FORW(t4) /* t5 = p->p_forw */
- stq t5, PH_LINK(t0) /* qp->ph_link = p->p_forw */
- stq t0, P_BACK(t5) /* p->p_forw->p_back = qp */
- stq zero, P_BACK(t4) /* firewall: p->p_back = NULL */
- cmpeq t0, t5, t0 /* see if queue is empty */
- beq t0, 5f /* nope, it's not! */
-
- ldiq t0, 1 /* compute bit in whichqs */
- sll t0, t2, t0
- xor t3, t0, t3 /* clear bit in whichqs */
- stl t3, whichqs
-
-5:
- mov t4, s2 /* save new proc */
- ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
/*
- * Done mucking with the run queues, release the
- * scheduler lock, but keep interrupts out.
+ * Don't bother saving the old context if oldproc is NULL.
*/
- CALL(sched_unlock_idle)
-#endif
+ beq a0, 1f
/*
- * Check to see if we're switching to ourself. If we are,
- * don't bother loading the new context.
- *
- * Note that even if we re-enter cpu_switch() from idle(),
- * s0 will still contain the old curproc value because any
- * users of that register between then and now must have
- * saved it. Also note that switch_exit() ensures that
- * s0 is clear before jumping here to find a new process.
+ * do an inline savectx(), to save old context
*/
- cmpeq s0, s2, t0 /* oldproc == newproc? */
- bne t0, 7f /* Yes! Skip! */
+ call_pal PAL_OSF1_rdps /* NOTE: doesn't kill a0 */
+ ldq t0, P_ADDR(a0)
+ /* NOTE: ksp is stored by the swpctx */
+ stq s0, U_PCB_CONTEXT+(0 * 8)(t0) /* store s0 - s6 */
+ stq s1, U_PCB_CONTEXT+(1 * 8)(t0)
+ stq s2, U_PCB_CONTEXT+(2 * 8)(t0)
+ stq s3, U_PCB_CONTEXT+(3 * 8)(t0)
+ stq s4, U_PCB_CONTEXT+(4 * 8)(t0)
+ stq s5, U_PCB_CONTEXT+(5 * 8)(t0)
+ stq s6, U_PCB_CONTEXT+(6 * 8)(t0)
+ stq ra, U_PCB_CONTEXT+(7 * 8)(t0) /* store ra */
+ stq v0, U_PCB_CONTEXT+(8 * 8)(t0) /* store ps, for ipl */
+
+1:
+ mov a0, s0 /* save old proc */
+ mov a1, s2 /* save new proc */
+ ldq s3, P_MD_PCBPADDR(s2) /* save new pcbpaddr */
/*
* Deactivate the old address space before activating the
@@ -842,18 +757,18 @@ cpu_switch_queuescan:
* do this after we activate, then we might end up
* incorrectly marking the pmap inactive!
*
- * We don't deactivate if we came here from switch_exit
+ * We don't deactivate if we came here from sched_exit
* (old pmap no longer exists; vmspace has been freed).
* oldproc will be NULL in this case. We have actually
* taken care of calling pmap_deactivate() in cpu_exit(),
* before the vmspace went away.
*/
- beq s0, 6f
+ beq s0, 2f
mov s0, a0 /* pmap_deactivate(oldproc) */
CALL(pmap_deactivate)
-6: /*
+2: /*
* Activate the new process's address space and perform
* the actual context swap.
*/
@@ -864,7 +779,7 @@ cpu_switch_queuescan:
mov s3, a0 /* swap the context */
SWITCH_CONTEXT
-7: /*
+ /*
* Now that the switch is done, update curproc and other
* globals. We must do this even if switching to ourselves
* because we might have re-entered cpu_switch() from idle(),
@@ -911,7 +826,7 @@ EXPORT(__bwx_switch1)
ldiq v0, 1 /* possible ret to savectx() */
RET
- END(cpu_switch)
+ END(cpu_switchto)
#ifndef SMALL_KERNEL
/*
@@ -926,6 +841,18 @@ EXPORT(__bwx_switch2)
EXPORT(__bwx_switch3)
#endif
+LEAF(cpu_idle_enter, 0)
+ RET
+ END(cpu_idle_enter)
+
+LEAF(cpu_idle_cycle, 0)
+ RET
+ END(cpu_idle_cycle)
+
+LEAF(cpu_idle_leave, 0)
+ RET
+ END(cpu_idle_leave)
+
/*
* switch_trampoline()
*
@@ -945,46 +872,6 @@ LEAF(switch_trampoline, 0)
jmp zero, (pv)
END(switch_trampoline)
-/*
- * switch_exit(struct proc *p)
- * Make a the named process exit. Partially switch to our idle thread
- * (we don't update curproc or restore registers), and jump into the middle
- * of cpu_switch to switch into a few process. The process reaper will
- * free the dead process's VM resources. MUST BE CALLED AT SPLHIGH.
- */
-LEAF(switch_exit, 1)
- LDGP(pv)
-
- /* save the exiting proc pointer */
- mov a0, s2
-
- /* Switch to our idle stack. */
- GET_IDLE_PCB(a0) /* clobbers v0, t0, t8-t11 */
- SWITCH_CONTEXT
-
- /*
- * Now running as idle thread, except for the value of 'curproc' and
- * the saved regs.
- */
-
- /* Schedule the vmspace and stack to be freed. */
- mov s2, a0
- CALL(exit2)
-
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
- CALL(sched_lock_idle) /* acquire sched_lock */
-#endif
-
- /*
- * Now jump back into the middle of cpu_switch(). Note that
- * we must clear s0 to guarantee that the check for switching
- * to ourselves in cpu_switch() will fail. This is safe since
- * s0 will be restored when a new process is resumed.
- */
- mov zero, s0
- jmp zero, cpu_switch_queuescan
- END(switch_exit)
-
/**************************************************************************/
/*
diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c
index ce9d237b8d8..4f7836c9de1 100644
--- a/sys/arch/alpha/alpha/machdep.c
+++ b/sys/arch/alpha/alpha/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.112 2007/09/15 10:10:37 martin Exp $ */
+/* $OpenBSD: machdep.c,v 1.113 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */
/*-
@@ -1874,66 +1874,6 @@ spl0()
}
/*
- * The following primitives manipulate the run queues. _whichqs tells which
- * of the 32 queues _qs have processes in them. Setrunqueue puts processes
- * into queues, Remrunqueue removes them from queues. The running process is
- * on no queue, other processes are on a queue related to p->p_priority,
- * divided by 4 actually to shrink the 0-127 range of priorities into the 32
- * available queues.
- */
-/*
- * setrunqueue(p)
- * proc *p;
- *
- * Call should be made at splclock(), and p->p_stat should be SRUN.
- */
-
-/* XXXART - grmble */
-#define sched_qs qs
-#define sched_whichqs whichqs
-
-void
-setrunqueue(p)
- struct proc *p;
-{
- int bit;
-
- /* firewall: p->p_back must be NULL */
- if (p->p_back != NULL)
- panic("setrunqueue");
-
- bit = p->p_priority >> 2;
- sched_whichqs |= (1 << bit);
- p->p_forw = (struct proc *)&sched_qs[bit];
- p->p_back = sched_qs[bit].ph_rlink;
- p->p_back->p_forw = p;
- sched_qs[bit].ph_rlink = p;
-}
-
-/*
- * remrunqueue(p)
- *
- * Call should be made at splclock().
- */
-void
-remrunqueue(p)
- struct proc *p;
-{
- int bit;
-
- bit = p->p_priority >> 2;
- if ((sched_whichqs & (1 << bit)) == 0)
- panic("remrunqueue");
-
- p->p_back->p_forw = p->p_forw;
- p->p_forw->p_back = p->p_back;
- p->p_back = NULL; /* for firewall checking. */
-
- if ((struct proc *)&sched_qs[bit] == sched_qs[bit].ph_link)
- sched_whichqs &= ~(1 << bit);
-}
-
-/*
* Wait "n" microseconds.
*/
void
diff --git a/sys/arch/alpha/alpha/vm_machdep.c b/sys/arch/alpha/alpha/vm_machdep.c
index 16191729b90..eda0437fb46 100644
--- a/sys/arch/alpha/alpha/vm_machdep.c
+++ b/sys/arch/alpha/alpha/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.34 2007/09/03 01:24:22 krw Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.35 2007/10/10 15:53:51 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.55 2000/03/29 03:49:48 simonb Exp $ */
/*
@@ -114,9 +114,7 @@ cpu_exit(p)
* vmspace's context until the switch to proc0 in switch_exit().
*/
pmap_deactivate(p);
-
- (void) splhigh();
- switch_exit(p);
+ sched_exit(p);
/* NOTREACHED */
}