author    | Artur Grabowski <art@cvs.openbsd.org> | 2007-10-10 15:53:54 +0000
committer | Artur Grabowski <art@cvs.openbsd.org> | 2007-10-10 15:53:54 +0000
commit    | e51062c8cca21a333603b567563e3b84f74ddac0 (patch)
tree      | dccf12b7d5ef806260203fe60b2bcaf94260c651 /sys/arch/arm
parent    | 34c540de32da6090afdcdd6fee481f9a2df345fd (diff)
Make context switching much more MI:
- Move the functionality of choosing a process from cpu_switch into
a much simpler function: cpu_switchto. Instead of having the locore
code walk the run queues, let the MI code choose the process we
want to run and only implement the context switching itself in MD
code.
- Let MD context switching run without worrying about spls or locks.
- Instead of having the idle loop implemented with special contexts
in MD code, implement one idle proc for each CPU. Make the idle
loop MI with MD hooks.
- Change the proc lists from the old-style VAX queues to TAILQs.
- Change the sleep queue from VAX queues to TAILQs. This makes
wakeup() go from O(n^2) to O(n).
There will be some MD fallout, but it will be fixed shortly.
There are also a few cleanups to be done after this.
deraadt@, kettenis@ ok
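
The per-CPU idle proc mentioned above runs an MI loop and reaches MD code only through a small set of hooks; the ARM diff below supplies cpu_idle_enter(), cpu_idle_cycle() and cpu_idle_leave() for that purpose, next to the new cpu_switchto(). The following is a minimal sketch of how such an MI loop could drive those hooks; the sched_idle(), sched_is_idle() and sched_chooseproc() names and the loop structure are illustrative assumptions, not code from this commit.

```c
/*
 * Sketch only: how an MI idle loop might drive the MD hooks added in
 * this commit.  cpu_idle_enter/cpu_idle_cycle/cpu_idle_leave and
 * cpu_switchto() come from the ARM diff below; sched_idle(),
 * sched_is_idle() and sched_chooseproc() are assumed names used purely
 * for illustration.
 */
struct proc;

void	cpu_idle_enter(void);	/* MD: drop spl, enable interrupts */
void	cpu_idle_cycle(void);	/* MD: one idle iteration, may power-save */
void	cpu_idle_leave(void);	/* MD: raise spl again before scheduling */
void	cpu_switchto(struct proc *old, struct proc *new);  /* MD: switch only */

struct proc	*sched_chooseproc(void);	/* assumed MI helper */
int		 sched_is_idle(void);		/* assumed MI helper */
extern struct proc *curproc;

void
sched_idle(void *arg)		/* one such proc per CPU (assumed) */
{
	struct proc *next;

	(void)arg;
	for (;;) {
		cpu_idle_enter();
		while (sched_is_idle())
			cpu_idle_cycle();
		cpu_idle_leave();

		/* MI code picks the next proc; MD code only switches context. */
		next = sched_chooseproc();
		cpu_switchto(curproc, next);
	}
}
```

On the ARM side, cpu_idle_cycle() below only calls the cpufuncs sleep hook (with interrupts disabled) when cpu_do_powersave is set, and cpu_idle_leave() raises back to IPL_SCHED.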
Diffstat (limited to 'sys/arch/arm')
-rw-r--r-- | sys/arch/arm/arm/cpuswitch.S  | 697
-rw-r--r-- | sys/arch/arm/arm/genassym.cf  |   8
-rw-r--r-- | sys/arch/arm/arm/pmap.c       |   9
-rw-r--r-- | sys/arch/arm/arm/vm_machdep.c |  42
-rw-r--r-- | sys/arch/arm/include/pmap.h   |   9
5 files changed, 76 insertions, 689 deletions
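
The diffstat above covers only the ARM MD fallout; the queue change called out in the commit message (proc lists and sleep queues becoming TAILQs) is MI. As a rough, hypothetical illustration of why that drops wakeup() from O(n^2) to O(n) — a TAILQ bucket can be walked and pruned in a single pass — consider the sketch below; the struct layout and names are assumptions, not taken from this commit.

```c
#include <sys/queue.h>
#include <stddef.h>

/* Illustrative only: field and function names are assumed. */
struct proc {
	TAILQ_ENTRY(proc)	 p_runq;	/* queue linkage */
	const volatile void	*p_wchan;	/* sleep channel */
};

TAILQ_HEAD(prochead, proc);

/*
 * Wake every proc sleeping on `chan': one O(n) pass over the TAILQ,
 * removing entries as we go, instead of repeatedly rescanning the
 * old VAX-style queue.
 */
static void
wakeup_bucket(struct prochead *bucket, const volatile void *chan)
{
	struct proc *p, *next;

	TAILQ_FOREACH_SAFE(p, bucket, p_runq, next) {
		if (p->p_wchan != chan)
			continue;
		TAILQ_REMOVE(bucket, p, p_runq);
		p->p_wchan = NULL;
		/* ...setrunqueue(p) or equivalent would go here... */
	}
}
```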
diff --git a/sys/arch/arm/arm/cpuswitch.S b/sys/arch/arm/arm/cpuswitch.S index d94219d88c0..544f7c27243 100644 --- a/sys/arch/arm/arm/cpuswitch.S +++ b/sys/arch/arm/arm/cpuswitch.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cpuswitch.S,v 1.7 2007/05/14 07:07:09 art Exp $ */ +/* $OpenBSD: cpuswitch.S,v 1.8 2007/10/10 15:53:51 art Exp $ */ /* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ /* @@ -123,142 +123,11 @@ .text -.Lwhichqs: - .word _C_LABEL(whichqs) - -.Lqs: - .word _C_LABEL(qs) - -/* - * On entry - * r0 = process - */ - -ENTRY(setrunqueue) - /* - * Local register usage - * r0 = process - * r1 = queue - * r2 = &qs[queue] and temp - * r3 = temp - * r12 = whichqs - */ -#ifdef DIAGNOSTIC - ldr r1, [r0, #(P_BACK)] - teq r1, #0x00000000 - bne Lsetrunqueue_erg - - ldr r1, [r0, #(P_WCHAN)] - teq r1, #0x00000000 - bne Lsetrunqueue_erg -#endif - - /* Get the priority of the queue */ - ldrb r1, [r0, #(P_PRIORITY)] - mov r1, r1, lsr #2 - - /* Indicate that there is a process on this queue */ - ldr r12, .Lwhichqs - ldr r2, [r12] - mov r3, #0x00000001 - mov r3, r3, lsl r1 - orr r2, r2, r3 - str r2, [r12] - - /* Get the address of the queue */ - ldr r2, .Lqs - add r1, r2, r1, lsl # 3 - - /* Hook the process in */ - str r1, [r0, #(P_FORW)] - ldr r2, [r1, #(P_BACK)] - - str r0, [r1, #(P_BACK)] -#ifdef DIAGNOSTIC - teq r2, #0x00000000 - beq Lsetrunqueue_erg -#endif - str r0, [r2, #(P_FORW)] - str r2, [r0, #(P_BACK)] - - mov pc, lr - -#ifdef DIAGNOSTIC -Lsetrunqueue_erg: - mov r2, r1 - mov r1, r0 - add r0, pc, #Ltext1 - . - 8 - bl _C_LABEL(printf) - - ldr r2, .Lqs - ldr r1, [r2] - add r0, pc, #Ltext2 - . - 8 - b _C_LABEL(panic) - -Ltext1: - .asciz "setrunqueue : %08x %08x\n" -Ltext2: - .asciz "setrunqueue : [qs]=%08x qs=%08x\n" - .align 0 -#endif - -/* - * On entry - * r0 = process - */ - -ENTRY(remrunqueue) - /* - * Local register usage - * r0 = oldproc - * r1 = queue - * r2 = &qs[queue] and scratch - * r3 = scratch - * r12 = whichqs - */ - - /* Get the priority of the queue */ - ldrb r1, [r0, #(P_PRIORITY)] - mov r1, r1, lsr #2 - - /* Unhook the process */ - ldr r2, [r0, #(P_FORW)] - ldr r3, [r0, #(P_BACK)] - - str r3, [r2, #(P_BACK)] - str r2, [r3, #(P_FORW)] - - /* If the queue is now empty clear the queue not empty flag */ - teq r2, r3 - - /* This could be reworked to avoid the use of r4 */ - ldreq r12, .Lwhichqs - ldreq r2, [r12] - moveq r3, #0x00000001 - moveq r3, r3, lsl r1 - biceq r2, r2, r3 - streq r2, [r12] - - /* Remove the back pointer for the process */ - mov r1, #0x00000000 - str r1, [r0, #(P_BACK)] - - mov pc, lr - - -/* - * cpuswitch() - * - * preforms a process context switch. - * This function has several entry points - */ - .Lcpu_info_store: .word _C_LABEL(cpu_info_store) .Lcurproc: .word _C_LABEL(cpu_info_store) + CI_CURPROC - .Lwant_resched: .word _C_LABEL(want_resched) @@ -289,320 +158,123 @@ _C_LABEL(curpcb): /* * Idle loop, exercised while waiting for a process to wake up. - * - * NOTE: When we jump back to .Lswitch_search, we must have a - * pointer to whichqs in r7, which is what it is when we arrive - * here. */ -/* LINTSTUB: Ignore */ -ASENTRY_NP(idle) - ldr r6, .Lcpu_do_powersave - IRQenable /* Enable interrupts */ - ldr r6, [r6] /* r6 = cpu_do_powersave */ +ENTRY(cpu_idle_enter) + stmfd sp!, {lr} -#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG) - bl _C_LABEL(sched_unlock_idle) -#endif + IRQenable /* Enable interrupts */ /* Drop to spl0 (returns the current spl level in r0). 
*/ mov r0, #(IPL_NONE) bl _C_LABEL(_spllower) + ldmfd sp!, {pc} + +ENTRY(cpu_idle_cycle) + stmfd sp!, {r6, lr} + + ldr r6, .Lcpu_do_powersave + ldr r6, [r6] /* r6 = cpu_do_powersave */ + teq r6, #0 /* cpu_do_powersave non zero? */ ldrne r6, .Lcpufuncs - mov r4, r0 /* Old interrupt level to r4 */ ldrne r6, [r6, #(CF_SLEEP)] - /* - * Main idle loop. - * r6 points to power-save idle function if required, else NULL. - */ -1: ldr r3, [r7] /* r3 = sched_whichqs */ - teq r3, #0 - bne 2f /* We have work to do */ teq r6, #0 /* Powersave idle? */ - beq 1b /* Nope. Just sit-n-spin. */ + beq 1f /* Nope. Just continue. */ /* - * Before going into powersave idle mode, disable interrupts - * and check sched_whichqs one more time. + * Before going into powersave idle mode, disable interrupts. */ IRQdisableALL - ldr r3, [r7] - mov r0, #0 - teq r3, #0 /* sched_whichqs still zero? */ - moveq lr, pc - moveq pc, r6 /* If so, do powersave idle */ + mov lr, pc + mov pc, r6 /* If so, do powersave idle */ IRQenableALL - b 1b /* Back around */ - /* - * sched_whichqs indicates that at least one proc is ready to run. - * Restore the original interrupt priority level, grab the - * scheduler lock if necessary, and jump back into cpu_switch. - */ -2: mov r0, r4 -#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG) - bl _C_LABEL(splx) - adr lr, .Lswitch_search - b _C_LABEL(sched_lock_idle) -#else - adr lr, .Lswitch_search - b _C_LABEL(splx) -#endif +1: ldmfd sp!, {r6, pc} + +ENTRY(cpu_idle_leave) + stmfd sp!, {lr} + + mov r0, #(IPL_SCHED) + bl _C_LABEL(_splraise) + + ldmfd sp!, {pc} /* - * Find a new lwp to run, save the current context and - * load the new context + * cpu_switchto(struct proc *oldproc, struct proc *newproc) + * + * Performs a process context switch from oldproc (which may be NULL) + * to newproc. * * Arguments: - * r0 'struct proc *' of the current LWP + * r0 'struct proc *' of the context to switch from + * r1 'struct proc *' of the context to switch to */ -ENTRY(cpu_switch) -/* - * Local register usage. Some of these registers are out of date. - * r1 = oldproc - * r2 = spl level - * r3 = whichqs - * r4 = queue - * r5 = &qs[queue] - * r6 = newlwp - * r7 = scratch - */ +ENTRY(cpu_switchto) stmfd sp!, {r4-r7, lr} - /* - * Indicate that there is no longer a valid process (curlwp = 0). - * Zero the current PCB pointer while we're at it. - */ - ldr r7, .Lcurproc - ldr r6, .Lcurpcb - mov r2, #0x00000000 - str r2, [r7] /* curproc = NULL */ - str r2, [r6] /* curpcb = NULL */ - - /* stash the old proc while we call functions */ - mov r5, r0 - - /* First phase : find a new proc */ - ldr r7, .Lwhichqs - - /* rem: r5 = old proc */ - /* rem: r7 = &whichqs */ - -.Lswitch_search: - IRQdisable - - /* Do we have any active queues */ - ldr r3, [r7] - - /* If not we must idle until we do. */ - teq r3, #0x00000000 - beq _ASM_LABEL(idle) - - /* put old proc back in r1 */ - mov r1, r5 - - /* rem: r1 = old proc */ - /* rem: r3 = whichqs */ - /* rem: interrupts are disabled */ - - /* used further down, saves SA stall */ - ldr r6, .Lqs - - /* - * We have found an active queue. Currently we do not know which queue - * is active just that one of them is. - */ - /* Non-Xscale version of the ffs algorithm devised by d.seal and - * posted to comp.sys.arm on 16 Feb 1994. 
- */ - rsb r5, r3, #0 - ands r0, r3, r5 - -#ifndef __XSCALE__ - adr r5, .Lcpu_switch_ffs_table - - /* X = R0 */ - orr r4, r0, r0, lsl #4 /* r4 = X * 0x11 */ - orr r4, r4, r4, lsl #6 /* r4 = X * 0x451 */ - rsb r4, r4, r4, lsl #16 /* r4 = X * 0x0450fbaf */ - - /* now lookup in table indexed on top 6 bits of a4 */ - ldrb r4, [ r5, r4, lsr #26 ] - -#else /* __XSCALE__ */ - clz r4, r0 - rsb r4, r4, #31 -#endif /* __XSCALE__ */ - - /* rem: r0 = bit mask of chosen queue (1 << r4) */ - /* rem: r1 = old proc */ - /* rem: r3 = whichqs */ - /* rem: r4 = queue number */ - /* rem: interrupts are disabled */ - - /* Get the address of the queue (&qs[queue]) */ - add r5, r6, r4, lsl #3 - - /* - * Get the proc from the queue and place the next process in - * the queue at the head. This basically unlinks the lwp at - * the head of the queue. - */ - ldr r6, [r5, #(P_FORW)] - -#ifdef DIAGNOSTIC - cmp r6, r5 - beq .Lswitch_bogons -#endif - - /* rem: r6 = new proc */ - ldr r7, [r6, #(P_FORW)] - str r7, [r5, #(P_FORW)] - - /* - * Test to see if the queue is now empty. If the head of the queue - * points to the queue itself then there are no more procs in - * the queue. We can therefore clear the queue not empty flag held - * in r3. - */ - - teq r5, r7 - biceq r3, r3, r0 - - /* rem: r0 = bit mask of chosen queue (1 << r4) - NOT NEEDED AN MORE */ - - /* Fix the back pointer for the lwp now at the head of the queue. */ - ldr r0, [r6, #(P_BACK)] - str r0, [r7, #(P_BACK)] - - /* Update the RAM copy of the queue not empty flags word. */ - ldreq r7, .Lwhichqs - streq r3, [r7] - - /* rem: r1 = old proc */ - /* rem: r3 = whichqs - NOT NEEDED ANY MORE */ - /* rem: r4 = queue number - NOT NEEDED ANY MORE */ - /* rem: r6 = new proc */ - /* rem: interrupts are disabled */ - /* Clear the want_resched flag */ ldr r7, .Lwant_resched - mov r0, #0x00000000 - str r0, [r7] - - /* - * Clear the back pointer of the proc we have removed from - * the head of the queue. The new proc is isolated now. - */ - str r0, [r6, #(P_BACK)] - -#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG) - /* - * unlock the sched_lock, but leave interrupts off, for now. - */ - mov r7, r1 - bl _C_LABEL(sched_unlock_idle) - mov r1, r7 -#endif - - -.Lswitch_resume: - /* rem: r1 = old proc */ - /* rem: r4 = return value [not used if came from cpu_switchto()] */ - /* rem: r6 = new process */ - /* rem: interrupts are disabled */ + mov r2, #0x00000000 + str r2, [r7] #ifdef MULTIPROCESSOR /* XXX use curcpu() */ - ldr r0, .Lcpu_info_store - str r0, [r6, #(P_CPU)] + ldr r2, .Lcpu_info_store + str r2, [r1, #(P_CPU)] #else - /* l->l_cpu initialized in fork1() for single-processor */ + /* p->p_cpu initialized in fork1() for single-processor */ #endif /* Process is now on a processor. */ - mov r0, #SONPROC /* p->p_stat = SONPROC */ - strb r0, [r6, #(P_STAT)] + mov r2, #SONPROC /* p->p_stat = SONPROC */ + strb r2, [r1, #(P_STAT)] /* We have a new curproc now so make a note it */ ldr r7, .Lcurproc - str r6, [r7] + str r1, [r7] /* Hook in a new pcb */ ldr r7, .Lcurpcb - ldr r0, [r6, #(P_ADDR)] - str r0, [r7] - - /* At this point we can allow IRQ's again. */ - IRQenable - - /* rem: r1 = old proc */ - /* rem: r4 = return value */ - /* rem: r6 = new process */ - /* rem: interrupts are enabled */ - - /* - * If the new process is the same as the process that called - * cpu_switch() then we do not need to save and restore any - * contexts. This means we can make a quick exit. 
- * The test is simple if curproc on entry (now in r1) is the - * same as the proc removed from the queue we can jump to the exit. - */ - teq r1, r6 - moveq r4, #0x00000000 /* default to "didn't switch" */ - beq .Lswitch_return - - /* - * At this point, we are guaranteed to be switching to - * a new proc. - */ - mov r4, #0x00000001 - - /* Remember the old proc in r0 */ - mov r0, r1 + ldr r6, [r7] /* Remember the old PCB */ + ldr r2, [r1, #(P_ADDR)] + str r2, [r7] /* * If the old proc on entry to cpu_switch was zero then the * process that called it was exiting. This means that we do - * not need to save the current context. Instead we can jump - * straight to restoring the context for the new process. + * not need to save the current context (we nevertheless need + * to clear the cache and TLB). */ teq r0, #0x00000000 beq .Lswitch_exited - /* rem: r0 = old proc */ - /* rem: r4 = return value */ - /* rem: r6 = new process */ - /* rem: interrupts are enabled */ - /* Stage two : Save old context */ - /* Get the user structure for the old proc. */ - ldr r1, [r0, #(P_ADDR)] - /* Save all the registers in the old proc's pcb */ #ifndef __XSCALE__ - add r7, r1, #(PCB_R8) + add r7, r6, #(PCB_R8) stmia r7, {r8-r13} #else - strd r8, [r1, #(PCB_R8)] - strd r10, [r1, #(PCB_R10)] - strd r12, [r1, #(PCB_R12)] + strd r8, [r6, #(PCB_R8)] + strd r10, [r6, #(PCB_R10)] + strd r12, [r6, #(PCB_R12)] #endif +.Lswitch_exited: /* * NOTE: We can now use r8-r13 until it is time to restore * them for the new process. */ /* Remember the old PCB. */ - mov r8, r1 + mov r8, r6 - /* r1 now free! */ + /* Save new proc in r6 now. */ + mov r6, r1 /* Get the user structure for the new process in r9 */ ldr r9, [r6, #(P_ADDR)] @@ -616,28 +288,24 @@ ENTRY(cpu_switch) orr r2, r2, #(PSR_UND32_MODE | I32_bit) msr cpsr_c, r2 +#ifdef notworthit + teq r0, #0x00000000 + strne sp, [r8, #(PCB_UND_SP)] +#else str sp, [r8, #(PCB_UND_SP)] +#endif msr cpsr_c, r3 /* Restore the old mode */ /* rem: r0 = old proc */ - /* rem: r4 = return value */ - /* rem: r6 = new process */ + /* rem: r1 = r6 = new process */ /* rem: r8 = old PCB */ /* rem: r9 = new PCB */ - /* rem: interrupts are enabled */ /* What else needs to be saved Only FPA stuff when that is supported */ /* Third phase : restore saved context */ - /* rem: r0 = old proc */ - /* rem: r4 = return value */ - /* rem: r6 = new proc */ - /* rem: r8 = old PCB */ - /* rem: r9 = new PCB */ - /* rem: interrupts are enabled */ - /* * Get the new L1 table pointer into r11. If we're switching to * an LWP with the same address space as the outgoing one, we can @@ -652,7 +320,7 @@ ENTRY(cpu_switch) ldr r0, [r8, #(PCB_DACR)] /* r0 = old DACR */ ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */ ldr r8, [r9, #(PCB_CSTATE)] /* r8 = &new_pmap->pm_cstate */ - ldr r5, .Llast_cache_state_ptr /* Previous thread's cstate */ + ldr r5, .Llast_cache_state_ptr /* Previous proc's cstate */ teq r10, r11 /* Same L1? */ ldr r5, [r5] @@ -681,7 +349,7 @@ ENTRY(cpu_switch) beq .Lcs_cache_purge_skipped /* VM space is not in cache */ /* - * Definately need to flush the cache. + * Definitely need to flush the cache. * Mark the old VM space as NOT being resident in the cache. 
*/ mov r2, #0x00000000 @@ -696,7 +364,6 @@ ENTRY(cpu_switch) .Lcs_cache_purge_skipped: /* rem: r1 = new DACR */ - /* rem: r4 = return value */ /* rem: r5 = &old_pmap->pm_cstate (or NULL) */ /* rem: r6 = new proc */ /* rem: r8 = &new_pmap->pm_cstate */ @@ -787,7 +454,6 @@ ENTRY(cpu_switch) cmp r5, r8 strne r8, [r0] - /* rem: r4 = return value */ /* rem: r6 = new proc */ /* rem: r9 = new PCB */ @@ -820,14 +486,6 @@ ENTRY(cpu_switch) ldr r13, [r7, #(PCB_SP)] #endif -#if 0 - ldr r5, [r6, #(L_PROC)] /* fetch the proc for below */ -#else - mov r5, r6 -#endif - - /* rem: r4 = return value */ - /* rem: r5 = new proc's proc */ /* rem: r6 = new proc */ /* rem: r7 = new pcb */ @@ -840,234 +498,16 @@ ENTRY(cpu_switch) /* We can enable interrupts again */ IRQenableALL - /* rem: r4 = return value */ - /* rem: r5 = new proc's proc */ /* rem: r6 = new proc */ /* rem: r7 = new PCB */ -#if 0 - /* - * Check for restartable atomic sequences (RAS). - */ - - ldr r2, [r5, #(P_RASLIST)] - ldr r1, [r7, #(PCB_TF)] /* r1 = trapframe (used below) */ - teq r2, #0 /* p->p_nras == 0? */ - bne .Lswitch_do_ras /* no, check for one */ -#endif - .Lswitch_return: - /* cpu_switch returns 1 == switched, 0 == didn't switch */ - mov r0, r4 - /* * Pull the registers that got pushed when either savectx() or * cpu_switch() was called and return. */ ldmfd sp!, {r4-r7, pc} -#if 0 -.Lswitch_do_ras: - ldr r1, [r1, #(TF_PC)] /* second ras_lookup() arg */ - mov r0, r5 /* first ras_lookup() arg */ - bl _C_LABEL(ras_lookup) - cmn r0, #1 /* -1 means "not in a RAS" */ - ldrne r1, [r7, #(PCB_TF)] - strne r0, [r1, #(TF_PC)] - b .Lswitch_return -#endif - -.Lswitch_exited: - /* - * We skip the cache purge because switch_exit() already did it. - * Load up registers the way .Lcs_cache_purge_skipped expects. - * Userpsace access already blocked by switch_exit(). - */ - ldr r9, [r6, #(P_ADDR)] /* r9 = new PCB */ - mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */ - mov r5, #0 /* No previous cache state */ - ldr r1, [r9, #(PCB_DACR)] /* r1 = new DACR */ - ldr r8, [r9, #(PCB_CSTATE)] /* r8 = new cache state */ - ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */ - b .Lcs_cache_purge_skipped - - -#ifdef DIAGNOSTIC -.Lswitch_bogons: - adr r0, .Lswitch_panic_str - bl _C_LABEL(panic) -1: nop - b 1b - -.Lswitch_panic_str: - .asciz "cpu_switch: sched_qs empty with non-zero sched_whichqs!\n" -#endif - -/* - * cpu_switchto(struct proc *current, struct proc *next) - * Switch to the specified next LWP - * Arguments: - * - * r0 'struct proc *' of the current LWP - * r1 'struct proc *' of the LWP to switch to - */ -ENTRY(cpu_switchto) - stmfd sp!, {r4-r7, lr} - - mov r6, r1 /* save new proc */ - -#if defined(LOCKDEBUG) - mov r5, r0 /* save old proc */ - bl _C_LABEL(sched_unlock_idle) - mov r1, r5 -#else - mov r1, r0 -#endif - - IRQdisable - - /* - * Okay, set up registers the way cpu_switch() wants them, - * and jump into the middle of it (where we bring up the - * new process). - * - * r1 = old proc (r6 = new proc) - */ - b .Lswitch_resume - -/* - * void switch_exit(struct proc *l, struct proc *l0, - * void (*exit)(struct proc *)); - * Switch to proc0's saved context and deallocate the address space and kernel - * stack for l. Then jump into cpu_switch(), as if we were in proc0 all along. - */ - -/* LINTSTUB: Func: void switch_exit(struct proc *l, struct proc *l0, - void (*func)(struct proc *)) */ -ENTRY(switch_exit) - /* - * The process is going away, so we can use callee-saved - * registers here without having to save them. 
- */ - - mov r4, r0 - ldr r0, .Lcurproc - - mov r5, r1 - mov r6, r2 - - /* - * r4 = proc - * r5 = proc0 - * r6 = exit func - */ - - mov r2, #0x00000000 /* curproc = NULL */ - str r2, [r0] - - /* - * We're about to clear both the cache and the TLB. - * Make sure to zap the 'last cache state' pointer since the - * pmap might be about to go away. Also ensure the outgoing - * VM space's cache state is marked as NOT resident in the - * cache, and that proc0's cache state IS resident. - */ - ldr r7, [r4, #(P_ADDR)] /* r7 = old proc's PCB */ - ldr r0, .Llast_cache_state_ptr /* Last userland cache state */ - ldr r9, [r7, #(PCB_CSTATE)] /* Fetch cache state pointer */ - ldr r3, [r5, #(P_ADDR)] /* r3 = proc0's PCB */ - str r2, [r0] /* No previous cache state */ - str r2, [r9, #(CS_ALL)] /* Zap old proc's cache state */ - ldr r3, [r3, #(PCB_CSTATE)] /* proc0's cache state */ - mov r2, #-1 - str r2, [r3, #(CS_ALL)] /* proc0 is in da cache! */ - - /* Switch to proc0 context */ - - ldr r9, .Lcpufuncs - mov lr, pc - ldr pc, [r9, #CF_IDCACHE_WBINV_ALL] - - ldr r0, [r7, #(PCB_PL1VEC)] - ldr r1, [r7, #(PCB_DACR)] - - /* - * r0 = Pointer to L1 slot for vector_page (or NULL) - * r1 = proc0's DACR - * r4 = proc we're switching from - * r5 = proc0 - * r6 = exit func - * r7 = proc0's PCB - * r9 = cpufuncs - */ - - IRQdisableALL - - /* - * Ensure the vector table is accessible by fixing up proc0's L1 - */ - cmp r0, #0 /* No need to fixup vector table? */ - ldrne r3, [r0] /* But if yes, fetch current value */ - ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */ - mcr p15, 0, r1, c3, c0, 0 /* Update DACR for proc0's context */ - cmpne r3, r2 /* Stuffing the same value? */ - strne r2, [r0] /* Store if not. */ - -#ifdef PMAP_INCLUDE_PTE_SYNC - /* - * Need to sync the cache to make sure that last store is - * visible to the MMU. - */ - movne r1, #4 - movne lr, pc - ldrne pc, [r9, #CF_DCACHE_WB_RANGE] -#endif /* PMAP_INCLUDE_PTE_SYNC */ - - /* - * Note: We don't do the same optimisation as cpu_switch() with - * respect to avoiding flushing the TLB if we're switching to - * the same L1 since this process' VM space may be about to go - * away, so we don't want *any* turds left in the TLB. - */ - - /* Switch the memory to the new process */ - ldr r0, [r7, #(PCB_PAGEDIR)] - mov lr, pc - ldr pc, [r9, #CF_CONTEXT_SWITCH] - - ldr r0, .Lcurpcb - - /* Restore all the save registers */ -#ifndef __XSCALE__ - add r1, r7, #PCB_R8 - ldmia r1, {r8-r13} -#else - ldr r8, [r7, #(PCB_R8)] - ldr r9, [r7, #(PCB_R9)] - ldr r10, [r7, #(PCB_R10)] - ldr r11, [r7, #(PCB_R11)] - ldr r12, [r7, #(PCB_R12)] - ldr r13, [r7, #(PCB_SP)] -#endif - str r7, [r0] /* curpcb = proc0's PCB */ - - IRQenableALL - - /* - * Schedule the vmspace and stack to be freed. 
- */ - mov r0, r4 /* {proc_}exit2(l) */ - mov lr, pc - mov pc, r6 - -#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG) - bl _C_LABEL(sched_lock_idle) -#endif - - ldr r7, .Lwhichqs /* r7 = &whichqs */ - mov r5, #0x00000000 /* r5 = old proc = NULL */ - b .Lswitch_search - /* LINTSTUB: Func: void savectx(struct pcb *pcb) */ ENTRY(savectx) /* @@ -1110,18 +550,3 @@ ENTRY(proc_trampoline) PULLFRAME movs pc, lr /* Exit */ - -#ifndef __XSCALE__ - .type .Lcpu_switch_ffs_table, _ASM_TYPE_OBJECT; -.Lcpu_switch_ffs_table: -/* same as ffs table but all nums are -1 from that */ -/* 0 1 2 3 4 5 6 7 */ - .byte 0, 0, 1, 12, 2, 6, 0, 13 /* 0- 7 */ - .byte 3, 0, 7, 0, 0, 0, 0, 14 /* 8-15 */ - .byte 10, 4, 0, 0, 8, 0, 0, 25 /* 16-23 */ - .byte 0, 0, 0, 0, 0, 21, 27, 15 /* 24-31 */ - .byte 31, 11, 5, 0, 0, 0, 0, 0 /* 32-39 */ - .byte 9, 0, 0, 24, 0, 0, 20, 26 /* 40-47 */ - .byte 30, 0, 0, 0, 0, 23, 0, 19 /* 48-55 */ - .byte 29, 0, 22, 18, 28, 17, 16, 0 /* 56-63 */ -#endif /* !__XSCALE_ */ diff --git a/sys/arch/arm/arm/genassym.cf b/sys/arch/arm/arm/genassym.cf index 63839e12196..3ffdb707531 100644 --- a/sys/arch/arm/arm/genassym.cf +++ b/sys/arch/arm/arm/genassym.cf @@ -1,4 +1,4 @@ -# $OpenBSD: genassym.cf,v 1.7 2007/05/14 07:07:09 art Exp $ +# $OpenBSD: genassym.cf,v 1.8 2007/10/10 15:53:51 art Exp $ # $NetBSD: genassym.cf,v 1.27 2003/11/04 10:33:16 dsl Exp$ # Copyright (c) 1982, 1990 The Regents of the University of California. @@ -82,11 +82,9 @@ export P_PROFIL export SONPROC struct proc -member p_forw -member p_back member p_addr -member p_priority -member p_wchan +#member p_priority +#member p_wchan member p_stat # XXX use PROC_SIZEOF in new code whenever possible define PROCSIZE sizeof(struct proc) diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c index b19bce07a63..d9c0035b3a8 100644 --- a/sys/arch/arm/arm/pmap.c +++ b/sys/arch/arm/arm/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.13 2007/05/18 14:41:55 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.14 2007/10/10 15:53:51 art Exp $ */ /* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */ /* @@ -3100,11 +3100,6 @@ pmap_activate(struct proc *p) } void -pmap_deactivate(struct proc *p) -{ -} - -void pmap_update(pmap_t pm) { @@ -3211,7 +3206,7 @@ pmap_destroy(pmap_t pm) pmap_update(pm); /* - * Make sure cpu_switch(), et al, DTRT. This is safe to do + * Make sure cpu_switchto(), et al, DTRT. This is safe to do * since this process has no remaining mappings of its own. */ curpcb->pcb_pl1vec = pcb->pcb_pl1vec; diff --git a/sys/arch/arm/arm/vm_machdep.c b/sys/arch/arm/arm/vm_machdep.c index d5e109dc121..a3b4115793b 100644 --- a/sys/arch/arm/arm/vm_machdep.c +++ b/sys/arch/arm/arm/vm_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vm_machdep.c,v 1.6 2007/05/27 20:59:25 miod Exp $ */ +/* $OpenBSD: vm_machdep.c,v 1.7 2007/10/10 15:53:51 art Exp $ */ /* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */ /* @@ -73,8 +73,6 @@ extern pv_addr_t systempage; int process_read_regs (struct proc *p, struct reg *regs); int process_read_fpregs (struct proc *p, struct fpreg *regs); -void switch_exit (struct proc *p, struct proc *p0, - void (*)(struct proc *)); extern void proc_trampoline (void); /* @@ -85,23 +83,6 @@ extern void proc_trampoline (void); * the amount of stack used. 
*/ -#if 0 -void -cpu_proc_fork(p1, p2) - struct proc *p1, *p2; -{ - -#if defined(PERFCTRS) - if (PMC_ENABLED(p1)) - pmc_md_fork(p1, p2); - else { - p2->p_md.pmc_enabled = 0; - p2->p_md.pmc_state = NULL; - } -#endif -} -#endif - /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the pcb and trap frame, making the child ready to run. @@ -201,27 +182,14 @@ cpu_fork(p1, p2, stack, stacksize, func, arg) pcb->pcb_un.un_32.pcb32_sp = (u_int)sf; } -#if 0 -void -cpu_setfunc(struct proc *p, void (*func)(void *), void *arg) -{ - struct pcb *pcb = &p->p_addr->u_pcb; - struct trapframe *tf = pcb->pcb_tf; - struct switchframe *sf = (struct switchframe *)tf - 1; - - sf->sf_r4 = (u_int)func; - sf->sf_r5 = (u_int)arg; - sf->sf_pc = (u_int)proc_trampoline; - pcb->pcb_un.un_32.pcb32_sp = (u_int)sf; -} -#endif - - void cpu_exit(struct proc *p) { +#if 0 pmap_update(p->p_vmspace->vm_map.pmap); /* XXX DSR help stability */ - switch_exit(p, &proc0, exit2); +#endif + pmap_deactivate(p); + sched_exit(p); } /* diff --git a/sys/arch/arm/include/pmap.h b/sys/arch/arm/include/pmap.h index 847374df0ae..2e5b139324d 100644 --- a/sys/arch/arm/include/pmap.h +++ b/sys/arch/arm/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.8 2007/09/10 18:49:44 miod Exp $ */ +/* $OpenBSD: pmap.h,v 1.9 2007/10/10 15:53:51 art Exp $ */ /* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */ /* @@ -241,12 +241,13 @@ extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */ #define pmap_is_referenced(pg) \ (((pg)->mdpage.pvh_attrs & PVF_REF) != 0) -#define pmap_copy(dp, sp, da, l, sa) /* nothing */ +#define pmap_deactivate(p) do { /* nothing */ } while (0) +#define pmap_copy(dp, sp, da, l, sa) do { /* nothing */ } while (0) #define pmap_phys_address(ppn) (ptoa(ppn)) -#define pmap_proc_iflush(p, va, len) /* nothing */ -#define pmap_unuse_final(p) /* nothing */ +#define pmap_proc_iflush(p, va, len) do { /* nothing */ } while (0) +#define pmap_unuse_final(p) do { /* nothing */ } while (0) #define pmap_remove_holes(map) do { /* nothing */ } while (0) /* |