diff options
author | kstailey <kstailey@cvs.openbsd.org> | 1997-02-14 18:15:28 +0000 |
---|---|---|
committer | kstailey <kstailey@cvs.openbsd.org> | 1997-02-14 18:15:28 +0000 |
commit | dd7443d6c571f753f12953aa9280863884fda76e (patch) | |
tree | ac3df03bc704eb1e911e5555f20da7861c65c534 /sys/arch | |
parent | ec4406dcfdc51482dfa213c22d01bf6a7155c2a2 (diff) |
remove old #define of splx() and update rei function in locore.s
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/sun3/include/param.h | 3 | ||||
-rw-r--r-- | sys/arch/sun3/sun3/locore.s | 91 |
2 files changed, 91 insertions, 3 deletions
diff --git a/sys/arch/sun3/include/param.h b/sys/arch/sun3/include/param.h index 266312289d4..58273ce19a2 100644 --- a/sys/arch/sun3/include/param.h +++ b/sys/arch/sun3/include/param.h @@ -1,4 +1,4 @@ -/* $OpenBSD: param.h,v 1.10 1997/02/14 18:01:59 kstailey Exp $ */ +/* $OpenBSD: param.h,v 1.11 1997/02/14 18:15:27 kstailey Exp $ */ /* $NetBSD: param.h,v 1.34 1996/03/04 05:04:40 cgd Exp $ */ /* @@ -178,7 +178,6 @@ #define spl5() _spl(PSL_S|PSL_IPL5) #define spl6() _spl(PSL_S|PSL_IPL6) #define spl7() _spl(PSL_S|PSL_IPL7) -#define splx(x) _spl(x) /* IPL used by soft interrupts: netintr(), softclock() */ #define splsoftclock() spl1() diff --git a/sys/arch/sun3/sun3/locore.s b/sys/arch/sun3/sun3/locore.s index 9ab7e0845fb..813d66c8e81 100644 --- a/sys/arch/sun3/sun3/locore.s +++ b/sys/arch/sun3/sun3/locore.s @@ -1,4 +1,4 @@ -/* $OpenBSD: locore.s,v 1.12 1997/02/14 18:01:58 kstailey Exp $ */ +/* $OpenBSD: locore.s,v 1.13 1997/02/14 18:15:23 kstailey Exp $ */ /* $NetBSD: locore.s,v 1.40 1996/11/06 20:19:54 cgd Exp $ */ /* @@ -696,6 +696,7 @@ _intrcnt: _eintrcnt: .text +#if 0 /* * Emulation of VAX REI instruction. * @@ -755,7 +756,95 @@ Laststkadj: movl sp@,sp | and our SP Ldorte: rte | real return +#else +/* + * Emulation of VAX REI instruction. + * + * This code deals with checking for and servicing ASTs + * (profiling, scheduling) and software interrupts (network, softclock). + * We check for ASTs first, just like the VAX. To avoid excess overhead + * the T_ASTFLT handling code will also check for software interrupts so we + * do not have to do it here. After identifing that we need an AST we + * drop the IPL to allow device interrupts. + * + * This code is complicated by the fact that sendsig may have been called + * necessitating a stack cleanup. + */ + .comm _ssir,1 + .globl _astpending + .globl rei +rei: +#ifdef DIAGNOSTIC + tstl _panicstr | have we paniced? + jne Ldorte | yes, do not make matters worse +#endif + tstl _astpending | AST pending? 
+ jeq Lchksir | no, go check for SIR +Lrei1: + btst #5,sp@ | yes, are we returning to user mode? + jne Lchksir | no, go check for SIR + movw #PSL_LOWIPL,sr | lower SPL + clrl sp@- | stack adjust + moveml #0xFFFF,sp@- | save all registers + movl usp,a1 | including + movl a1,sp@(FR_SP) | the users SP +Lrei2: + clrl sp@- | VA == none + clrl sp@- | code == none + movl #T_ASTFLT,sp@- | type == async system trap + jbsr _trap | go handle it + lea sp@(12),sp | pop value args + movl sp@(FR_SP),a0 | restore user SP + movl a0,usp | from save area + movw sp@(FR_ADJ),d0 | need to adjust stack? + jne Laststkadj | yes, go to it + moveml sp@+,#0x7FFF | no, restore most user regs + addql #8,sp | toss SP and stack adjust + rte | and do real RTE +Laststkadj: + lea sp@(FR_HW),a1 | pointer to HW frame + addql #8,a1 | source pointer + movl a1,a0 | source + addw d0,a0 | + hole size = dest pointer + movl a1@-,a0@- | copy + movl a1@-,a0@- | 8 bytes + movl a0,sp@(FR_SP) | new SSP + moveml sp@+,#0x7FFF | restore user registers + movl sp@,sp | and our SP + rte | and do real RTE +Lchksir: + tstb _ssir | SIR pending? + jeq Ldorte | no, all done + movl d0,sp@- | need a scratch register + movw sp@(4),d0 | get SR + andw #PSL_IPL7,d0 | mask all but IPL + jne Lnosir | came from interrupt, no can do + movl sp@+,d0 | restore scratch register +Lgotsir: + movw #SPL1,sr | prevent others from servicing int + tstb _ssir | too late? + jeq Ldorte | yes, oh well... 
+ clrl sp@- | stack adjust + moveml #0xFFFF,sp@- | save all registers + movl usp,a1 | including + movl a1,sp@(FR_SP) | the users SP +Lsir1: + clrl sp@- | VA == none + clrl sp@- | code == none + movl #T_SSIR,sp@- | type == software interrupt + jbsr _trap | go handle it + lea sp@(12),sp | pop value args + movl sp@(FR_SP),a0 | restore + movl a0,usp | user SP + moveml sp@+,#0x7FFF | and all remaining registers + addql #8,sp | pop SP and stack adjust + rte +Lnosir: + movl sp@+,d0 | restore scratch register +Ldorte: + rte | real return +#endif /* * Initialization is at the beginning of this file, because the * kernel entry point needs to be at zero for compatibility with |