author    | Niklas Hallqvist <niklas@cvs.openbsd.org> | 1996-05-07 10:22:46 +0000
committer | Niklas Hallqvist <niklas@cvs.openbsd.org> | 1996-05-07 10:22:46 +0000
commit    | 4ef6dd8662d0ea2661549033419660930d63094a (patch)
tree      | 4ebcf8b1ecc7a9872b29dd9bf5145a6276f3826a /sys
parent    | 38ae05158636df94346dcf238261111d4b783775 (diff)
Restructure and document a bit. Also fixed a confusing bug, which
might actually turn out to be a feature... :-) But I want to test that a
bit more before putting it into the tree.
Diffstat (limited to 'sys')
-rw-r--r-- | sys/arch/amiga/include/psl.h | 89
1 file changed, 60 insertions, 29 deletions
diff --git a/sys/arch/amiga/include/psl.h b/sys/arch/amiga/include/psl.h
index 708b63887e8..16ff7fbaa84 100644
--- a/sys/arch/amiga/include/psl.h
+++ b/sys/arch/amiga/include/psl.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: psl.h,v 1.5 1996/05/02 07:33:43 niklas Exp $ */
+/* $OpenBSD: psl.h,v 1.6 1996/05/07 10:22:45 niklas Exp $ */
 /* $NetBSD: psl.h,v 1.8 1996/04/21 21:13:22 veego Exp $ */
 
 #ifndef _MACHINE_PSL_H_
@@ -14,60 +14,85 @@ splraise(npsl)
 {
 	register int opsl;
 
+	__asm __volatile ("clrl %0; movew sr,%0" : "&=d" (opsl) : : "cc");
+	if (npsl > (opsl & (PSL_S|PSL_IPL)))
+		__asm __volatile ("movew %0,sr" : : "di" (npsl) : "cc");
+	return opsl;
+}
+
+static __inline int
+splexact(npsl)
+	register int npsl;
+{
+	register int opsl;
+
 	__asm __volatile ("clrl %0; movew sr,%0; movew %1,sr" : "&=d" (opsl) :
-	    "di" (npsl));
+	    "di" (npsl) : "cc");
 	return opsl;
 }
 
-#ifdef IPL_REMAP_1
+#if !defined(IPL_REMAP_1) && !defined(IPL_REMAP_2)
+static __inline void
+splx(npsl)
+	register int npsl;
+{
+	__asm __volatile ("movew %0,sr" : : "di" (npsl) : "cc");
+}
+#endif
 
+#ifdef IPL_REMAP_1
 extern int isr_exter_ipl;
 extern void walk_ipls __P((int, int));
 
-static __inline int
+static __inline void
 splx(npsl)
 	register int npsl;
 {
-	register int opsl;
-
-	__asm __volatile ("clrl %0; movew sr,%0" : "=d" (opsl));
+/*
+ * XXX This is scary as hell.  Actually removing this increases performance
+ * XXX while functionality remains.  However fairness of service is altered.
+ * XXX Potential lower priority services gets serviced before higher ones.
+ */
 	if ((isr_exter_ipl << 8) > (npsl & PSL_IPL))
 		walk_ipls(isr_exter_ipl, npsl);
-	__asm __volatile("movew %0,sr" : : "di" (npsl));
-	return opsl;
+	__asm __volatile("movew %0,sr" : : "di" (npsl) : "cc");
 }
 #endif
 
-#ifndef IPL_REMAP_2
-#define splx splraise
-#else
-
+#ifdef IPL_REMAP_2
 extern int walk_ipls __P((int));
 
-static __inline int
+static __inline void
 splx(npsl)
 	register int npsl;
 {
+	/* We should maybe have a flag telling if this is needed. */
+	walk_ipls(npsl);
+	__asm __volatile("movew %0,sr" : : "di" (npsl) : "cc");
+}
+#endif
+
+static __inline int
+spllower(npsl)
+	register int npsl;
+{
 	register int opsl;
 
-	/* We should maybe have a flag telling if this is needed. */
-	opsl = walk_ipls(npsl);
-	__asm __volatile("movew %0,sr" : : "di" (npsl));
+	__asm __volatile ("clrl %0; movew sr,%0" : "&=d" (opsl) : : "cc");
+	splx(npsl);
 	return opsl;
 }
 
-#endif
-
 /*
- * Shortcuts
+ * Shortcuts.  For enhanced security use splraise instead of splexact.
  */
-#define spl1() splraise(PSL_S|PSL_IPL1)
-#define spl2() splraise(PSL_S|PSL_IPL2)
-#define spl3() splraise(PSL_S|PSL_IPL3)
-#define spl4() splraise(PSL_S|PSL_IPL4)
-#define spl5() splraise(PSL_S|PSL_IPL5)
-#define spl6() splraise(PSL_S|PSL_IPL6)
-#define spl7() splraise(PSL_S|PSL_IPL7)
+#define spl1() splexact(PSL_S|PSL_IPL1)
+#define spl2() splexact(PSL_S|PSL_IPL2)
+#define spl3() splexact(PSL_S|PSL_IPL3)
+#define spl4() splexact(PSL_S|PSL_IPL4)
+#define spl5() splexact(PSL_S|PSL_IPL5)
+#define spl6() splexact(PSL_S|PSL_IPL6)
+#define spl7() splexact(PSL_S|PSL_IPL7)
 
 /*
  * Hardware interrupt masks
@@ -89,19 +114,25 @@ splx(npsl)
  * NOTE: splsoftclock() is used by hardclock() to lower the priority from
  * clock to softclock before it calls softclock().
  */
-#define splsoftclock() splx(PSL_S|PSL_IPL1)
+#define splsoftclock() spllower(PSL_S|PSL_IPL1)
 #define splsoftnet() spl1()
 #define splsofttty() spl1()
 
 /*
  * Miscellaneous
  */
+
+/*
+ * When remapping high interrupts down we also pull down splhigh, so that
+ * the fast internal serial interrupt can get called allover.  This is safe
+ * as this interrupt never goes outside of its own structures.
+ */
 #if defined(LEV6_DEFER) || defined(IPL_REMAP_1) || defined(IPL_REMAP_2)
 #define splhigh() spl4()
 #else
 #define splhigh() spl7()
 #endif
 
-#define spl0() splx(PSL_S|PSL_IPL0)
+#define spl0() spllower(PSL_S|PSL_IPL0)
 
 #endif /* KERNEL && !_LOCORE */
 #endif /* _MACHINE_PSL_H_ */
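For context, the restructured header splits interrupt-priority handling across several primitives: splraise() only raises the IPL and returns the previous PSL, splexact() installs the requested level unconditionally, and splx()/spllower() drop back down (running walk_ipls() first under the IPL_REMAP variants). Below is a minimal sketch of how a caller would typically pair these, assuming the definitions above; the function name and the chosen level are hypothetical:

/*
 * Hypothetical caller: save the current PSL, raise the priority around
 * a critical section, then restore what was saved.  splraise() never
 * lowers the level, so nested sections compose safely; splx() puts the
 * saved priority back.
 */
static void
example_protect_shared_data(void)
{
	int s;

	s = splraise(PSL_S|PSL_IPL4);	/* mask interrupts at IPL 4 and below */
	/* ... touch data shared with an interrupt handler ... */
	splx(s);			/* drop back to the saved priority */
}

Note that the splN() shortcuts now expand to splexact(), which will also lower the level if asked to; the comment added in the diff recommends splraise() where only raising is intended.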