1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
|
/* $OpenBSD: psl.h,v 1.7 1996/05/31 10:46:29 niklas Exp $ */
/* $NetBSD: psl.h,v 1.8 1996/04/21 21:13:22 veego Exp $ */
#ifndef _MACHINE_PSL_H_
#define _MACHINE_PSL_H_
#include <m68k/psl.h>
#if defined(_KERNEL) && !defined(_LOCORE)
static __inline int splraise __P((int));
static __inline int splexact __P((int));
static __inline void splx __P((int));
static __inline int spllower __P((int));
/*
 * splraise(npsl): raise the CPU interrupt priority to npsl if (and only
 * if) npsl is above the current level; never lowers it.  Returns the
 * previous SR contents so the caller can splx() back.
 */
static __inline int
splraise(npsl)
	register int npsl;
{
	register int opsl;

	/*
	 * Fetch the current SR.  The output operand must be an
	 * early-clobbered data register: the correct constraint
	 * spelling is "=&d" -- gcc requires the "=" modifier first,
	 * so the original "&=d" ordering is rejected.
	 */
	__asm __volatile ("clrl %0; movew sr,%0" : "=&d" (opsl) : : "cc");
	/* Only move the priority upward; leave SR alone otherwise. */
	if (npsl > (opsl & (PSL_S|PSL_IPL)))
		__asm __volatile ("movew %0,sr" : : "di" (npsl) : "cc");
	return opsl;
}
/*
 * splexact(npsl): unconditionally set the CPU priority to exactly npsl,
 * whether that raises or lowers it.  Returns the previous SR contents.
 * For enhanced security prefer splraise() (see comment below at the
 * shortcut macros).
 */
static __inline int
splexact(npsl)
	register int npsl;
{
	register int opsl;

	/*
	 * Read the old SR and install the new one in one asm so no
	 * window exists between the two.  As in splraise(), the output
	 * constraint must be "=&d" ("=" before the early-clobber "&");
	 * the original "&=d" has the modifiers reversed, which gcc
	 * does not accept.
	 */
	__asm __volatile ("clrl %0; movew sr,%0; movew %1,sr" : "=&d" (opsl) :
	    "di" (npsl) : "cc");
	return opsl;
}
#if !defined(IPL_REMAP_1) && !defined(IPL_REMAP_2)
/*
 * splx(npsl): restore the CPU priority to a value previously returned
 * by splraise()/splexact().  With no IPL remapping configured this is
 * a plain unconditional load of SR.
 */
static __inline void
splx(npsl)
register int npsl;
{
__asm __volatile ("movew %0,sr" : : "di" (npsl) : "cc");
}
#endif
#ifdef IPL_REMAP_1
/* Pending external-interrupt IPL; presumably maintained by the
 * interrupt dispatch code -- defined elsewhere, not visible here. */
extern int isr_exter_ipl;
extern void walk_ipls __P((int, int));
/*
 * splx(npsl): restore the CPU priority to npsl, but first run any
 * remapped (deferred) external interrupt service via walk_ipls() when
 * the pending external IPL -- shifted left 8 to line up with the SR's
 * PSL_IPL field -- is above the level being restored.
 */
static __inline void
splx(npsl)
register int npsl;
{
/*
 * XXX This is scary as hell. Actually removing this increases performance
 * XXX while functionality remains. However fairness of service is altered.
 * XXX Potential lower priority services gets serviced before higher ones.
 */
if ((isr_exter_ipl << 8) > (npsl & PSL_IPL))
walk_ipls(isr_exter_ipl, npsl);
__asm __volatile("movew %0,sr" : : "di" (npsl) : "cc");
}
#endif
#ifdef IPL_REMAP_2
/* Second remapping scheme: walk_ipls() takes only the target level;
 * defined elsewhere, semantics not visible from this header. */
extern int walk_ipls __P((int));
/*
 * splx(npsl): run any deferred interrupt service for levels above
 * npsl, then restore the CPU priority to npsl.
 */
static __inline void
splx(npsl)
register int npsl;
{
/* We should maybe have a flag telling if this is needed. */
walk_ipls(npsl);
__asm __volatile("movew %0,sr" : : "di" (npsl) : "cc");
}
#endif
/*
 * spllower(npsl): drop the CPU priority to npsl via splx() (which also
 * handles any configured IPL remapping), returning the previous SR
 * contents.
 */
static __inline int
spllower(npsl)
	register int npsl;
{
	register int opsl;

	/*
	 * Snapshot the old SR before lowering.  The constraint must be
	 * "=&d" (early-clobber modifier after "="); the original
	 * "&=d" ordering is invalid for gcc.
	 */
	__asm __volatile ("clrl %0; movew sr,%0" : "=&d" (opsl) : : "cc");
	splx(npsl);
	return opsl;
}
/*
 * Shortcuts. For enhanced security use splraise instead of splexact.
 */
/* Set the CPU priority to exactly the given hardware level (1-7). */
#define spl1() splexact(PSL_S|PSL_IPL1)
#define spl2() splexact(PSL_S|PSL_IPL2)
#define spl3() splexact(PSL_S|PSL_IPL3)
#define spl4() splexact(PSL_S|PSL_IPL4)
#define spl5() splexact(PSL_S|PSL_IPL5)
#define spl6() splexact(PSL_S|PSL_IPL6)
#define spl7() splexact(PSL_S|PSL_IPL7)
/*
 * Hardware interrupt masks
 */
#define splbio() spl3()
#define splnet() spl3()
#define spltty() spl4()
#define splimp() spl4()
/* With level-6 deferral or IPL remapping, clock interrupts are
 * serviced at level 4 instead of the native level 6. */
#if defined(LEV6_DEFER) || defined(IPL_REMAP_1) || defined(IPL_REMAP_2)
#define splclock() spl4()
#else
#define splclock() spl6()
#endif
#define splstatclock() splclock()
/*
 * Software interrupt masks
 *
 * NOTE: splsoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define splsoftclock() spllower(PSL_S|PSL_IPL1)
#define splsoftnet() spl1()
#define splsofttty() spl1()
/*
 * Miscellaneous
 */
/*
 * When remapping high interrupts down we also pull down splhigh, so that
 * the fast internal serial interrupt can get called allover. This is safe
 * as this interrupt never goes outside of its own structures.
 */
#if defined(LEV6_DEFER) || defined(IPL_REMAP_1) || defined(IPL_REMAP_2)
#define splhigh() spl4()
#else
#define splhigh() spl7()
#endif
/* Allow all interrupts (supervisor mode, IPL 0). */
#define spl0() spllower(PSL_S|PSL_IPL0)
#endif /* KERNEL && !_LOCORE */
#endif /* _MACHINE_PSL_H_ */
|