summary refs log tree commit diff
path: root/sys/arch/amd64
diff options
context:
space:
mode:
author Artur Grabowski <art@cvs.openbsd.org> 2004-06-26 05:29:18 +0000
committer Artur Grabowski <art@cvs.openbsd.org> 2004-06-26 05:29:18 +0000
commit 1b78c9699ad914864d387a652e35e477b232297d (patch)
tree e9b31cc1be0b063cc48505b9213a73777c2a3e4d /sys/arch/amd64
parent 2848f93ab2b2681cd23db65b1134e040761b7d76 (diff)
deinline splraise, spllower and setsoftint.
Makes the kernel smaller and faster. deraadt@ ok
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r-- sys/arch/amd64/amd64/intr.c   | 56
-rw-r--r-- sys/arch/amd64/include/intr.h | 64
2 files changed, 59 insertions(+), 61 deletions(-)
diff --git a/sys/arch/amd64/amd64/intr.c b/sys/arch/amd64/amd64/intr.c
index f5f929cbbd2..d3bbfa778dc 100644
--- a/sys/arch/amd64/amd64/intr.c
+++ b/sys/arch/amd64/amd64/intr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intr.c,v 1.2 2004/06/25 11:03:27 art Exp $ */
+/* $OpenBSD: intr.c,v 1.3 2004/06/26 05:29:17 art Exp $ */
/* $NetBSD: intr.c,v 1.3 2003/03/03 22:16:20 fvdl Exp $ */
/*
@@ -705,3 +705,57 @@ intr_printconfig(void)
}
#endif
}
+
+/*
+ * Add a mask to cpl, and return the old value of cpl.
+ */
+int
+splraise(int nlevel)
+{
+ int olevel;
+ struct cpu_info *ci = curcpu();
+
+ olevel = ci->ci_ilevel;
+ if (nlevel > olevel)
+ ci->ci_ilevel = nlevel;
+ return (olevel);
+}
+
+/*
+ * Restore a value to cpl (unmasking interrupts). If any unmasked
+ * interrupts are pending, call Xspllower() to process them.
+ */
+int
+spllower(int nlevel)
+{
+ int olevel;
+ struct cpu_info *ci = curcpu();
+
+ /*
+ * Since this should only lower the interrupt level,
+ * the XOR below should only show interrupts that
+ * are being unmasked.
+ */
+ olevel = ci->ci_ilevel;
+ if (ci->ci_ipending & IUNMASK(ci,nlevel))
+ Xspllower(nlevel);
+ else
+ ci->ci_ilevel = nlevel;
+ return (olevel);
+}
+
+/*
+ * Software interrupt registration
+ *
+ * We hand-code this to ensure that it's atomic.
+ *
+ * XXX always scheduled on the current CPU.
+ */
+void
+softintr(int sir)
+{
+ struct cpu_info *ci = curcpu();
+
+ __asm __volatile("lock ; orl %1, %0" :
+ "=m"(ci->ci_ipending) : "ir" (1 << sir));
+}
diff --git a/sys/arch/amd64/include/intr.h b/sys/arch/amd64/include/intr.h
index f468f1a0b27..69d6247e700 100644
--- a/sys/arch/amd64/include/intr.h
+++ b/sys/arch/amd64/include/intr.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: intr.h,v 1.3 2004/06/25 11:03:28 art Exp $ */
+/* $OpenBSD: intr.h,v 1.4 2004/06/26 05:29:17 art Exp $ */
/* $NetBSD: intr.h,v 1.2 2003/05/04 22:01:56 fvdl Exp $ */
/*-
@@ -111,9 +111,9 @@ struct intrhand {
extern void Xspllower(int);
-static __inline int splraise(int);
-static __inline int spllower(int);
-static __inline void softintr(int);
+int splraise(int);
+int spllower(int);
+void softintr(int);
/*
* Convert spl level to local APIC level
@@ -131,46 +131,6 @@ static __inline void softintr(int);
#define __splbarrier() __asm __volatile("":::"memory")
/*
- * Add a mask to cpl, and return the old value of cpl.
- */
-static __inline int
-splraise(int nlevel)
-{
- int olevel;
- struct cpu_info *ci = curcpu();
-
- olevel = ci->ci_ilevel;
- if (nlevel > olevel)
- ci->ci_ilevel = nlevel;
- __splbarrier();
- return (olevel);
-}
-
-/*
- * Restore a value to cpl (unmasking interrupts). If any unmasked
- * interrupts are pending, call Xspllower() to process them.
- */
-static __inline int
-spllower(int nlevel)
-{
- int olevel;
- struct cpu_info *ci = curcpu();
-
- __splbarrier();
- /*
- * Since this should only lower the interrupt level,
- * the XOR below should only show interrupts that
- * are being unmasked.
- */
- olevel = ci->ci_ilevel;
- if (ci->ci_ipending & IUNMASK(ci,nlevel))
- Xspllower(nlevel);
- else
- ci->ci_ilevel = nlevel;
- return (olevel);
-}
-
-/*
* Hardware interrupt masks
*/
#define splbio() splraise(IPL_BIO)
@@ -228,22 +188,6 @@ void splassert_check(int, const char *);
#endif
/*
- * Software interrupt registration
- *
- * We hand-code this to ensure that it's atomic.
- *
- * XXX always scheduled on the current CPU.
- */
-static __inline void
-softintr(int sir)
-{
- struct cpu_info *ci = curcpu();
-
- __asm __volatile("lock ; orl %1, %0" :
- "=m"(ci->ci_ipending) : "ir" (1 << sir));
-}
-
-/*
* XXX
*/
#define setsoftnet() softintr(SIR_NET)