Diffstat (limited to 'sys/arch/amd64/include/atomic.h')
-rw-r--r--  sys/arch/amd64/include/atomic.h | 55
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/sys/arch/amd64/include/atomic.h b/sys/arch/amd64/include/atomic.h
index 693e242c5b6..db184d78122 100644
--- a/sys/arch/amd64/include/atomic.h
+++ b/sys/arch/amd64/include/atomic.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: atomic.h,v 1.4 2007/02/06 17:13:33 art Exp $ */
+/* $OpenBSD: atomic.h,v 1.5 2007/02/19 17:18:42 deraadt Exp $ */
/* $NetBSD: atomic.h,v 1.1 2003/04/26 18:39:37 fvdl Exp $ */
/*
@@ -36,8 +36,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _ATOMIC_H
-#define _ATOMIC_H
+#ifndef _AMD64_ATOMIC_H
+#define _AMD64_ATOMIC_H
/*
* Perform atomic operations on memory. Should be atomic with respect
@@ -47,7 +47,7 @@
* void atomic_clearbits_int(volatile u_int *a, u_int mask) { *a &= ~mask; }
*/
-#ifndef _LOCORE
+#if defined(_KERNEL) && !defined(_LOCORE)
#ifdef MULTIPROCESSOR
#define LOCK "lock"
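
Note on the hunk above: the LOCK macro works by C string-literal concatenation. On MULTIPROCESSOR kernels the "lock" prefix is spliced into each asm template so the read-modify-write is atomic across CPUs; on uniprocessor kernels it expands to nothing and the plain instruction suffices. A minimal user-space sketch of the same pattern (the MULTIPROCESSOR define is forced here for illustration, and "+m" is used where the kernel header writes "=m", since modern compilers require "+m" for a read-modify-write operand):

#include <stdio.h>

#define MULTIPROCESSOR			/* assumed here for illustration */
#ifdef MULTIPROCESSOR
#define LOCK "lock"
#else
#define LOCK
#endif

static inline void
setbits_u32(volatile unsigned int *ptr, unsigned int bits)
{
	/* LOCK " orl %1,%0" concatenates to "lock orl %1,%0" */
	__asm__ volatile (LOCK " orl %1,%0" : "+m" (*ptr) : "ir" (bits));
}

int
main(void)
{
	volatile unsigned int flags = 0x1;

	setbits_u32(&flags, 0x4);
	printf("flags=%#x\n", flags);	/* prints flags=0x5 */
	return 0;
}
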
@@ -56,35 +56,38 @@
#endif
static __inline u_int64_t
-x86_atomic_testset_u64(volatile u_int64_t *ptr, u_int64_t val) {
- __asm__ volatile ("xchgq %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
- return val;
+x86_atomic_testset_u64(volatile u_int64_t *ptr, u_int64_t val)
+{
+ __asm__ volatile ("xchgq %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
+ return val;
}
static __inline u_int32_t
-x86_atomic_testset_u32(volatile u_int32_t *ptr, u_int32_t val) {
- __asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
- return val;
+x86_atomic_testset_u32(volatile u_int32_t *ptr, u_int32_t val)
+{
+ __asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
+ return val;
}
-
-
static __inline int32_t
-x86_atomic_testset_i32(volatile int32_t *ptr, int32_t val) {
- __asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
- return val;
+x86_atomic_testset_i32(volatile int32_t *ptr, int32_t val)
+{
+ __asm__ volatile ("xchgl %0,(%2)" :"=r" (val):"0" (val),"r" (ptr));
+ return val;
}
static __inline void
-x86_atomic_setbits_u32(volatile u_int32_t *ptr, u_int32_t bits) {
- __asm __volatile(LOCK " orl %1,%0" : "=m" (*ptr) : "ir" (bits));
+x86_atomic_setbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
+{
+ __asm __volatile(LOCK " orl %1,%0" : "=m" (*ptr) : "ir" (bits));
}
static __inline void
-x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits) {
- __asm __volatile(LOCK " andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
+x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
+{
+ __asm __volatile(LOCK " andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
}
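
The testset routines in this hunk are unconditional exchanges and carry no LOCK macro because xchg with a memory operand is implicitly locked on x86. The classic use of such a primitive is a test-and-set spinlock; a hedged user-space sketch follows (the lock functions are illustrative, not part of this header, and a "memory" clobber is added so the compiler treats the exchange as a barrier, which a lock acquire needs):

static inline unsigned int
testset_u32(volatile unsigned int *ptr, unsigned int val)
{
	/* same template as x86_atomic_testset_u32 above */
	__asm__ volatile ("xchgl %0,(%2)"
	    : "=r" (val) : "0" (val), "r" (ptr) : "memory");
	return val;
}

static void
spin_lock(volatile unsigned int *lock)
{
	while (testset_u32(lock, 1) != 0)
		;	/* spin until the previous value was 0 (unlocked) */
}

static void
spin_unlock(volatile unsigned int *lock)
{
	*lock = 0;	/* plain store: x86 stores have release semantics */
}
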
@@ -96,13 +99,15 @@ x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits) {
* is an asm error pending
*/
static __inline void
-x86_atomic_setbits_u64(volatile u_int64_t *ptr, u_int64_t bits) {
- __asm __volatile(LOCK " orq %1,%0" : "=m" (*ptr) : "ir" (bits));
+x86_atomic_setbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
+{
+ __asm __volatile(LOCK " orq %1,%0" : "=m" (*ptr) : "ir" (bits));
}
static __inline void
-x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits) {
- __asm __volatile(LOCK " andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
+x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
+{
+ __asm __volatile(LOCK " andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
}
#define x86_atomic_testset_ul x86_atomic_testset_u32
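
The truncated comment at the top of the previous hunk ("is an asm error pending") presumably refers to the "i" alternative of the "ir" constraint in the 64-bit routines: orq and andq only encode sign-extended 32-bit immediates, so a 64-bit constant that the compiler routes to the immediate alternative cannot be assembled. A hedged illustration of what does and does not build (the failure mode is inferred from the constraint, not from the elided comment text):

#include <sys/types.h>
#include <machine/atomic.h>

volatile u_int64_t w;

void
demo(void)
{
	/* -2 and 1 fit a sign-extended imm32: these assemble */
	x86_atomic_setbits_u64(&w, 0x1);
	x86_atomic_clearbits_u64(&w, 0x1);

	/* a true 64-bit constant may select "i", but orq has no
	 * imm64 form, so the assembler rejects the generated code */
	x86_atomic_setbits_u64(&w, 0x100000000ULL);
}
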
@@ -117,5 +122,5 @@ x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits) {
#undef LOCK
-#endif
-#endif
+#endif /* defined(_KERNEL) && !defined(_LOCORE) */
+#endif /* _AMD64_ATOMIC_H */
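
Taken together, the reindented routines behave like this minimal user-space rendition (illustration only: names are shortened, LOCK is hard-coded to "lock", "+m" replaces the header's "=m" on the read-modify-write operands, and "e" is used for the 64-bit immediates to sidestep the imm64 issue noted above; build on amd64 with cc -O2):

#include <stdio.h>
#include <stdint.h>

static inline uint64_t
testset_u64(volatile uint64_t *ptr, uint64_t val)
{
	/* same template as x86_atomic_testset_u64 in the diff */
	__asm__ volatile ("xchgq %0,(%2)"
	    : "=r" (val) : "0" (val), "r" (ptr) : "memory");
	return val;
}

static inline void
setbits_u64(volatile uint64_t *ptr, uint64_t bits)
{
	__asm__ volatile ("lock orq %1,%0" : "+m" (*ptr) : "er" (bits));
}

static inline void
clearbits_u64(volatile uint64_t *ptr, uint64_t bits)
{
	__asm__ volatile ("lock andq %1,%0" : "+m" (*ptr) : "er" (~bits));
}

int
main(void)
{
	volatile uint64_t w = 0;
	uint64_t old;

	setbits_u64(&w, 0x5);		/* w == 0x5 */
	clearbits_u64(&w, 0x1);		/* w == 0x4 */
	old = testset_u64(&w, 0xff);	/* old == 0x4, w == 0xff */
	printf("old=%#llx w=%#llx\n",
	    (unsigned long long)old, (unsigned long long)w);
	return 0;
}
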