summaryrefslogtreecommitdiff
path: root/sys/arch/arm/include
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2014-11-14 09:56:07 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2014-11-14 09:56:07 +0000
commit86ea9df69d1d8b7c05efb9d16939247a69d9e6ac (patch)
tree7f1f026878efc964493c2bd23623279382a7c92c /sys/arch/arm/include
parentdfee619fa1557639f818be2faec229909d51e52c (diff)
implement the atomic_foo things on arm.
testing and ok jsg@
Diffstat (limited to 'sys/arch/arm/include')
-rw-r--r--sys/arch/arm/include/atomic.h184
1 files changed, 181 insertions, 3 deletions
diff --git a/sys/arch/arm/include/atomic.h b/sys/arch/arm/include/atomic.h
index 83f3a42c6d8..ba80cb085eb 100644
--- a/sys/arch/arm/include/atomic.h
+++ b/sys/arch/arm/include/atomic.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: atomic.h,v 1.9 2014/03/29 18:09:28 guenther Exp $ */
+/* $OpenBSD: atomic.h,v 1.10 2014/11/14 09:56:06 dlg Exp $ */
/* Public Domain */
@@ -7,13 +7,191 @@
#if defined(_KERNEL)
+#include <arm/cpufunc.h>
+
/*
* on pre-v6 arm processors, it is necessary to disable interrupts if
* in the kernel and atomic updates are necessary without full mutexes
+ *
+ * eventually it would be interesting to have these functions
+ * support the V6/V7+ atomic instructions ldrex/strex if available
+ * on the CPU.
*/
-void atomic_setbits_int(volatile unsigned int *, unsigned int);
-void atomic_clearbits_int(volatile unsigned int *, unsigned int);
+static inline unsigned int
+_atomic_cas_uint(volatile unsigned int *uip, unsigned int o, unsigned int n)
+{
+ unsigned int cpsr;
+ unsigned int rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip;
+ if (rv == o)
+ *uip = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_cas_ulong(volatile unsigned long *uip, unsigned long o, unsigned long n)
+{
+ unsigned int cpsr;
+ unsigned long rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip;
+ if (rv == o)
+ *uip = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *uip, void *o, void *n)
+{
+ unsigned int cpsr;
+ void * volatile *uipp = (void * volatile *)uip;
+ void *rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uipp;
+ if (rv == o)
+ *uipp = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_swap_uint(volatile unsigned int *uip, unsigned int n)
+{
+ unsigned int cpsr;
+ unsigned int rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip;
+ *uip = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_swap_uint(_p, _n) _atomic_swap_uint((_p), (_n))
+
+static inline unsigned long
+_atomic_swap_ulong(volatile unsigned long *uip, unsigned long n)
+{
+ unsigned int cpsr;
+ unsigned long rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip;
+ *uip = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_swap_ulong(_p, _n) _atomic_swap_ulong((_p), (_n))
+
+static inline void *
+_atomic_swap_ptr(volatile void *uip, void *n)
+{
+ unsigned int cpsr;
+ void * volatile *uipp = (void * volatile *)uip;
+ void *rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uipp;
+ *uipp = n;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
+
+static inline unsigned int
+_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
+{
+ unsigned int cpsr;
+ unsigned int rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip + v;
+ *uip = rv;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))
+
+static inline unsigned long
+_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
+{
+ unsigned int cpsr;
+ unsigned long rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip + v;
+ *uip = rv;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))
+
+static inline unsigned int
+_atomic_sub_int_nv(volatile unsigned int *uip, unsigned int v)
+{
+ unsigned int cpsr;
+ unsigned int rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip - v;
+ *uip = rv;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))
+
+static inline unsigned long
+_atomic_sub_long_nv(volatile unsigned long *uip, unsigned long v)
+{
+ unsigned int cpsr;
+ unsigned long rv;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ rv = *uip - v;
+ *uip = rv;
+ restore_interrupts(cpsr);
+
+ return (rv);
+}
+#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))
+
+static inline void
+atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+{
+ unsigned int cpsr;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ *uip |= v;
+ restore_interrupts(cpsr);
+}
+
+static inline void
+atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
+{
+ unsigned int cpsr;
+
+ cpsr = disable_interrupts(I32_bit|F32_bit);
+ *uip &= ~v;
+ restore_interrupts(cpsr);
+}
#endif /* defined(_KERNEL) */
#endif /* _ARM_ATOMIC_H_ */