author	David Leonard <d@cvs.openbsd.org>	1998-12-21 07:22:27 +0000
committer	David Leonard <d@cvs.openbsd.org>	1998-12-21 07:22:27 +0000
commit	21f0fd70f7fdd97dbfd9a4962cbcded9833a6c2f
tree	96808755b4aaf142df62f4203d5904c9bc05584c
parent	a0089315910b3609b141f3e965cc2ac806fcc567
unfinished powerpc md stuff.. rahnds?
-rw-r--r--	lib/libc_r/arch/powerpc/_atomic_lock.c	43
-rw-r--r--	lib/libc_r/arch/powerpc/_spinlock.h	6
-rw-r--r--	lib/libc_r/arch/powerpc/uthread_machdep.h	31
-rw-r--r--	lib/libpthread/arch/powerpc/_atomic_lock.c	43
-rw-r--r--	lib/libpthread/arch/powerpc/_spinlock.h	6
-rw-r--r--	lib/libpthread/arch/powerpc/uthread_machdep.h	31
6 files changed, 160 insertions, 0 deletions
diff --git a/lib/libc_r/arch/powerpc/_atomic_lock.c b/lib/libc_r/arch/powerpc/_atomic_lock.c
new file mode 100644
index 00000000000..ab1a5321763
--- /dev/null
+++ b/lib/libc_r/arch/powerpc/_atomic_lock.c
@@ -0,0 +1,43 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/12/21 07:22:26 d Exp $ */
+/*
+ * Atomic lock for powerpc
+ */
+
+#include "spinlock.h"
+
+int
+_atomic_lock(volatile _spinlock_lock_t *lock)
+{
+ _spinlock_lock_t old;
+
+ __asm__("1: lwarx %0,0,%1 \n"
+ " stwcx. %2,0,%1 \n"
+ " bne- 1b \n"
+ : "=r" (old), "=r" (lock)
+ : "r" (_SPINLOCK_LOCKED), "1" (lock)
+ );
+
+ return (old != _SPINLOCK_UNLOCKED);
+
+ /*
+ * Dale <rahnds@openbsd.org> sez:
+ * Side note. to prevent two processes from accessing
+ * the same address with the lwarx in one instruction
+ * and the stwcx in another process, the current powerpc
+ * kernel uses a lwarx instruction without the corresponding
+ * stwcx which effectively causes any reservation of a
+ * process to be removed. if a context switch occurs
+ * between the two accesses the store will not occur
+ * and the condition code will cause it to loop. If on
+ * a dual processor machine, the reserve will cause
+ * appropriate bus cycle accesses to notify other
+ * processors.
+ */
+}
+
+int
+_atomic_is_locked(volatile _spinlock_lock_t *lock)
+{
+
+ return (*lock != _SPINLOCK_UNLOCKED);
+}
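
The lwarx/stwcx. pair above is PowerPC's load-reserved/store-conditional primitive: lwarx loads the current lock word and establishes a reservation, stwcx. stores _SPINLOCK_LOCKED only if that reservation still holds, and bne- retries the sequence when it was lost. As a rough usage sketch (not part of this commit; the example_* function names are illustrative), a caller would spin on _atomic_lock() until it reports the lock was previously free, and release with a plain store:

#include "spinlock.h"

/* Busy-wait until the atomic swap finds the lock previously unlocked. */
static void
example_spin_acquire(volatile _spinlock_lock_t *lock)
{
	while (_atomic_lock(lock))
		;		/* someone else holds it; try again */
}

/*
 * A plain store releases the lock; a multiprocessor port would likely
 * want a memory barrier (e.g. sync) before this store.
 */
static void
example_spin_release(volatile _spinlock_lock_t *lock)
{
	*lock = _SPINLOCK_UNLOCKED;
}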
diff --git a/lib/libc_r/arch/powerpc/_spinlock.h b/lib/libc_r/arch/powerpc/_spinlock.h
new file mode 100644
index 00000000000..a1e95b6aafa
--- /dev/null
+++ b/lib/libc_r/arch/powerpc/_spinlock.h
@@ -0,0 +1,6 @@
+/* $OpenBSD: _spinlock.h,v 1.1 1998/12/21 07:22:26 d Exp $ */
+
+#define _SPINLOCK_UNLOCKED (0)
+#define _SPINLOCK_LOCKED (1)
+typedef register_t _spinlock_lock_t;
+
diff --git a/lib/libc_r/arch/powerpc/uthread_machdep.h b/lib/libc_r/arch/powerpc/uthread_machdep.h
new file mode 100644
index 00000000000..3ba790a49c2
--- /dev/null
+++ b/lib/libc_r/arch/powerpc/uthread_machdep.h
@@ -0,0 +1,31 @@
+/*
+ * OpenBSD/powerpc machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/12/21 07:22:26 d Exp $
+ */
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+#define _thread_machdep_longjmp(a,v) longjmp(a,v)
+#define _thread_machdep_setjmp(a) setjmp(a)
+
+struct _machdep_struct {
+ char xxx;
+};
+
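
The _thread_machdep_setjmp()/_thread_machdep_longjmp() wrappers are what the user-level thread scheduler uses to switch contexts: the outgoing thread saves its state with setjmp and the incoming one is resumed with longjmp. A minimal, machine-independent sketch of that pattern follows (sched_switch and struct example_thread are illustrative names, not from this commit), assuming the incoming thread's jmp_buf has already been seeded, which is what the unfinished _thread_machdep_thread_create() macro is meant to arrange:

#include <setjmp.h>

/* The _thread_machdep_* macros come from uthread_machdep.h above. */
struct example_thread {
	jmp_buf	saved_state;		/* filled in by _thread_machdep_setjmp() */
	struct _machdep_struct machdep;	/* FP state etc., once implemented */
};

/* Suspend `from` and resume `to`; returns when `from` is switched back in. */
static void
sched_switch(struct example_thread *from, struct example_thread *to)
{
	if (_thread_machdep_setjmp(from->saved_state) == 0)
		_thread_machdep_longjmp(to->saved_state, 1);
	/* Execution resumes here only when another thread longjmps back. */
}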
diff --git a/lib/libpthread/arch/powerpc/_atomic_lock.c b/lib/libpthread/arch/powerpc/_atomic_lock.c
new file mode 100644
index 00000000000..ab1a5321763
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/_atomic_lock.c
@@ -0,0 +1,43 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/12/21 07:22:26 d Exp $ */
+/*
+ * Atomic lock for powerpc
+ */
+
+#include "spinlock.h"
+
+int
+_atomic_lock(volatile _spinlock_lock_t *lock)
+{
+ _spinlock_lock_t old;
+
+ __asm__("1: lwarx %0,0,%1 \n"
+ " stwcx. %2,0,%1 \n"
+ " bne- 1b \n"
+ : "=r" (old), "=r" (lock)
+ : "r" (_SPINLOCK_LOCKED), "1" (lock)
+ );
+
+ return (old != _SPINLOCK_UNLOCKED);
+
+ /*
+ * Dale <rahnds@openbsd.org> sez:
+ * Side note. to prevent two processes from accessing
+ * the same address with the lwarx in one instruction
+ * and the stwcx in another process, the current powerpc
+ * kernel uses a lwarx instruction without the corresponding
+ * stwcx which effectively causes any reservation of a
+ * process to be removed. if a context switch occurs
+ * between the two accesses the store will not occur
+ * and the condition code will cause it to loop. If on
+ * a dual processor machine, the reserve will cause
+ * appropriate bus cycle accesses to notify other
+ * processors.
+ */
+}
+
+int
+_atomic_is_locked(volatile _spinlock_lock_t *lock)
+{
+
+ return (*lock != _SPINLOCK_UNLOCKED);
+}
diff --git a/lib/libpthread/arch/powerpc/_spinlock.h b/lib/libpthread/arch/powerpc/_spinlock.h
new file mode 100644
index 00000000000..a1e95b6aafa
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/_spinlock.h
@@ -0,0 +1,6 @@
+/* $OpenBSD: _spinlock.h,v 1.1 1998/12/21 07:22:26 d Exp $ */
+
+#define _SPINLOCK_UNLOCKED (0)
+#define _SPINLOCK_LOCKED (1)
+typedef register_t _spinlock_lock_t;
+
diff --git a/lib/libpthread/arch/powerpc/uthread_machdep.h b/lib/libpthread/arch/powerpc/uthread_machdep.h
new file mode 100644
index 00000000000..3ba790a49c2
--- /dev/null
+++ b/lib/libpthread/arch/powerpc/uthread_machdep.h
@@ -0,0 +1,31 @@
+/*
+ * OpenBSD/powerpc machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/12/21 07:22:26 d Exp $
+ */
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* rahnds to fill in */ \
+ }
+
+#define _thread_machdep_longjmp(a,v) longjmp(a,v)
+#define _thread_machdep_setjmp(a) setjmp(a)
+
+struct _machdep_struct {
+ char xxx;
+};
+