author     David Leonard <d@cvs.openbsd.org>   1998-11-20 11:15:39 +0000
committer  David Leonard <d@cvs.openbsd.org>   1998-11-20 11:15:39 +0000
commit     394c7a9821726b84f284c0c4385b1a9198afa0b0
tree       e3fcaf31862eb53986f206217f7986fe433c6ce7
parent     d2d530d679e5709dfdaa5ea40bea4a4d25694930
Move atomic_lock code from asm to C with inline asm;
Add m68k, mips and sparc. (needs more careful checking) Add 'slow_atomic_lock' for crippled archs.
-rw-r--r--   lib/libc_r/arch/alpha/_atomic_lock.S          58
-rw-r--r--   lib/libc_r/arch/alpha/_atomic_lock.c          33
-rw-r--r--   lib/libc_r/arch/i386/_atomic_lock.S           46
-rw-r--r--   lib/libc_r/arch/i386/_atomic_lock.c           26
-rw-r--r--   lib/libc_r/arch/m68k/_atomic_lock.c           27
-rw-r--r--   lib/libc_r/arch/m68k/uthread_machdep.h        38
-rw-r--r--   lib/libc_r/arch/mips/_atomic_lock.S           35
-rw-r--r--   lib/libc_r/arch/mips/_atomic_lock.c           51
-rw-r--r--   lib/libc_r/arch/mips/uthread_machdep.h         2
-rw-r--r--   lib/libc_r/arch/sparc/_atomic_lock.c          10
-rw-r--r--   lib/libc_r/arch/sparc/uthread_machdep.h       45
-rw-r--r--   lib/libc_r/sys/Makefile.inc                    6
-rw-r--r--   lib/libc_r/sys/slow_atomic_lock.c             33
-rw-r--r--   lib/libpthread/arch/alpha/_atomic_lock.S      58
-rw-r--r--   lib/libpthread/arch/alpha/_atomic_lock.c      33
-rw-r--r--   lib/libpthread/arch/i386/_atomic_lock.S       46
-rw-r--r--   lib/libpthread/arch/i386/_atomic_lock.c       26
-rw-r--r--   lib/libpthread/arch/m68k/_atomic_lock.c       27
-rw-r--r--   lib/libpthread/arch/m68k/uthread_machdep.h    38
-rw-r--r--   lib/libpthread/arch/mips/_atomic_lock.S       35
-rw-r--r--   lib/libpthread/arch/mips/_atomic_lock.c       51
-rw-r--r--   lib/libpthread/arch/mips/uthread_machdep.h     2
-rw-r--r--   lib/libpthread/arch/sparc/_atomic_lock.c      10
-rw-r--r--   lib/libpthread/arch/sparc/uthread_machdep.h   45
-rw-r--r--   lib/libpthread/sys/Makefile.inc                6
-rw-r--r--   lib/libpthread/sys/slow_atomic_lock.c         33
26 files changed, 534 insertions, 286 deletions
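Every new _atomic_lock() implements the same contract: atomically store a
non-zero value into the lock word and return its previous contents, so a
return of zero means the caller acquired the lock. A minimal caller-side
sketch (the _spinlock/_spinunlock names are illustrative, not part of this
commit):

    /* spin until we are the thread that flipped the lock from 0 */
    void
    _spinlock(volatile register_t *lock)
    {
        while (_atomic_lock(lock) != 0)
            ;   /* a real uthread library would yield here */
    }

    /* releasing needs no atomic op: a plain store of 0 suffices */
    #define _spinunlock(lock)   (*(lock) = 0)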
diff --git a/lib/libc_r/arch/alpha/_atomic_lock.S b/lib/libc_r/arch/alpha/_atomic_lock.S
deleted file mode 100644
index ba0022e4a4d..00000000000
--- a/lib/libc_r/arch/alpha/_atomic_lock.S
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- * copyright Douglas Santry 1996
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the above copyright is retained
- * in the source form.
- *
- * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:57 d Exp $
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/08/28 01:54:57 d Exp $
- *
- */
-
-#include "SYS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *);
- * v0 will contain the return value (zero if lock obtained).
- */
-LEAF(_atomic_lock,0)
- LDGP(pv)
-
- /* Get the existing lock value and lock memory: */
- ldq_l v0, 0(a0)
-
- /* Branch if already locked: */
- bne v0, already_locked
-
- /* Not locked, so store 1: */
- mov 1, t0
- stq_c t0, 0(a0)
-
- /* Obtained the lock: */
- br done
-
-already_locked:
- /* Already locked so put the value back and unlock memory: */
- stq_c v0, 0(a0)
-
-done:
- RET
-END(_atomic_lock)
diff --git a/lib/libc_r/arch/alpha/_atomic_lock.c b/lib/libc_r/arch/alpha/_atomic_lock.c
new file mode 100644
index 00000000000..74575fc11d7
--- /dev/null
+++ b/lib/libc_r/arch/alpha/_atomic_lock.c
@@ -0,0 +1,33 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:35 d Exp $ */
+/* Atomic lock for alpha */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t * lock)
+{
+ register_t old;
+ register_t new;
+ int success;
+
+ do {
+ /* load the value of the thread-lock (lock mem on load) */
+ __asm__( "ldq_l %0, %1" : "=r"(old) : "m"(*lock) );
+ if (old)
+ new = old; /* in-use: put it back */
+ else
+ new = 1; /* free: store a 1 in the lock */
+
+ success = 0;
+ /* store the new value of the thread-lock (unlock mem on store) */
+ /*
+ * XXX may need to add large branch forward for main line
+ * branch prediction to be right :(
+ */
+ __asm__( "stq_c %2, %0; beq %2, 1f; mov 1,%1; 1:"
+ : "=m"(*lock), "=r"(success)
+ : "r"(new) );
+ } while (!success);
+
+ return old;
+}
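The ldq_l/stq_c pair above is alpha's load-locked/store-conditional: the
conditional store succeeds only if nothing wrote the location since the
locked load, and the loop retries otherwise. For comparison only, the same
acquire loop rendered in portable C11 atomics (an anachronistic sketch, not
part of this commit; compare_exchange_weak may fail spuriously, exactly like
stq_c):

    #include <stdatomic.h>

    long
    llsc_style_lock(atomic_long *lock)
    {
        long old;

        do {
            old = 0;
            /* try to change 0 -> 1; on failure, old gets current value */
            if (atomic_compare_exchange_weak(lock, &old, 1))
                return 0;       /* was free: acquired */
        } while (old == 0);     /* spurious failure: retry */
        return old;             /* already held */
    }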
diff --git a/lib/libc_r/arch/i386/_atomic_lock.S b/lib/libc_r/arch/i386/_atomic_lock.S
deleted file mode 100644
index fbdc508eb52..00000000000
--- a/lib/libc_r/arch/i386/_atomic_lock.S
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- * copyright Douglas Santry 1996
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the above copyright is retained
- * in the source form.
- *
- * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $
- *
- */
-
-#if defined(LIBC_RCS) && !defined(lint)
- .text
- .asciz "$Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $"
-#endif /* LIBC_RCS and not lint */
-
-#include "DEFS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *);
- * eax will contain the return value (zero if lock obtained).
- */
-ENTRY(_atomic_lock)
- movl 4(%esp), %ecx
- movl $1, %eax
- xchg %eax, (%ecx)
- ret
-
diff --git a/lib/libc_r/arch/i386/_atomic_lock.c b/lib/libc_r/arch/i386/_atomic_lock.c
new file mode 100644
index 00000000000..609dc42fb01
--- /dev/null
+++ b/lib/libc_r/arch/i386/_atomic_lock.c
@@ -0,0 +1,26 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:36 d Exp $ */
+/*
+ * Atomic lock acquire for i386.
+ */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing '1' (the locked state).
+ */
+ old = 1;
+ __asm__("xchg %0, %1"
+ : "=r" (old), "=m" (*lock) : "0"(old), "1" (*lock) );
+ /*
+ * So now there is a 1 in *lock and 'old' contains what
+ * used to be in the lock. We return 0 if the lock was acquired
+ * (i.e. its old value was 0), or 1 otherwise.
+ */
+ return old;
+}
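On the i386, xchg with a memory operand asserts the bus lock implicitly, so
this is a one-instruction test-and-set. The C11 equivalent (again a sketch
for comparison, not part of this commit):

    #include <stdatomic.h>

    long
    xchg_style_lock(atomic_long *lock)
    {
        /* unconditionally swap in 1; non-zero means it was already held */
        return atomic_exchange(lock, 1);
    }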
diff --git a/lib/libc_r/arch/m68k/_atomic_lock.c b/lib/libc_r/arch/m68k/_atomic_lock.c
new file mode 100644
index 00000000000..be874ad2892
--- /dev/null
+++ b/lib/libc_r/arch/m68k/_atomic_lock.c
@@ -0,0 +1,27 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:36 d Exp $ */
+/*
+ * Atomic lock for m68k
+ */
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+
+ /*
+ * The Compare And Swap instruction (mc68020 and above)
+ * compares its first operand with the memory addressed by
+ * the third. If they are the same value, the second operand
+ * is stored at the address. Otherwise the 1st operand (register)
+ * is loaded with the contents of the 3rd operand.
+ *
+ * old = 0;
+ * CAS(old, 1, *lock);
+ * return old;
+ */
+ old = 0;
+ __asm__("casl %0, %2, %1" : "=d"(old), "=m"(*lock)
+ : "d"(1), "0"(old));
+ return old;
+}
diff --git a/lib/libc_r/arch/m68k/uthread_machdep.h b/lib/libc_r/arch/m68k/uthread_machdep.h
new file mode 100644
index 00000000000..3df12262e28
--- /dev/null
+++ b/lib/libc_r/arch/m68k/uthread_machdep.h
@@ -0,0 +1,38 @@
+/*
+ * OpenBSD/m68k machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/20 11:15:36 d Exp $
+ */
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* fsave privileged instr */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* frestore privileged instr */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* entry */ \
+ (thr)->saved_jmp_buf[5] = (long) entry; \
+ /* stack */ \
+ (thr)->saved_jmp_buf[2] = (long) (thr)->stack \
+ + (pattr)->stacksize_attr \
+ - sizeof(double); \
+ }
+
+#define _thread_machdep_longjmp(a,v) _longjmp(a,v)
+#define _thread_machdep_setjmp(a) _setjmp(a)
+
+struct _machdep_struct {
+ /* char saved_fp[108]; */
+ int dummy;
+};
+
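The jmp_buf surgery above is the classic user-level context trick: plant the
thread's entry point where setjmp saved the PC and the top of its stack where
it saved the SP, and the first longjmp into that buffer "returns" into the
new thread. Reduced to its essence (a sketch only: slot indices 5 and 2 are
the m68k PC/SP positions assumed here, jmp_buf is assumed to be an array of
longs as on OpenBSD/m68k, and jumping onto a fresh stack this way is
deliberately machine-dependent):

    #include <setjmp.h>

    static jmp_buf ctx;
    static char stack[8192];

    static void
    entry(void)
    {
        /* now running on 'stack'; must never return (no caller frame) */
        for (;;)
            ;
    }

    int
    main(void)
    {
        if (_setjmp(ctx) == 0) {
            ctx[5] = (long)entry;               /* PC slot */
            ctx[2] = (long)(stack + sizeof(stack)
                - sizeof(double));              /* SP slot */
            _longjmp(ctx, 1);   /* "returns" into entry() */
        }
        return 0;
    }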
diff --git a/lib/libc_r/arch/mips/_atomic_lock.S b/lib/libc_r/arch/mips/_atomic_lock.S
deleted file mode 100644
index 169852b9c75..00000000000
--- a/lib/libc_r/arch/mips/_atomic_lock.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/11/09 03:13:14 d Exp $
- */
-
-#include "SYS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *a0);
- * v0 will contain the return value (zero if lock obtained).
- */
-
-/*
- * XXXXXX THIS LOCK FUNCTION IS TOTALLY BOGUS XXXXXXXXX
- * pefo@ says that for R4000 processors, there is a way to do this
- * atomically, but for R3000 you will need to context switch.
- * Specifically he says the 'll' and 'sc' instructions can be used for mips2.
- */
-LEAF(_atomic_lock)
- .set noreorder
- .set nomacro
-
- /* Get the existing lock value and lock memory: */
- ori t0,zero,1
- lw v0,0(a0)
- sw t0,0(a0)
- j ra
- nop
-
- .set macro
- .set reorder
-END(_atomic_lock)
-
diff --git a/lib/libc_r/arch/mips/_atomic_lock.c b/lib/libc_r/arch/mips/_atomic_lock.c
new file mode 100644
index 00000000000..088d4a7a643
--- /dev/null
+++ b/lib/libc_r/arch/mips/_atomic_lock.c
@@ -0,0 +1,51 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:37 d Exp $ */
+/*
+ * Atomic lock for mips
+ */
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure.
+ */
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+#if __mips >= 2
+ register_t temp;
+
+ do {
+ /*
+ * On a mips2 machine and above, we can use ll/sc.
+ * Read the lock and tag the cache line with a 'load linked'
+ * instruction. (Register 17 (LLAddr) will hold the
+ * physical address of lock for diagnostic purposes);
+ */
+ __asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
+ if (old)
+ break; /* already locked */
+ /*
+ * Try to store a 1 at the tagged lock address. If
+ * anyone else has since written it, the tag on the cache
+ * line will have been wiped, and temp will be set to zero
+ * by the 'store conditional' instruction.
+ */
+ temp = 1;
+ __asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
+ : "0"(temp));
+ } while (temp == 0);
+#else
+ /*
+ * Older MIPS CPUs have no way of doing an atomic lock
+ * without some kind of switch to supervisor mode.
+ */
+
+ old = _thread_slow_atomic_lock(lock);
+
+#endif
+ return old;
+}
diff --git a/lib/libc_r/arch/mips/uthread_machdep.h b/lib/libc_r/arch/mips/uthread_machdep.h
index 605b900f8a7..0470a810663 100644
--- a/lib/libc_r/arch/mips/uthread_machdep.h
+++ b/lib/libc_r/arch/mips/uthread_machdep.h
@@ -1,7 +1,7 @@
/*
* OpenBSD/mips machine-dependent thread macros
*
- * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/09 03:13:14 d Exp $
+ * $OpenBSD: uthread_machdep.h,v 1.2 1998/11/20 11:15:37 d Exp $
*/
#include <machine/regnum.h>
diff --git a/lib/libc_r/arch/sparc/_atomic_lock.c b/lib/libc_r/arch/sparc/_atomic_lock.c
new file mode 100644
index 00000000000..35b50e3ada5
--- /dev/null
+++ b/lib/libc_r/arch/sparc/_atomic_lock.c
@@ -0,0 +1,10 @@
+/* $OpenBSD */
+/* atomic lock for sparc */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t * lock)
+{
+ return _thread_slow_atomic_lock(lock);
+}
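sparc simply defers to the slow path for now. For reference, SPARC v7/v8 does
have a native test-and-set, ldstub (atomically load a byte and set it to
0xff), so a native version could eventually look something like this (a
sketch only, not part of this commit; it locks the first byte of the word,
which is enough to make the word non-zero on big-endian sparc):

    register_t
    _atomic_lock(volatile register_t *lock)
    {
        register_t old;

        /* ldstub: atomic byte load + store of 0xff */
        __asm__("ldstub [%1], %0" : "=r"(old) : "r"(lock) : "memory");
        return old;     /* zero means the lock was free and is now ours */
    }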
diff --git a/lib/libc_r/arch/sparc/uthread_machdep.h b/lib/libc_r/arch/sparc/uthread_machdep.h
new file mode 100644
index 00000000000..56103122674
--- /dev/null
+++ b/lib/libc_r/arch/sparc/uthread_machdep.h
@@ -0,0 +1,45 @@
+/*
+ * OpenBSD/sparc machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/20 11:15:37 d Exp $
+ */
+
+#include <sys/signal.h>
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* XXX tdb */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* XXX tdb */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* entry */ \
+ (thr)->saved_jmp_buf[1] = (long) entry; \
+ /* stack */ \
+ (thr)->saved_jmp_buf[0] = (long) (thr)->stack \
+ + (pattr)->stacksize_attr \
+ - sizeof(double); \
+ }
+
+/*
+ * XXX high chance of longjmp botch (see libc/arch/sparc/gen/_setjmp.S)
+ * because it uses the frame pointer to pop off frames.. we don't want
+ * that.. what to do? fudge %fp? do our own setjmp?
+ */
+#define _thread_machdep_longjmp(a,v) _longjmp(a,v)
+#define _thread_machdep_setjmp(a) _setjmp(a)
+
+struct _machdep_struct {
+ /* char saved_fp[???]; */
+ int dummy;
+};
+
diff --git a/lib/libc_r/sys/Makefile.inc b/lib/libc_r/sys/Makefile.inc
index bf19b6dd111..be55ff3747b 100644
--- a/lib/libc_r/sys/Makefile.inc
+++ b/lib/libc_r/sys/Makefile.inc
@@ -1,9 +1,9 @@
-# $Id: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
-# $OpenBSD: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
+# $Id: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
+# $OpenBSD: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}
-SRCS+= uthread_error.c _atomic_lock.S _sys_aliases.S
+SRCS+= uthread_error.c _atomic_lock.c _sys_aliases.S slow_atomic_lock.c
_sys_aliases.S: ${.CURDIR}/Makefile ${LIBCSRCDIR}/sys/Makefile.inc
(echo '#include "SYS.h"'; \
diff --git a/lib/libc_r/sys/slow_atomic_lock.c b/lib/libc_r/sys/slow_atomic_lock.c
new file mode 100644
index 00000000000..96be0897dc9
--- /dev/null
+++ b/lib/libc_r/sys/slow_atomic_lock.c
@@ -0,0 +1,33 @@
+/* $OpenBSD: slow_atomic_lock.c,v 1.1 1998/11/20 11:15:38 d Exp $ */
+
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure.
+ * This uses signal masking to make sure that no other thread
+ * can modify the lock while processing, hence it is very slow.
+ */
+register_t
+_thread_slow_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+ sigset_t oldset, newset = (sigset_t)~0;
+
+ /* block signals - incurs a context switch */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &newset, &oldset) < 0)
+ PANIC("_atomic_lock block");
+
+ old = *lock;
+ if (old == 0)
+ *lock = 1;
+
+ /* restore signal mask to what it was */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+ PANIC("_atomic_lock restore");
+
+ return old;
+}
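The slow path is correct only because libc_r is a single-process user-thread
library: thread preemption is driven by signals, so with every signal masked
nothing else can run between the load and the store. The window it closes is
the classic test-and-set race (annotated excerpt of the routine above):

    old = *lock;        /* (1) without masking, a context switch here...  */
    if (old == 0)
        *lock = 1;      /* (2) ...lets two threads both see 0 and "win"   */

This is also why the routine is "very slow": two sigprocmask system calls per
lock attempt, versus one or two instructions on the fast paths.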
diff --git a/lib/libpthread/arch/alpha/_atomic_lock.S b/lib/libpthread/arch/alpha/_atomic_lock.S
deleted file mode 100644
index ba0022e4a4d..00000000000
--- a/lib/libpthread/arch/alpha/_atomic_lock.S
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- * copyright Douglas Santry 1996
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the above copyright is retained
- * in the source form.
- *
- * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:57 d Exp $
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/08/28 01:54:57 d Exp $
- *
- */
-
-#include "SYS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *);
- * v0 will contain the return value (zero if lock obtained).
- */
-LEAF(_atomic_lock,0)
- LDGP(pv)
-
- /* Get the existing lock value and lock memory: */
- ldq_l v0, 0(a0)
-
- /* Branch if already locked: */
- bne v0, already_locked
-
- /* Not locked, so store 1: */
- mov 1, t0
- stq_c t0, 0(a0)
-
- /* Obtained the lock: */
- br done
-
-already_locked:
- /* Already locked so put the value back and unlock memory: */
- stq_c v0, 0(a0)
-
-done:
- RET
-END(_atomic_lock)
diff --git a/lib/libpthread/arch/alpha/_atomic_lock.c b/lib/libpthread/arch/alpha/_atomic_lock.c
new file mode 100644
index 00000000000..74575fc11d7
--- /dev/null
+++ b/lib/libpthread/arch/alpha/_atomic_lock.c
@@ -0,0 +1,33 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:35 d Exp $ */
+/* Atomic lock for alpha */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t * lock)
+{
+ register_t old;
+ register_t new;
+ int success;
+
+ do {
+ /* load the value of the thread-lock (lock mem on load) */
+ __asm__( "ldq_l %0, %1" : "=r"(old) : "m"(*lock) );
+ if (old)
+ new = old; /* in-use: put it back */
+ else
+ new = 1; /* free: store a 1 in the lock */
+
+ success = 0;
+ /* store the new value of the thread-lock (unlock mem on store) */
+ /*
+ * XXX may need to add large branch forward for main line
+ * branch prediction to be right :(
+ */
+ __asm__( "stq_c %2, %0; beq %2, 1f; mov 1,%1; 1:"
+ : "=m"(*lock), "=r"(success)
+ : "r"(new) );
+ } while (!success);
+
+ return old;
+}
diff --git a/lib/libpthread/arch/i386/_atomic_lock.S b/lib/libpthread/arch/i386/_atomic_lock.S
deleted file mode 100644
index fbdc508eb52..00000000000
--- a/lib/libpthread/arch/i386/_atomic_lock.S
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- * copyright Douglas Santry 1996
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the above copyright is retained
- * in the source form.
- *
- * THIS SOFTWARE IS PROVIDED BY Douglas Santry AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL Douglas Santry OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $
- *
- */
-
-#if defined(LIBC_RCS) && !defined(lint)
- .text
- .asciz "$Id: _atomic_lock.S,v 1.1 1998/08/28 01:54:58 d Exp $"
-#endif /* LIBC_RCS and not lint */
-
-#include "DEFS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *);
- * eax will contain the return value (zero if lock obtained).
- */
-ENTRY(_atomic_lock)
- movl 4(%esp), %ecx
- movl $1, %eax
- xchg %eax, (%ecx)
- ret
-
diff --git a/lib/libpthread/arch/i386/_atomic_lock.c b/lib/libpthread/arch/i386/_atomic_lock.c
new file mode 100644
index 00000000000..609dc42fb01
--- /dev/null
+++ b/lib/libpthread/arch/i386/_atomic_lock.c
@@ -0,0 +1,26 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:36 d Exp $ */
+/*
+ * Atomic lock acquire for i386.
+ */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing '1' (the locked state).
+ */
+ old = 1;
+ __asm__("xchg %0, %1"
+ : "=r" (old), "=m" (*lock) : "0"(old), "1" (*lock) );
+ /*
+ * So now there is a 1 in *lock and 'old' contains what
+ * used to be in the lock. We return 0 if the lock was acquired
+ * (i.e. its old value was 0), or 1 otherwise.
+ */
+ return old;
+}
diff --git a/lib/libpthread/arch/m68k/_atomic_lock.c b/lib/libpthread/arch/m68k/_atomic_lock.c
new file mode 100644
index 00000000000..be874ad2892
--- /dev/null
+++ b/lib/libpthread/arch/m68k/_atomic_lock.c
@@ -0,0 +1,27 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:36 d Exp $ */
+/*
+ * Atomic lock for m68k
+ */
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+
+ /*
+ * The Compare And Swap instruction (mc68020 and above)
+ * compares its first operand with the memory addressed by
+ * the third. If they are the same value, the second operand
+ * is stored at the address. Otherwise the 1st operand (register)
+ * is loaded with the contents of the 3rd operand.
+ *
+ * old = 0;
+ * CAS(old, 1, *lock);
+ * return old;
+ */
+ old = 0;
+ __asm__("casl %0, %2, %1" : "=d"(old), "=m"(*lock)
+ : "d"(1), "0"(old));
+ return old;
+}
diff --git a/lib/libpthread/arch/m68k/uthread_machdep.h b/lib/libpthread/arch/m68k/uthread_machdep.h
new file mode 100644
index 00000000000..3df12262e28
--- /dev/null
+++ b/lib/libpthread/arch/m68k/uthread_machdep.h
@@ -0,0 +1,38 @@
+/*
+ * OpenBSD/m68k machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/20 11:15:36 d Exp $
+ */
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* fsave privileged instr */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* frestore privileged instr */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* entry */ \
+ (thr)->saved_jmp_buf[5] = (long) entry; \
+ /* stack */ \
+ (thr)->saved_jmp_buf[2] = (long) (thr)->stack \
+ + (pattr)->stacksize_attr \
+ - sizeof(double); \
+ }
+
+#define _thread_machdep_longjmp(a,v) _longjmp(a,v)
+#define _thread_machdep_setjmp(a) _setjmp(a)
+
+struct _machdep_struct {
+ /* char saved_fp[108]; */
+ int dummy;
+};
+
diff --git a/lib/libpthread/arch/mips/_atomic_lock.S b/lib/libpthread/arch/mips/_atomic_lock.S
deleted file mode 100644
index 169852b9c75..00000000000
--- a/lib/libpthread/arch/mips/_atomic_lock.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * $OpenBSD: _atomic_lock.S,v 1.1 1998/11/09 03:13:14 d Exp $
- */
-
-#include "SYS.h"
-
-/*
- * Atomically lock a location with an identifier, provided the location
- * is not currently locked.
- *
- * long _atomic_lock(long *a0);
- * v0 will contain the return value (zero if lock obtained).
- */
-
-/*
- * XXXXXX THIS LOCK FUNCTION IS TOTALLY BOGUS XXXXXXXXX
- * pefo@ says that for R4000 processors, there is a way to do this
- * atomically, but for R3000 you will need to context switch.
- * Specifically he says the 'll' and 'sc' instructions can be used for mips2.
- */
-LEAF(_atomic_lock)
- .set noreorder
- .set nomacro
-
- /* Get the existing lock value and lock memory: */
- ori t0,zero,1
- lw v0,0(a0)
- sw t0,0(a0)
- j ra
- nop
-
- .set macro
- .set reorder
-END(_atomic_lock)
-
diff --git a/lib/libpthread/arch/mips/_atomic_lock.c b/lib/libpthread/arch/mips/_atomic_lock.c
new file mode 100644
index 00000000000..088d4a7a643
--- /dev/null
+++ b/lib/libpthread/arch/mips/_atomic_lock.c
@@ -0,0 +1,51 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 1998/11/20 11:15:37 d Exp $ */
+/*
+ * Atomic lock for mips
+ */
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure.
+ */
+register_t
+_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+#if __mips >= 2
+ register_t temp;
+
+ do {
+ /*
+ * On a mips2 machine and above, we can use ll/sc.
+ * Read the lock and tag the cache line with a 'load linked'
+ * instruction. (Register 17 (LLAddr) will hold the
+ * physical address of lock for diagnostic purposes);
+ */
+ __asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
+ if (old)
+ break; /* already locked */
+ /*
+ * Try to store a 1 at the tagged lock address. If
+ * anyone else has since written it, the tag on the cache
+ * line will have been wiped, and temp will be set to zero
+ * by the 'store conditional' instruction.
+ */
+ temp = 1;
+ __asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
+ : "0"(temp));
+ } while (temp == 0);
+#else
+ /*
+ * Older MIPS CPUs have no way of doing an atomic lock
+ * without some kind of switch to supervisor mode.
+ */
+
+ old = _thread_slow_atomic_lock(lock);
+
+#endif
+ return old;
+}
diff --git a/lib/libpthread/arch/mips/uthread_machdep.h b/lib/libpthread/arch/mips/uthread_machdep.h
index 605b900f8a7..0470a810663 100644
--- a/lib/libpthread/arch/mips/uthread_machdep.h
+++ b/lib/libpthread/arch/mips/uthread_machdep.h
@@ -1,7 +1,7 @@
/*
* OpenBSD/mips machine-dependent thread macros
*
- * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/09 03:13:14 d Exp $
+ * $OpenBSD: uthread_machdep.h,v 1.2 1998/11/20 11:15:37 d Exp $
*/
#include <machine/regnum.h>
diff --git a/lib/libpthread/arch/sparc/_atomic_lock.c b/lib/libpthread/arch/sparc/_atomic_lock.c
new file mode 100644
index 00000000000..35b50e3ada5
--- /dev/null
+++ b/lib/libpthread/arch/sparc/_atomic_lock.c
@@ -0,0 +1,10 @@
+/* $OpenBSD */
+/* atomic lock for sparc */
+
+#include "spinlock.h"
+
+register_t
+_atomic_lock(volatile register_t * lock)
+{
+ return _thread_slow_atomic_lock(lock);
+}
diff --git a/lib/libpthread/arch/sparc/uthread_machdep.h b/lib/libpthread/arch/sparc/uthread_machdep.h
new file mode 100644
index 00000000000..56103122674
--- /dev/null
+++ b/lib/libpthread/arch/sparc/uthread_machdep.h
@@ -0,0 +1,45 @@
+/*
+ * OpenBSD/sparc machine-dependent thread macros
+ *
+ * $OpenBSD: uthread_machdep.h,v 1.1 1998/11/20 11:15:37 d Exp $
+ */
+
+#include <sys/signal.h>
+
+/* save the floating point state of a thread */
+#define _thread_machdep_save_float_state(thr) \
+ { \
+ /* XXX tdb */ \
+ }
+
+/* restore the floating point state of a thread */
+#define _thread_machdep_restore_float_state(thr) \
+ { \
+ /* XXX tdb */ \
+ }
+
+/* initialise the jmpbuf stack frame so it continues from entry */
+
+#define _thread_machdep_thread_create(thr, entry, pattr) \
+ { \
+ /* entry */ \
+ (thr)->saved_jmp_buf[1] = (long) entry; \
+ /* stack */ \
+ (thr)->saved_jmp_buf[0] = (long) (thr)->stack \
+ + (pattr)->stacksize_attr \
+ - sizeof(double); \
+ }
+
+/*
+ * XXX high chance of longjmp botch (see libc/arch/sparc/gen/_setjmp.S)
+ * because it uses the frame pointer to pop off frames.. we don't want
+ * that.. what to do? fudge %fp? do our own setjmp?
+ */
+#define _thread_machdep_longjmp(a,v) _longjmp(a,v)
+#define _thread_machdep_setjmp(a) _setjmp(a)
+
+struct _machdep_struct {
+ /* char saved_fp[???]; */
+ int dummy;
+};
+
diff --git a/lib/libpthread/sys/Makefile.inc b/lib/libpthread/sys/Makefile.inc
index bf19b6dd111..be55ff3747b 100644
--- a/lib/libpthread/sys/Makefile.inc
+++ b/lib/libpthread/sys/Makefile.inc
@@ -1,9 +1,9 @@
-# $Id: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
-# $OpenBSD: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
+# $Id: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
+# $OpenBSD: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}
-SRCS+= uthread_error.c _atomic_lock.S _sys_aliases.S
+SRCS+= uthread_error.c _atomic_lock.c _sys_aliases.S slow_atomic_lock.c
_sys_aliases.S: ${.CURDIR}/Makefile ${LIBCSRCDIR}/sys/Makefile.inc
(echo '#include "SYS.h"'; \
diff --git a/lib/libpthread/sys/slow_atomic_lock.c b/lib/libpthread/sys/slow_atomic_lock.c
new file mode 100644
index 00000000000..96be0897dc9
--- /dev/null
+++ b/lib/libpthread/sys/slow_atomic_lock.c
@@ -0,0 +1,33 @@
+/* $OpenBSD: slow_atomic_lock.c,v 1.1 1998/11/20 11:15:38 d Exp $ */
+
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure.
+ * This uses signal masking to make sure that no other thread
+ * can modify the lock while processing, hence it is very slow.
+ */
+register_t
+_thread_slow_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+ sigset_t oldset, newset = (sigset_t)~0;
+
+ /* block signals - incurs a context switch */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &newset, &oldset) < 0)
+ PANIC("_atomic_lock block");
+
+ old = *lock;
+ if (old == 0)
+ *lock = 1;
+
+ /* restore signal mask to what it was */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+ PANIC("_atomic_lock restore");
+
+ return old;
+}