summaryrefslogtreecommitdiff
path: root/lib/libpthread
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>2002-06-06 15:43:05 +0000
committerArtur Grabowski <art@cvs.openbsd.org>2002-06-06 15:43:05 +0000
commite841821fc771654051660b22445a0c2480058635 (patch)
treec165a300faef9a4090efe1406ba0cc53fcf55771 /lib/libpthread
parent0e99a652d12cb73e2c41bf8a32d0a3f2cbc19dc3 (diff)
Implement _atomic_lock in assembler to make it more readable.
plus some various assembler improvements. Tested by various people a few months ago and in my tree for almost 6 months now.
Diffstat (limited to 'lib/libpthread')
-rw-r--r--lib/libpthread/arch/alpha/_atomic_lock.c37
-rw-r--r--lib/libpthread/arch/alpha/uthread_machdep_asm.S19
2 files changed, 19 insertions, 37 deletions
diff --git a/lib/libpthread/arch/alpha/_atomic_lock.c b/lib/libpthread/arch/alpha/_atomic_lock.c
index 5f11655ab1f..912d7c97709 100644
--- a/lib/libpthread/arch/alpha/_atomic_lock.c
+++ b/lib/libpthread/arch/alpha/_atomic_lock.c
@@ -1,42 +1,11 @@
-/* $OpenBSD: _atomic_lock.c,v 1.5 1999/05/26 00:11:27 d Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.6 2002/06/06 15:43:04 art Exp $ */
/*
- * Atomic lock for alpha
+ * Atomic lock for alpha.
*/
#include "spinlock.h"
-int
-_atomic_lock(volatile _spinlock_lock_t * lock)
-{
- _spinlock_lock_t old;
- _spinlock_lock_t new;
- int success;
-
- do {
- /* load the value of the thread-lock (lock mem on load) */
- __asm__( "ldq_l %0, %1" : "=r"(old) : "m"(*lock) );
- if (old)
- new = old; /* locked: no change */
- else
- new = _SPINLOCK_LOCKED; /* unlocked: grab it */
-
- success = 0;
- /* store the new value of the thrd-lock (unlock mem on store) */
- /*
- * XXX may need to add *large* branch forward for main line
- * branch prediction to be right :( [this note from linux]
- */
- __asm__( "stq_c %2, %0\n"
- "beq %2, 1f\n"
- "mb\n"
- "mov 1, %1\n"
- "1:"
- : "=m"(*lock), "=r"(success)
- : "r"(new) );
- } while (!success);
-
- return (old != _SPINLOCK_UNLOCKED);
-}
+/* _atomic_lock is implemented in assembler. */
int
_atomic_is_locked(volatile _spinlock_lock_t * lock)
diff --git a/lib/libpthread/arch/alpha/uthread_machdep_asm.S b/lib/libpthread/arch/alpha/uthread_machdep_asm.S
index c702ea9cd47..ce5378d280f 100644
--- a/lib/libpthread/arch/alpha/uthread_machdep_asm.S
+++ b/lib/libpthread/arch/alpha/uthread_machdep_asm.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_machdep_asm.S,v 1.1 2000/09/25 05:44:13 d Exp $ */
+/* $OpenBSD: uthread_machdep_asm.S,v 1.2 2002/06/06 15:43:04 art Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
#include <machine/asm.h>
@@ -8,11 +8,12 @@
#define ALIGN(x) (((x)+15)&~15)
#define FRAMESIZE ALIGN(FPOFF(8))
+ .set noreorder
.globl _thread_machdep_switch
.ent _thread_machdep_switch, 2
_thread_machdep_switch:
.frame sp, FRAMESIZE, ra
-
+ ldgp gp, 0(pv)
lda sp, -FRAMESIZE(sp)
stq ra, INTOFF(0)(sp)
@@ -35,6 +36,7 @@ _thread_machdep_switch:
stt fs7, FPOFF(7)(sp)
stq sp, 0(a1)
+ or a0, zero, pv
ldq sp, 0(a0)
ldt fs7, FPOFF(7)(sp)
@@ -57,7 +59,18 @@ _thread_machdep_switch:
ldq ra, INTOFF(0)(sp)
lda sp,FRAMESIZE(sp)
-
RET
.end _thread_machdep_switch
+
+LEAF(_atomic_lock,1)
+ LDGP(pv)
+
+0: ldq_l v0, 0(a0) /* read existing lock value */
+ mov 1, t0 /* locked value to store */
+ stq_c t0, 0(a0) /* attempt to store, status in t0 */
+	beq	t0, 1f	/* branch forward to optimise prediction */
+ mb /* sync with other processors */
+ RET /* return with v0==0 if lock obtained */
+1: br 0b /* loop to try again */
+END(_atomic_lock)