summaryrefslogtreecommitdiff
path: root/lib/libpthread/arch/alpha/uthread_machdep_asm.S
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>2002-06-06 15:43:05 +0000
committerArtur Grabowski <art@cvs.openbsd.org>2002-06-06 15:43:05 +0000
commite841821fc771654051660b22445a0c2480058635 (patch)
treec165a300faef9a4090efe1406ba0cc53fcf55771 /lib/libpthread/arch/alpha/uthread_machdep_asm.S
parent0e99a652d12cb73e2c41bf8a32d0a3f2cbc19dc3 (diff)
Implement _atomic_lock in assembler to make it more readable.
Plus various assembler improvements. Tested by various people a few months ago and in my tree for almost 6 months now.
Diffstat (limited to 'lib/libpthread/arch/alpha/uthread_machdep_asm.S')
-rw-r--r--lib/libpthread/arch/alpha/uthread_machdep_asm.S19
1 files changed, 16 insertions, 3 deletions
diff --git a/lib/libpthread/arch/alpha/uthread_machdep_asm.S b/lib/libpthread/arch/alpha/uthread_machdep_asm.S
index c702ea9cd47..ce5378d280f 100644
--- a/lib/libpthread/arch/alpha/uthread_machdep_asm.S
+++ b/lib/libpthread/arch/alpha/uthread_machdep_asm.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_machdep_asm.S,v 1.1 2000/09/25 05:44:13 d Exp $ */
+/* $OpenBSD: uthread_machdep_asm.S,v 1.2 2002/06/06 15:43:04 art Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
#include <machine/asm.h>
@@ -8,11 +8,12 @@
#define ALIGN(x) (((x)+15)&~15)
#define FRAMESIZE ALIGN(FPOFF(8))
+ .set noreorder
.globl _thread_machdep_switch
.ent _thread_machdep_switch, 2
_thread_machdep_switch:
.frame sp, FRAMESIZE, ra
-
+ ldgp gp, 0(pv)
lda sp, -FRAMESIZE(sp)
stq ra, INTOFF(0)(sp)
@@ -35,6 +36,7 @@ _thread_machdep_switch:
stt fs7, FPOFF(7)(sp)
stq sp, 0(a1)
+ or a0, zero, pv
ldq sp, 0(a0)
ldt fs7, FPOFF(7)(sp)
@@ -57,7 +59,18 @@ _thread_machdep_switch:
ldq ra, INTOFF(0)(sp)
lda sp,FRAMESIZE(sp)
-
RET
.end _thread_machdep_switch
+
+LEAF(_atomic_lock,1)
+ LDGP(pv)
+
+0: ldq_l v0, 0(a0) /* read existing lock value */
+ mov 1, t0 /* locked value to store */
+ stq_c t0, 0(a0) /* attempt to store, status in t0 */
+	beq	t0, 1f			/* branch forward to optimise prediction */
+ mb /* sync with other processors */
+ RET /* return with v0==0 if lock obtained */
+1: br 0b /* loop to try again */
+END(_atomic_lock)