/* $OpenBSD: _atomic_lock.c,v 1.5 1999/05/26 00:11:27 d Exp $ */
/*
* Atomic lock for alpha
*/
#include "spinlock.h"
/*
 * Attempt to acquire the spinlock at *lock using Alpha's
 * load-locked/store-conditional pair (ldq_l / stq_c), retrying until
 * the conditional store succeeds.
 *
 * Returns 0 if the lock was free (and is now held by the caller),
 * non-zero if it was already held.
 */
int
_atomic_lock(volatile _spinlock_lock_t * lock)
{
	_spinlock_lock_t old;	/* value observed in *lock by ldq_l */
	_spinlock_lock_t new;	/* value we try to store conditionally */
	int success;		/* set to 1 only when stq_c succeeds */

	do {
		/* load the value of the thread-lock (lock mem on load) */
		__asm__( "ldq_l %0, %1" : "=r"(old) : "m"(*lock) );
		/*
		 * NOTE(review): the ldq_l/stq_c pair is split across two
		 * asm statements; any compiler-generated memory access in
		 * between can clear the lock flag and force a retry —
		 * confirm this separation is intentional/tolerable here.
		 */
		if (old)
			new = old;		/* locked: no change */
		else
			new = _SPINLOCK_LOCKED;	/* unlocked: grab it */
		success = 0;
		/* store the new value of the thrd-lock (unlock mem on store) */
		/*
		 * XXX may need to add *large* branch forward for main line
		 * branch prediction to be right :( [this note from linux]
		 */
		/*
		 * stq_c writes 1/0 (success/failure) back into its source
		 * register; on success, "mb" orders the acquire and
		 * success is set to 1.
		 * NOTE(review): %2 is declared as an input-only operand
		 * ("r"(new)) yet stq_c clobbers it with the status value —
		 * this looks like it should be an in/out ("+r") operand;
		 * confirm against the GCC extended-asm rules.
		 */
		__asm__( "stq_c %2, %0\n"
		    "beq %2, 1f\n"
		    "mb\n"
		    "mov 1, %1\n"
		    "1:"
		    : "=m"(*lock), "=r"(success)
		    : "r"(new) );
	} while (!success);	/* loop until the conditional store lands */
	/* Non-zero means the lock was already held by someone else. */
	return (old != _SPINLOCK_UNLOCKED);
}
/*
 * Report whether the spinlock at *lock is currently held.
 * Returns non-zero when the stored value differs from
 * _SPINLOCK_UNLOCKED, zero otherwise.  Purely a snapshot read;
 * does not modify the lock.
 */
int
_atomic_is_locked(volatile _spinlock_lock_t * lock)
{
	_spinlock_lock_t cur;

	cur = *lock;
	if (cur == _SPINLOCK_UNLOCKED)
		return (0);
	return (1);
}