/*	$OpenBSD: _atomic_lock.c,v 1.6 1999/02/02 01:36:00 imp Exp $	*/

/*
 * Atomic lock for mips
 */

#include "pthread.h"
#include "pthread_private.h"
#include "spinlock.h"
#include <signal.h>

/*
 * uthread atomic lock:
 *	attempt to acquire a lock (by giving it a non-zero value).
 *	Return zero on success, or the lock's value on failure.
 */
int
_atomic_lock(volatile _spinlock_lock_t *lock)
{
#if __mips >= 2
	_spinlock_lock_t old;
	_spinlock_lock_t temp;

	do {
		/*
		 * On a mips2 machine and above, we can use ll/sc.
		 * Read the lock and tag the cache line with a 'load linked'
		 * instruction. (Register 17 (LLAddr) will hold the
		 * physical address of lock for diagnostic purposes);
		 * (Under pathologically heavy swapping, the physaddr may
		 * change! XXX)
		 */
		__asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
		if (old != _SPINLOCK_UNLOCKED)
			break;		/* already locked */
		/*
		 * Try and store a 1 at the tagged lock address.  If
		 * anyone else has since written it, the tag on the cache
		 * line will have been wiped, and temp will be set to zero
		 * by the 'store conditional' instruction.
		 */
		temp = _SPINLOCK_LOCKED;
		__asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
		    : "0"(temp));
	} while (temp == 0);

	return (old != _SPINLOCK_UNLOCKED);
#else
	/*
	 * Older MIPS cpus have no way of doing an atomic lock
	 * without some kind of shift to supervisor mode.
	 */
	return (_thread_slow_atomic_lock(lock));
#endif
}
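
/*
 * Usage sketch (an addition for illustration, not part of the original
 * file): how a caller might spin on _atomic_lock() to build a blocking
 * acquire/release pair.  The _example_* names are hypothetical; the real
 * uthread code layers its spinlock interface on top of this primitive.
 * The plain store in the release assumes _spinlock_lock_t is a word-sized
 * integer type whose aligned stores are atomic on MIPS.
 */
static void
_example_spin_acquire(volatile _spinlock_lock_t *lock)
{
	/* _atomic_lock() returns zero once we own the lock. */
	while (_atomic_lock(lock) != 0)
		;	/* busy-wait; a real caller would yield here */
}

static void
_example_spin_release(volatile _spinlock_lock_t *lock)
{
	*lock = _SPINLOCK_UNLOCKED;
}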

int
_atomic_is_locked(volatile _spinlock_lock_t *lock)
{
#if __mips >= 2
	return (*lock != _SPINLOCK_UNLOCKED);
#else
	return (_thread_slow_atomic_is_locked(lock));
#endif
}
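
/*
 * For comparison only (an addition, not part of the original file): on a
 * modern compiler the same acquire-or-fail operation can be written with
 * the GCC/Clang __atomic builtins, which emit an ll/sc retry loop on MIPS
 * much like the hand-written assembly above.  This assumes
 * _spinlock_lock_t is an integer type; the hypothetical function mirrors
 * _atomic_lock()'s contract of returning zero on success and non-zero if
 * the lock was already held.
 */
static int
_example_atomic_lock_builtin(volatile _spinlock_lock_t *lock)
{
	/* Atomically swap in the locked value; the old value says who won. */
	return (__atomic_exchange_n(lock, _SPINLOCK_LOCKED,
	    __ATOMIC_ACQUIRE) != _SPINLOCK_UNLOCKED);
}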