path: root/lib/libpthread/arch/mips64/_atomic_lock.c
/*	$OpenBSD: _atomic_lock.c,v 1.2 2009/02/15 17:48:58 deraadt Exp $	*/

/* Public domain.  Written by David Leonard */

/*
 * Atomic lock for mips
 */

#include "pthread.h"
#include "pthread_private.h"
#include "spinlock.h"
#include <signal.h>

/*
 * uthread atomic lock:
 *	attempt to acquire a lock (by giving it a non-zero value).
 *	Return zero on success, or the lock's value on failure.
 */
int
_atomic_lock(volatile _spinlock_lock_t *lock)
{
#if __mips >= 2
	_spinlock_lock_t old;
	_spinlock_lock_t temp;

	do {
		/*
		 * On a MIPS II or later machine, we can use ll/sc.
		 * Read the lock and tag the cache line with a 'load linked'
		 * instruction.  (Coprocessor 0 register 17 (LLAddr) will hold
		 * the physical address of the lock for diagnostic purposes.)
		 * (Under pathologically heavy swapping, the physical address
		 * may change!  XXX)
		 */
		__asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
		if (old != _SPINLOCK_UNLOCKED)
			break; /* already locked */
		/*
		 * Try to store a 1 at the tagged lock address.  If
		 * anyone else has written to it since the ll, the tag on
		 * the cache line will have been wiped, and temp will be
		 * set to zero by the 'store conditional' instruction.
		 */
		temp = _SPINLOCK_LOCKED;
		__asm__("sc  %0, %1" : "=r"(temp), "=m"(*lock)
				     : "0"(temp));
	} while (temp == 0);

	return (old != _SPINLOCK_UNLOCKED);
#else
	/*
	 * Older MIPS CPUs have no way of doing an atomic lock
	 * without some kind of shift to supervisor mode.
	 */

	return (_thread_slow_atomic_lock(lock));
#endif
}
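
/*
 * Illustrative sketch (not part of the original file): per the contract
 * above, _atomic_lock() returns zero only when it observed the lock
 * unlocked and claimed it, so a caller typically busy-waits on it until
 * that happens.  The wrapper name below is hypothetical.
 */
static void
example_spin_acquire(volatile _spinlock_lock_t *lock)
{
	/* keep retrying while some other thread already holds the lock */
	while (_atomic_lock(lock))
		continue;
}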