path: root/lib/libc_r/arch/powerpc/_atomic_lock.c
blob: e3eb2aab29054700afd8c120d1ba4ec136893086
/*	$OpenBSD: _atomic_lock.c,v 1.3 2002/07/10 20:30:13 jsyn Exp $	*/
/*
 * Atomic lock for powerpc
 */

#include "spinlock.h"
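
/*
 * spinlock.h is expected to supply _spinlock_lock_t and the
 * _SPINLOCK_LOCKED/_SPINLOCK_UNLOCKED constants, along these lines
 * (illustrative only; the real definitions are machine-dependent and
 * may use different values):
 *
 *	typedef volatile int _spinlock_lock_t;
 *	#define	_SPINLOCK_UNLOCKED	(0)
 *	#define	_SPINLOCK_LOCKED	(1)
 */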

/*
 * Attempt to acquire the lock: atomically swap _SPINLOCK_LOCKED into
 * *lock and report whether it was already held, in which case the
 * caller must retry.
 */
int
_atomic_lock(volatile _spinlock_lock_t *lock)
{
	_spinlock_lock_t old;

	/*
	 * "old" must be early-clobbered ("=&r"): lwarx writes it before
	 * stwcx. consumes the _SPINLOCK_LOCKED input, so the two may not
	 * share a register.  The "memory" clobber keeps the compiler from
	 * moving protected loads/stores across the lock.
	 */
	__asm__("1: lwarx  %0,0,%1 \n"	/* load *lock, set reservation */
		"   stwcx. %2,0,%1 \n"	/* store LOCKED iff still reserved */
		"   bne-   1b      \n"	/* lost the reservation: retry */
		: "=&r" (old), "=r" (lock)
		: "r" (_SPINLOCK_LOCKED), "1" (lock)
		: "memory"
	);

	return (old != _SPINLOCK_UNLOCKED);

	/*
	 * Dale <rahnds@openbsd.org> says:
	 *   Side note: to keep one process's lwarx from being paired
	 *   with another process's stwcx. on the same address, the
	 *   current powerpc kernel issues a stwcx. without a
	 *   corresponding lwarx on every context switch, which wipes
	 *   out any reservation the outgoing process held.  So if a
	 *   context switch occurs between the two accesses, the store
	 *   does not take place and the condition code makes the loop
	 *   retry.  On a dual-processor machine, the reservation
	 *   triggers the appropriate bus cycles to notify the other
	 *   processor.
	 */
}
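
/*
 * A minimal usage sketch (illustrative only; _spin_lock() is a
 * hypothetical caller, not part of this file): acquire the lock by
 * spinning until _atomic_lock() reports that *lock was previously
 * unlocked.
 *
 *	void
 *	_spin_lock(volatile _spinlock_lock_t *lock)
 *	{
 *		while (_atomic_lock(lock))
 *			continue;
 *	}
 */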

/*
 * Test whether the lock is currently held, without modifying it.
 */
int
_atomic_is_locked(volatile _spinlock_lock_t *lock)
{

	return (*lock != _SPINLOCK_UNLOCKED);
}