author    Ted Unangst <tedu@cvs.openbsd.org>    2013-06-01 20:47:41 +0000
committer Ted Unangst <tedu@cvs.openbsd.org>    2013-06-01 20:47:41 +0000
commit    2fbfcceb960fb807aa9b99a0597dedde67f9d6f8 (patch)
tree      8898346871e5f91b8f2a10e9d30e44dbbf668349 /lib/librthread/arch/sparc64
parent    751ab61a6dd86c9f4339695dcb646ee54892c289 (diff)
cleanup and consolidate the spinlock_lock (what a name!) code.
it's now atomic_lock to better reflect its usage, and librthread now features a new spinlock that's really a ticket lock. thrsleep can handle both types of lock via a flag in the clock arg (temporary back-compat hack). remove some old stuff that's accumulated along the way and is no longer used. some feedback from dlg, who is concerned with all things ticket lock. (you need to boot a new kernel before installing librthread)
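For orientation: the old spinlock is a plain test-and-set loop, while a ticket lock hands out monotonically increasing tickets and serves waiters in FIFO order, which makes acquisition fair under contention. Below is a minimal ticket-lock sketch using C11 atomics; the names struct ticket_lock, ticket_acquire and ticket_release are illustrative only and are not the actual librthread code, which additionally has to interoperate with thrsleep as described above.

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void
ticket_acquire(struct ticket_lock *l)
{
	/* take a ticket, then wait until it is our turn */
	unsigned int me = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->owner) != me)
		;	/* spin; a real implementation would yield or sleep */
}

static void
ticket_release(struct ticket_lock *l)
{
	/* serve the next waiter in FIFO order */
	atomic_fetch_add(&l->owner, 1);
}

Initializing both counters to zero (struct ticket_lock l = { 0, 0 };) leaves the lock unlocked. The trade-off is that waiters are served strictly in ticket order, so a preempted ticket holder can stall everyone queued behind it.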
Diffstat (limited to 'lib/librthread/arch/sparc64')
-rw-r--r--   lib/librthread/arch/sparc64/_atomic_lock.c   12
1 file changed, 6 insertions, 6 deletions
diff --git a/lib/librthread/arch/sparc64/_atomic_lock.c b/lib/librthread/arch/sparc64/_atomic_lock.c
index 750d9e5aa14..e18426b848e 100644
--- a/lib/librthread/arch/sparc64/_atomic_lock.c
+++ b/lib/librthread/arch/sparc64/_atomic_lock.c
@@ -1,23 +1,23 @@
-/* $OpenBSD: _atomic_lock.c,v 1.4 2011/10/13 05:41:06 guenther Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.5 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
/*
* Atomic lock for sparc64
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t * lock)
+_atomic_lock(volatile _atomic_lock_t * lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* " ldstub [address], reg_rd
*
* The atomic load-store instructions copy a byte from memory
* into r[rd], then rewrite the addressed byte in memory to all
- * ones [_SPINLOCK_LOCKED]. The operation is performed
+ * ones [_ATOMIC_LOCK_LOCKED]. The operation is performed
* atomically, that is, without allowing intervening interrupts
* or deferred traps. In a multiprocessor system, two or more
* processors executing atomic load-store unsigned byte [...]
@@ -37,5 +37,5 @@ _atomic_lock(volatile _spinlock_lock_t * lock)
*/
__asm__("ldstub [%1], %0" : "=&r" (old) : "r" (lock) : "memory");
- return (old == _SPINLOCK_LOCKED);
+ return (old == _ATOMIC_LOCK_LOCKED);
}
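As a rough caller-side illustration (not part of this commit): _atomic_lock() returns nonzero when the byte was already _ATOMIC_LOCK_LOCKED, so a trivial spinlock just retries until it observes the unlocked value. The _spinlock()/_spinunlock() helpers below are hypothetical names, and the sketch assumes the matching _ATOMIC_LOCK_UNLOCKED constant from <machine/spinlock.h>.

#include <sched.h>
#include <machine/spinlock.h>

int	_atomic_lock(volatile _atomic_lock_t *);	/* declared in librthread internals */

static void
_spinlock(volatile _atomic_lock_t *lock)
{
	/* retry while the ldstub saw the lock already held */
	while (_atomic_lock(lock))
		sched_yield();	/* back off rather than spinning flat out */
}

static void
_spinunlock(volatile _atomic_lock_t *lock)
{
	/* a real unlock may also want a memory barrier before the store */
	*lock = _ATOMIC_LOCK_UNLOCKED;
}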