diff options
author | Miod Vallat <miod@cvs.openbsd.org> | 2009-06-01 23:17:54 +0000 |
---|---|---|
committer | Miod Vallat <miod@cvs.openbsd.org> | 2009-06-01 23:17:54 +0000 |
commit | 67026859cc20df000a2c1fbe7b42dde69748b2df (patch) | |
tree | 861e7993023601fd3202f5f93511c3d0d446aba8 /lib/libpthread/arch | |
parent | 8e59d0ae88c89cc8f8d1c0586801b1c9126d0248 (diff) |
A much better atomic lock routine.
Diffstat (limited to 'lib/libpthread/arch')
-rw-r--r-- | lib/libpthread/arch/mips64/_atomic_lock.c | 52 |
1 file changed, 9 insertions, 43 deletions
diff --git a/lib/libpthread/arch/mips64/_atomic_lock.c b/lib/libpthread/arch/mips64/_atomic_lock.c index cb5b42a1162..e85335f7466 100644 --- a/lib/libpthread/arch/mips64/_atomic_lock.c +++ b/lib/libpthread/arch/mips64/_atomic_lock.c @@ -1,58 +1,24 @@ -/* $OpenBSD: _atomic_lock.c,v 1.2 2009/02/15 17:48:58 deraadt Exp $ */ - -/* Public domain. Written by David Leonard */ +/* $OpenBSD: _atomic_lock.c,v 1.3 2009/06/01 23:17:52 miod Exp $ */ /* * Atomic lock for mips + * Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain. */ -#include "pthread.h" -#include "pthread_private.h" #include "spinlock.h" -#include <signal.h> -/* - * uthread atomic lock: - * attempt to acquire a lock (by giving it a non-zero value). - * Return zero on success, or the lock's value on failure - */ int _atomic_lock(volatile _spinlock_lock_t *lock) { -#if __mips >= 2 _spinlock_lock_t old; - _spinlock_lock_t temp; - do { - /* - * On a mips2 machine and above, we can use ll/sc. - * Read the lock and tag the cache line with a 'load linked' - * instruction. (Register 17 (LLAddr) will hold the - * physical address of lock for diagnostic purposes); - * (Under pathologically heavy swapping, the physaddr may - * change! XXX) - */ - __asm__("ll %0, %1" : "=r"(old) : "m"(*lock)); - if (old != _SPINLOCK_UNLOCKED) - break; /* already locked */ - /* - * Try and store a 1 at the tagged lock address. If - * anyone else has since written it, the tag on the cache - * line will have been wiped, and temp will be set to zero - * by the 'store conditional' instruction. - */ - temp = _SPINLOCK_LOCKED; - __asm__("sc %0, %1" : "=r"(temp), "=m"(*lock) - : "0"(temp)); - } while (temp == 0); + __asm__ __volatile__ ( + "1: ll %0, 0(%1)\n" + " sc %2, 0(%1)\n" + " beqz %2, 1b\n" + " nop\n" : + "=r"(old) : + "r"(lock), "r"(_SPINLOCK_LOCKED) : "memory"); return (old != _SPINLOCK_UNLOCKED); -#else - /* - * Older MIPS cpus have no way of doing an atomic lock - * without some kind of shift to supervisor mode. 
- */ - - return (_thread_slow_atomic_lock(lock)); -#endif } |