Diffstat (limited to 'lib')
-rw-r--r--	lib/libpthread/arch/mips64/_atomic_lock.c	52
-rw-r--r--	lib/librthread/arch/mips64/_atomic_lock.c	37
2 files changed, 19 insertions, 70 deletions
diff --git a/lib/libpthread/arch/mips64/_atomic_lock.c b/lib/libpthread/arch/mips64/_atomic_lock.c
index cb5b42a1162..e85335f7466 100644
--- a/lib/libpthread/arch/mips64/_atomic_lock.c
+++ b/lib/libpthread/arch/mips64/_atomic_lock.c
@@ -1,58 +1,24 @@
-/* $OpenBSD: _atomic_lock.c,v 1.2 2009/02/15 17:48:58 deraadt Exp $ */
-
-/* Public domain. Written by David Leonard */
+/* $OpenBSD: _atomic_lock.c,v 1.3 2009/06/01 23:17:52 miod Exp $ */
 
 /*
  * Atomic lock for mips
+ * Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
  */
 
-#include "pthread.h"
-#include "pthread_private.h"
 #include "spinlock.h"
-#include <signal.h>
 
-/*
- * uthread atomic lock:
- *	attempt to acquire a lock (by giving it a non-zero value).
- *	Return zero on success, or the lock's value on failure
- */
 int
 _atomic_lock(volatile _spinlock_lock_t *lock)
 {
-#if __mips >= 2
 	_spinlock_lock_t old;
-	_spinlock_lock_t temp;
 
-	do {
-		/*
-		 * On a mips2 machine and above, we can use ll/sc.
-		 * Read the lock and tag the cache line with a 'load linked'
-		 * instruction. (Register 17 (LLAddr) will hold the
-		 * physical address of lock for diagnostic purposes);
-		 * (Under pathologically heavy swapping, the physaddr may
-		 * change! XXX)
-		 */
-		__asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
-		if (old != _SPINLOCK_UNLOCKED)
-			break;	/* already locked */
-		/*
-		 * Try and store a 1 at the tagged lock address. If
-		 * anyone else has since written it, the tag on the cache
-		 * line will have been wiped, and temp will be set to zero
-		 * by the 'store conditional' instruction.
-		 */
-		temp = _SPINLOCK_LOCKED;
-		__asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
-			: "0"(temp));
-	} while (temp == 0);
+	__asm__ __volatile__ (
+	"1:	ll	%0, 0(%1)\n"
+	"	sc	%2, 0(%1)\n"
+	"	beqz	%2, 1b\n"
+	"	 nop\n" :
+	    "=r"(old) :
+	    "r"(lock), "r"(_SPINLOCK_LOCKED) : "memory");
 
 	return (old != _SPINLOCK_UNLOCKED);
-#else
-	/*
-	 * Older MIPS cpus have no way of doing an atomic lock
-	 * without some kind of shift to supervisor mode.
-	 */
-
-	return (_thread_slow_atomic_lock(lock));
-#endif
 }
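The rewritten routine is an atomic exchange: ll loads the lock word and tags the line, sc tries to store _SPINLOCK_LOCKED back and leaves 0 in its source register if another CPU touched the line first, and beqz retries until the store-conditional succeeds. The function therefore always leaves the lock set and reports whether it was already held. A minimal C11 sketch of the same semantics (not part of this commit; the example_* names and the atomic_uint lock type are assumptions standing in for _spinlock_lock_t):

#include <stdatomic.h>

/* Hypothetical stand-ins for _spinlock_lock_t and its constants. */
typedef atomic_uint example_lock_t;
#define EXAMPLE_UNLOCKED	0
#define EXAMPLE_LOCKED		1

int
example_atomic_lock(volatile example_lock_t *lock)
{
	/* Atomically store LOCKED and fetch the previous value, which is
	 * what the ll/sc retry loop above accomplishes. */
	unsigned int old = atomic_exchange(lock, EXAMPLE_LOCKED);

	/* Non-zero means the lock was already held by someone else. */
	return (old != EXAMPLE_UNLOCKED);
}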
diff --git a/lib/librthread/arch/mips64/_atomic_lock.c b/lib/librthread/arch/mips64/_atomic_lock.c
index e521de67114..3e1c4a07a3f 100644
--- a/lib/librthread/arch/mips64/_atomic_lock.c
+++ b/lib/librthread/arch/mips64/_atomic_lock.c
@@ -1,41 +1,24 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2009/02/15 17:48:58 deraadt Exp $ */
-
-/* Public domain. Written by David Loenard */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2009/06/01 23:17:53 miod Exp $ */
 
 /*
  * Atomic lock for mips
+ * Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
  */
 
-#include <spinlock.h>
+#include "spinlock.h"
 
 int
 _atomic_lock(volatile _spinlock_lock_t *lock)
 {
 	_spinlock_lock_t old;
-	_spinlock_lock_t temp;
 
-	do {
-		/*
-		 * On a mips2 machine and above, we can use ll/sc.
-		 * Read the lock and tag the cache line with a 'load linked'
-		 * instruction. (Register 17 (LLAddr) will hold the
-		 * physical address of lock for diagnostic purposes);
-		 * (Under pathologically heavy swapping, the physaddr may
-		 * change! XXX)
-		 */
-		__asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
-		if (old != _SPINLOCK_UNLOCKED)
-			break;	/* already locked */
-		/*
-		 * Try and store a 1 at the tagged lock address. If
-		 * anyone else has since written it, the tag on the cache
-		 * line will have been wiped, and temp will be set to zero
-		 * by the 'store conditional' instruction.
-		 */
-		temp = _SPINLOCK_LOCKED;
-		__asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
-			: "0"(temp));
-	} while (temp == _SPINLOCK_UNLOCKED);
+	__asm__ __volatile__ (
+	"1:	ll	%0, 0(%1)\n"
+	"	sc	%2, 0(%1)\n"
+	"	beqz	%2, 1b\n"
+	"	 nop\n" :
+	    "=r"(old) :
+	    "r"(lock), "r"(_SPINLOCK_LOCKED) : "memory");
 
 	return (old != _SPINLOCK_UNLOCKED);
 }
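In both libraries _atomic_lock() is only the test-and-set primitive; a caller acquires the lock by retrying until the returned value says it was previously unlocked. A sketch of such a caller follows, assuming spinlock.h declares _atomic_lock(), _spinlock_lock_t and _SPINLOCK_UNLOCKED as used in this diff; the yield-while-spinning policy and the example_* names are illustrative, not the actual OpenBSD _spinlock() code:

#include <sched.h>

#include "spinlock.h"	/* assumed to declare _spinlock_lock_t, _SPINLOCK_UNLOCKED, _atomic_lock() */

void
example_spin_lock(volatile _spinlock_lock_t *lock)
{
	/* _atomic_lock() returns non-zero while another thread holds the lock. */
	while (_atomic_lock(lock))
		sched_yield();	/* let the current holder make progress */
}

void
example_spin_unlock(volatile _spinlock_lock_t *lock)
{
	/* A plain store releases the lock; on multiprocessor MIPS a memory
	 * barrier may also be needed before publishing the release. */
	*lock = _SPINLOCK_UNLOCKED;
}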