From bed0f41cd9ce2bd6be5751a952fce38f6fdca95c Mon Sep 17 00:00:00 2001
From: Miod Vallat
Date: Fri, 3 Dec 2010 19:44:23 +0000
Subject: Sync with amd64, to allow this to compile without warnings with
 gcc4:

``Correct the _atomic_lock() asm so that gcc accepts the constraints when
compiling without optimization; fix copied from the kernel's atomic.h''
spotted by jim@
---
 lib/librthread/arch/i386/_atomic_lock.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/librthread/arch/i386/_atomic_lock.c b/lib/librthread/arch/i386/_atomic_lock.c
index d597f81f542..2a42259680e 100644
--- a/lib/librthread/arch/i386/_atomic_lock.c
+++ b/lib/librthread/arch/i386/_atomic_lock.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: _atomic_lock.c,v 1.2 2006/01/05 22:33:23 marc Exp $	*/
+/*	$OpenBSD: _atomic_lock.c,v 1.3 2010/12/03 19:44:22 miod Exp $	*/
 /* David Leonard, <d@openbsd.org>. Public domain. */
 
 /*
@@ -8,18 +8,18 @@
 #include <machine/spinlock.h>
 
 int
-_atomic_lock(register volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _spinlock_lock_t *lock)
 {
-	register _spinlock_lock_t old;
+	_spinlock_lock_t old;
 
 	/*
	 * Use the eXCHanGe instruction to swap the lock value with
	 * a local variable containing the locked state.
	 */
 	old = _SPINLOCK_LOCKED;
-	__asm__("xchg %0,%1"
-	    : "=r" (old), "=m" (*lock)
-	    : "0" (old), "1" (*lock));
+	__asm__("xchg %0,(%2)"
+	    : "=r" (old)
+	    : "0" (old), "r" (lock));
 	return (old != _SPINLOCK_UNLOCKED);
 }
-- 
cgit v1.2.3
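
For readers without an OpenBSD tree at hand, here is a self-contained sketch
of the committed pattern. The _spinlock_lock_t typedef and the _SPINLOCK_*
values below are stand-ins assumed for illustration (the real definitions
live in OpenBSD's <machine/spinlock.h>); only the inline asm and its
constraints mirror the patch. It is x86-only, since it hand-codes xchg, and
is meant to be built at -O0, the very case the commit fixes:

/* build: gcc -O0 -o lock_demo lock_demo.c   (i386/amd64 only) */
#include <stdio.h>

typedef int _spinlock_lock_t;		/* assumed stand-in */
#define	_SPINLOCK_UNLOCKED	1	/* assumed stand-in */
#define	_SPINLOCK_LOCKED	0	/* assumed stand-in */

static int
_atomic_lock(volatile _spinlock_lock_t *lock)
{
	_spinlock_lock_t old;

	/*
	 * The fixed form: the lock's address is an input in a register
	 * (%2) and the template dereferences it, so there is no matching
	 * constraint against a memory operand, the construct gcc4
	 * rejected when compiling without optimization in the old
	 * "=m"/"1" version.
	 */
	old = _SPINLOCK_LOCKED;
	__asm__("xchg %0,(%2)"
	    : "=r" (old)
	    : "0" (old), "r" (lock));
	return (old != _SPINLOCK_UNLOCKED);
}

int
main(void)
{
	_spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	/* First attempt acquires the lock, second finds it held. */
	printf("first:  %d\n", _atomic_lock(&lock));	/* expect 0 */
	printf("second: %d\n", _atomic_lock(&lock));	/* expect 1 */
	return (0);
}

The return convention follows the patched function: xchg atomically swaps
_SPINLOCK_LOCKED into *lock and hands back the previous value, so a nonzero
result means some other caller already held the lock.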
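
As a side note, compilers new enough to have the __sync builtins (gcc 4.1
and later) can express the same test-and-set without hand-written asm. This
is shown only for comparison, not what the commit does, and it uses the
common 0 = unlocked convention rather than OpenBSD's _SPINLOCK_* constants:

/* Portable alternative using a gcc builtin; comparison only. */
static int
atomic_lock_builtin(volatile int *lock)
{
	/*
	 * __sync_lock_test_and_set() stores 1 and returns the previous
	 * value; on x86 it compiles to the same xchg instruction.
	 * A nonzero result means the lock was already held.
	 */
	return (__sync_lock_test_and_set(lock, 1) != 0);
}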