author     Joel Sing <jsing@cvs.openbsd.org>    2011-06-24 12:49:07 +0000
committer  Joel Sing <jsing@cvs.openbsd.org>    2011-06-24 12:49:07 +0000
commit     ebccea5450443aa481ea40a5f338960ab1ebb658 (patch)
tree       0e8685ecb222db941716a865f7d2a200cb0f40b4 /sys/arch
parent     fd4baf40cb2ff68d620d4ab85ffc5da2cc0671e3 (diff)
When reading the value from a simple lock, we do not need to ldcws into a
__cpu_simple_lock_t - gcc most likely ignores the alignment of a stack
variable anyway. Also remove the unnecessary initialisation.

ok kettenis@ miod@
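For readers unfamiliar with the hppa primitive: ldcw/ldcws atomically loads the
lock word and clears it to zero, so a zero word means "locked" and the non-zero
__SIMPLELOCK_UNLOCKED value means "free". Below is a minimal C sketch of the
acquire path after this change - a simplified illustration, not the verbatim
OpenBSD header; in particular the real __cpu_simple_lock_t is a volatile,
16-byte-aligned type, which is the alignment the commit message refers to.

	#include <sys/types.h>			/* u_int */

	/* simplified stand-ins for the real machine/lock.h definitions */
	typedef volatile u_int __cpu_simple_lock_t;
	#define __SIMPLELOCK_LOCKED	0	/* ldcw leaves zero behind */
	#define __SIMPLELOCK_UNLOCKED	1	/* any non-zero value */

	static __inline__ void
	__cpu_simple_lock(__cpu_simple_lock_t *l)
	{
		volatile u_int old;	/* plain u_int: only the loaded value matters */

		do {
			/* atomically load *l and clear it to zero */
			__asm__ __volatile__
			    ("ldcws 0(%2), %0" : "=&r" (old), "+m" (l) : "r" (l));
		} while (old != __SIMPLELOCK_UNLOCKED);	/* spin until we saw it free */
	}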
Diffstat (limited to 'sys/arch')
 -rw-r--r--  sys/arch/hppa/include/lock.h    | 7 +++----
 -rw-r--r--  sys/arch/hppa64/include/lock.h  | 7 +++----
 2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/sys/arch/hppa/include/lock.h b/sys/arch/hppa/include/lock.h
index 6bbe382589c..ab494703aca 100644
--- a/sys/arch/hppa/include/lock.h
+++ b/sys/arch/hppa/include/lock.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock.h,v 1.5 2011/03/23 16:54:35 pirofti Exp $ */
+/* $OpenBSD: lock.h,v 1.6 2011/06/24 12:49:06 jsing Exp $ */
 
 /* public domain */
 
@@ -21,10 +21,9 @@ __cpu_simple_lock_init(__cpu_simple_lock_t *l)
 static __inline__ void
 __cpu_simple_lock(__cpu_simple_lock_t *l)
 {
-	__cpu_simple_lock_t old;
+	volatile u_int old;
 
 	do {
-		old = __SIMPLELOCK_LOCKED;
 		__asm__ __volatile__
 		    ("ldcws 0(%2), %0" : "=&r" (old), "+m" (l) : "r" (l));
 	} while (old != __SIMPLELOCK_UNLOCKED);
@@ -33,7 +32,7 @@ __cpu_simple_lock(__cpu_simple_lock_t *l)
 static __inline__ int
 __cpu_simple_lock_try(__cpu_simple_lock_t *l)
 {
-	__cpu_simple_lock_t old = __SIMPLELOCK_LOCKED;
+	volatile u_int old;
 
 	__asm__ __volatile__
 	    ("ldcws 0(%2), %0" : "=&r" (old), "+m" (l) : "r" (l));
diff --git a/sys/arch/hppa64/include/lock.h b/sys/arch/hppa64/include/lock.h
index 6bd03940965..c0a92a194b1 100644
--- a/sys/arch/hppa64/include/lock.h
+++ b/sys/arch/hppa64/include/lock.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock.h,v 1.3 2011/03/23 16:54:35 pirofti Exp $ */
+/* $OpenBSD: lock.h,v 1.4 2011/06/24 12:49:06 jsing Exp $ */
 
 /* public domain */
 
@@ -21,10 +21,9 @@ __cpu_simple_lock_init(__cpu_simple_lock_t *l)
 static __inline__ void
 __cpu_simple_lock(__cpu_simple_lock_t *l)
 {
-	__cpu_simple_lock_t old;
+	volatile u_int old;
 
 	do {
-		old = __SIMPLELOCK_LOCKED;
 		__asm__ __volatile__
 		    ("ldcw %1, %0" : "=r" (old), "=m" (l) : "m" (l));
 	} while (old != __SIMPLELOCK_UNLOCKED);
@@ -33,7 +32,7 @@ __cpu_simple_lock(__cpu_simple_lock_t *l)
 static __inline__ int
 __cpu_simple_lock_try(__cpu_simple_lock_t *l)
 {
-	__cpu_simple_lock_t old = __SIMPLELOCK_LOCKED;
+	volatile u_int old;
 
 	__asm__ __volatile__
 	    ("ldcw %1, %0" : "=r" (old), "=m" (l) : "m" (l));