author     Miod Vallat <miod@cvs.openbsd.org>   2007-10-27 20:35:22 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>   2007-10-27 20:35:22 +0000
commit     46e1c801d7a9835e871c570047964806aa7e6485 (patch)
tree       9c9da3e7deb97c9f2e8c07ee64ebc115c4029194 /sys/arch
parent     39826bed35d5224e115fe5fafa745276767e86a3 (diff)
In __cpu_simple_lock() and __cpu_simple_lock_try(), use a local u_int
instead of a local __cpu_simple_lock_t (which is volatile), so that the
compiler can optimize it to a register, instead of using a memory
location (and doing stores into it when __cpu_simple_lock() is
spinning).  This makes the MP code a bit smaller and a bit faster.
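
To make the reasoning concrete, here is a minimal, hypothetical C sketch
of the two loop shapes.  It is not the committed m88k code; the GCC
builtin __sync_lock_test_and_set() stands in for the xmem inline
assembly the real functions use.

/*
 * Hypothetical illustration, not the committed code.
 * __sync_lock_test_and_set() stands in for the m88k xmem instruction:
 * it atomically exchanges a value with a memory word.
 */
typedef volatile unsigned int __cpu_simple_lock_t;
#define	__SIMPLELOCK_LOCKED	1
#define	__SIMPLELOCK_UNLOCKED	0

/*
 * Before: the volatile local is memory-backed, so every retry of the
 * spin loop performs a store to its stack slot.
 */
static __inline__ void
lock_with_volatile_local(__cpu_simple_lock_t *l)
{
	__cpu_simple_lock_t old;		/* volatile: stack slot */

	do {
		old = __SIMPLELOCK_LOCKED;	/* store each iteration */
		old = __sync_lock_test_and_set(l, old);
	} while (old != __SIMPLELOCK_UNLOCKED);
}

/*
 * After: a plain unsigned int may live in a register for the whole
 * loop; only the atomic exchange itself touches memory.
 */
static __inline__ void
lock_with_plain_local(__cpu_simple_lock_t *l)
{
	unsigned int old;			/* register candidate */

	do {
		old = __SIMPLELOCK_LOCKED;	/* register move only */
		old = __sync_lock_test_and_set(l, old);
	} while (old != __SIMPLELOCK_UNLOCKED);
}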
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/m88k/include/lock.h  17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/sys/arch/m88k/include/lock.h b/sys/arch/m88k/include/lock.h
index 7d377703b7a..baa0439d728 100644
--- a/sys/arch/m88k/include/lock.h
+++ b/sys/arch/m88k/include/lock.h
@@ -1,6 +1,6 @@
#ifndef _M88K_LOCK_H_
#define _M88K_LOCK_H_
-/* $OpenBSD: lock.h,v 1.3 2007/05/19 16:58:43 miod Exp $ */
+/* $OpenBSD: lock.h,v 1.4 2007/10/27 20:35:21 miod Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat.
@@ -44,7 +44,13 @@ __cpu_simple_lock_init(__cpu_simple_lock_t *l)
static __inline__ void
__cpu_simple_lock(__cpu_simple_lock_t *l)
{
- __cpu_simple_lock_t old;
+	/*
+	 * The local __cpu_simple_lock_t is not declared volatile, so that
+	 * stores to it can be optimized away, since we will use a register
+	 * and only spin on it.  xmem will do the right thing regardless of
+	 * the volatile qualifier.
+	 */
+	u_int old;
do {
old = __SIMPLELOCK_LOCKED;
@@ -56,7 +62,12 @@ __cpu_simple_lock(__cpu_simple_lock_t *l)
static __inline__ int
__cpu_simple_lock_try(__cpu_simple_lock_t *l)
{
- __cpu_simple_lock_t old = __SIMPLELOCK_LOCKED;
+	/*
+	 * The local __cpu_simple_lock_t is not declared volatile, so that
+	 * there is no pipeline synchronization around stores to it.
+	 * xmem will do the right thing regardless of the volatile qualifier.
+	 */
+	u_int old = __SIMPLELOCK_LOCKED;
__asm__ __volatile__
("xmem %0, %1, r0" : "+r" (old) : "r" (l));