author	Artur Grabowski <art@cvs.openbsd.org>	2007-11-26 17:15:30 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2007-11-26 17:15:30 +0000
commit	1c8d182d996ce78179d1c611b6cb57cc4f2a7449 (patch)
tree	9a4067a51f60221707178e273d2b787ad24914c3 /sys/arch/sparc64/include
parent	36460865e8900119b203ce0444196799e44ade5b (diff)
Move the implementation of __mp_lock (biglock) into machine dependent code.

At this moment all architectures get a copy of the old code, except i386, which gets a new shiny implementation that doesn't spin at splhigh (doh!) and doesn't try to grab the biglock when releasing the biglock (double doh!). This shaves 10% of system time during a kernel compile and might solve a few bugs as a bonus. Other architectures coming shortly.

miod@ deraadt@ ok
Diffstat (limited to 'sys/arch/sparc64/include')
-rw-r--r--	sys/arch/sparc64/include/mplock.h	191
1 file changed, 191 insertions(+), 0 deletions(-)
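The interface every machine dependent header must now provide is small. A minimal sketch of the intended calling pattern follows; the lock object and function names are hypothetical (the kernel's real instance is the biglock), and the include path assumes the usual <machine/...> mapping:

	#include <machine/mplock.h>

	struct __mp_lock example_lock;	/* hypothetical lock object */

	void
	example_init(void)
	{
		__mp_lock_init(&example_lock);
	}

	void
	example_work(void)
	{
		__mp_lock(&example_lock);	/* spins until owned by this CPU */
		__mp_lock(&example_lock);	/* recursion: same CPU, no spin */
		__mp_unlock(&example_lock);
		__mp_unlock(&example_lock);	/* count hits 0, lock released */
	}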
diff --git a/sys/arch/sparc64/include/mplock.h b/sys/arch/sparc64/include/mplock.h
new file mode 100644
index 00000000000..8f87c895aae
--- /dev/null
+++ b/sys/arch/sparc64/include/mplock.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: mplock.h,v 1.1 2007/11/26 17:15:29 art Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursion support.
+ * Correctness is paramount; no fanciness allowed.
+ */
+
+struct __mp_lock {
+ __cpu_simple_lock_t mpl_lock; /* underlying simple spinlock */
+ cpuid_t mpl_cpu; /* owning CPU, or LK_NOCPU when free */
+ int mpl_count; /* recursion depth on the owning CPU */
+};
+
+static __inline void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ __cpu_simple_lock_init(&lock->mpl_lock);
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* Spin timing is CPU-dependent, so this needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
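+
+/*
+ * Example (ddb; exact syntax may vary): the spinout threshold can be
+ * tuned at run time with the debugger's write command, e.g.
+ *
+ *	ddb> write __mp_lock_spinout 0x10000000
+ */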
+
+static __inline void
+__mp_lock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+#ifndef MP_LOCKDEBUG
+ __cpu_simple_lock(&lock->mpl_lock);
+#else
+ {
+ int got_it;
+ do {
+ int ticks = __mp_lock_spinout;
+
+ do {
+ got_it = __cpu_simple_lock_try(
+ &lock->mpl_lock);
+ } while (!got_it && ticks-- > 0);
+ if (!got_it) {
+ db_printf(
+ "__mp_lock(%p): lock spun out\n",
+ lock);
+ Debugger();
+ }
+ } while (!got_it);
+ }
+#endif
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+}
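+
+/*
+ * Example (illustrative, "lock" is any struct __mp_lock): acquisition
+ * recurses on the owning CPU, so nested __mp_lock() calls never spin.
+ *
+ *	__mp_lock(&lock);	mpl_count == 1, mpl_cpu == cpu_number()
+ *	__mp_lock(&lock);	mpl_count == 2, no spin
+ *	__mp_unlock(&lock);	mpl_count == 1, still owned
+ *	__mp_unlock(&lock);	mpl_count == 0, mpl_cpu == LK_NOCPU
+ */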
+
+/*
+ * Try to acquire the lock. If another CPU already holds it, store
+ * that CPU's id in the call-by-reference cpu parameter. Return true
+ * if the lock was acquired.
+ */
+static __inline int
+__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+ if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
+ *cpu = lock->mpl_cpu;
+ splx(s);
+ return 0;
+ }
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+ return 1;
+}
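+
+/*
+ * Example (illustrative): a caller that would rather back off than
+ * spin can learn which CPU owns the lock on failure.
+ *
+ *	cpuid_t owner;
+ *
+ *	if (!__mp_lock_try(&lock, &owner))
+ *		printf("biglock busy, held by cpu%ld\n", (long)owner);
+ */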
+
+static __inline void
+__mp_unlock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ if (--lock->mpl_count == 0) {
+ lock->mpl_cpu = LK_NOCPU;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ }
+ splx(s);
+}
+
+static __inline int
+__mp_release_all(struct __mp_lock *lock)
+{
+ int s = spllock();
+ int rv = lock->mpl_count;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ splx(s);
+ return (rv);
+}
+
+static __inline int
+__mp_release_all_but_one(struct __mp_lock *lock)
+{
+ int s = spllock();
+ int rv = lock->mpl_count - 1;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_count = 1;
+ splx(s);
+ return (rv);
+}
+
+static __inline void
+__mp_acquire_count(struct __mp_lock *lock, int count)
+{
+ int s = spllock();
+
+ while (count--)
+ __mp_lock(lock);
+ splx(s);
+}
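+
+/*
+ * Example (illustrative): __mp_release_all() pairs with
+ * __mp_acquire_count() around code that must give up the biglock
+ * entirely, whatever the current recursion depth.
+ *
+ *	int count = __mp_release_all(&lock);
+ *	... run or sleep without the biglock ...
+ *	__mp_acquire_count(&lock, count);
+ */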
+
+static __inline int
+__mp_lock_held(struct __mp_lock *lock)
+{
+ return lock->mpl_count && lock->mpl_cpu == cpu_number();
+}
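+
+/*
+ * Example (illustrative): handy for locking assertions, e.g.
+ *
+ *	KASSERT(__mp_lock_held(&lock));
+ */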
+
+#endif /* !_MACHINE_MPLOCK_H_ */