 sys/arch/amd64/include/mplock.h   | 191
 sys/arch/i386/i386/lock_machdep.c | 143
 sys/arch/i386/include/mplock.h    |  37
 sys/arch/macppc/include/mplock.h  | 191
 sys/arch/mvme88k/include/mplock.h | 191
 sys/arch/sparc64/include/mplock.h | 191
 sys/kern/kern_sched.c             |   4
 sys/kern/sched_bsd.c              |   6
 sys/sys/mplock.h                  | 184
 9 files changed, 961 insertions(+), 177 deletions(-)
diff --git a/sys/arch/amd64/include/mplock.h b/sys/arch/amd64/include/mplock.h
new file mode 100644
index 00000000000..8f87c895aae
--- /dev/null
+++ b/sys/arch/amd64/include/mplock.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: mplock.h,v 1.1 2007/11/26 17:15:29 art Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount; no fanciness allowed.
+ */
+
+struct __mp_lock {
+ __cpu_simple_lock_t mpl_lock;
+ cpuid_t mpl_cpu;
+ int mpl_count;
+};
+
+static __inline void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ __cpu_simple_lock_init(&lock->mpl_lock);
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* The spin timeout is CPU-dependent, so it needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+static __inline void
+__mp_lock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+#ifndef MP_LOCKDEBUG
+ __cpu_simple_lock(&lock->mpl_lock);
+#else
+ {
+ int got_it;
+ do {
+ int ticks = __mp_lock_spinout;
+
+ do {
+ got_it = __cpu_simple_lock_try(
+ &lock->mpl_lock);
+ } while (!got_it && ticks-- > 0);
+ if (!got_it) {
+ db_printf(
+ "__mp_lock(0x%x): lock spun out",
+ lock);
+ Debugger();
+ }
+ } while (!got_it);
+ }
+#endif
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+}
+
+/*
+ * Try to acquire the lock; if another CPU holds it, store that CPU's
+ * id in the call-by-reference cpu parameter. Return true if acquired.
+ */
+static __inline int
+__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+ if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
+ *cpu = lock->mpl_cpu;
+ splx(s);
+ return 0;
+ }
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+ return 1;
+}
+
+static __inline void
+__mp_unlock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ if (--lock->mpl_count == 0) {
+ lock->mpl_cpu = LK_NOCPU;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ }
+ splx(s);
+}
+
+static __inline int
+__mp_release_all(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ splx(s);
+ return (rv);
+}
+
+static __inline int
+__mp_release_all_but_one(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count - 1;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_count = 1;
+ splx(s);
+ return (rv);
+}
+
+static __inline void
+__mp_acquire_count(struct __mp_lock *lock, int count) {
+ int s = spllock();
+
+ while (count--)
+ __mp_lock(lock);
+ splx(s);
+}
+
+static __inline int
+__mp_lock_held(struct __mp_lock *lock) {
+ return lock->mpl_count && lock->mpl_cpu == cpu_number();
+}
+
+#endif /* !_MACHINE_MPLOCK_H_ */
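
The header above is the template the other architectures copy: a recursive
giant lock built on __cpu_simple_lock. A minimal usage sketch (illustration
only, not part of the commit; it assumes kernel context where spllock() and
cpu_number() are available, and "example_lock" is a hypothetical name):

	struct __mp_lock example_lock;

	void
	example(void)
	{
		__mp_lock_init(&example_lock);

		__mp_lock(&example_lock);	/* first acquisition: may spin */
		__mp_lock(&example_lock);	/* recursion on same CPU: no spin */

		if (__mp_lock_held(&example_lock)) {
			/* Drop every recursion level at once, e.g. before sleeping. */
			int count = __mp_release_all(&example_lock);	/* count == 2 */
			/* ... other CPUs may take the lock here ... */
			__mp_acquire_count(&example_lock, count);	/* re-take both */
		}

		__mp_unlock(&example_lock);
		__mp_unlock(&example_lock);
	}
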
diff --git a/sys/arch/i386/i386/lock_machdep.c b/sys/arch/i386/i386/lock_machdep.c
index 59c0ff96aff..ef76cb3c16e 100644
--- a/sys/arch/i386/i386/lock_machdep.c
+++ b/sys/arch/i386/i386/lock_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock_machdep.c,v 1.6 2007/05/29 18:18:20 tom Exp $ */
+/* $OpenBSD: lock_machdep.c,v 1.7 2007/11/26 17:15:29 art Exp $ */
/* $NetBSD: lock_machdep.c,v 1.1.2.3 2000/05/03 14:40:30 sommerfeld Exp $ */
/*-
@@ -113,3 +113,144 @@ rw_cas_486(volatile unsigned long *p, unsigned long o, unsigned long n)
{
return (i486_atomic_cas_int((u_int *)p, o, n) != o);
}
+
+void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ lock->mpl_cpu = NULL;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* The spin timeout is CPU-dependent, so it needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+static __inline void
+__mp_lock_spin(struct __mp_lock *mpl)
+{
+#ifndef MP_LOCKDEBUG
+ while (mpl->mpl_count != 0)
+ SPINLOCK_SPIN_HOOK;
+#else
+ int ticks = __mp_lock_spinout;
+
+ while (mpl->mpl_count != 0 && ticks-- > 0)
+ SPINLOCK_SPIN_HOOK;
+
+ if (ticks < 0) {
+ db_printf("__mp_lock(0x%x): lock spun out", mpl);
+ Debugger();
+ }
+#endif
+}
+
+void
+__mp_lock(struct __mp_lock *mpl)
+{
+ /*
+ * Note that mpl_count gets incremented twice for the first
+ * lock. This is on purpose. The way we release the lock in
+ * __mp_unlock is to decrement mpl_count and then check whether
+ * the lock should be released. Since mpl_count is what we're
+ * spinning on, decrementing it to 0 in __mp_unlock would mean
+ * that we can't clear mpl_cpu, because we'd no longer be holding
+ * the lock. In theory mpl_cpu doesn't need to be cleared, but
+ * it's safer to clear it, and besides, setting mpl_count to 2 on
+ * the first lock makes most of this code much simpler.
+ */
+
+ while (1) {
+ int ef = read_eflags();
+
+ disable_intr();
+ if (i486_atomic_cas_int(&mpl->mpl_count, 0, 1) == 0) {
+ mpl->mpl_cpu = curcpu();
+ }
+
+ if (mpl->mpl_cpu == curcpu()) {
+ mpl->mpl_count++;
+ write_eflags(ef);
+ break;
+ }
+ write_eflags(ef);
+
+ __mp_lock_spin(mpl);
+ }
+}
+
+void
+__mp_unlock(struct __mp_lock *mpl)
+{
+ int ef = read_eflags();
+
+#ifdef MP_LOCKDEBUG
+ if (mpl->mpl_cpu != curcpu()) {
+ db_printf("__mp_unlock(%p): not held lock\n", mpl);
+ Debugger();
+ }
+#endif
+
+ disable_intr();
+ if (--mpl->mpl_count == 1) {
+ mpl->mpl_cpu = NULL;
+ mpl->mpl_count = 0;
+ }
+ write_eflags(ef);
+}
+
+int
+__mp_release_all(struct __mp_lock *mpl) {
+ int rv = mpl->mpl_count - 1;
+ int ef = read_eflags();
+
+#ifdef MP_LOCKDEBUG
+ if (mpl->mpl_cpu != curcpu()) {
+ db_printf("__mp_release_all(%p): not held lock\n", mpl);
+ Debugger();
+ }
+#endif
+
+ disable_intr();
+ mpl->mpl_cpu = NULL;
+ mpl->mpl_count = 0;
+ write_eflags(ef);
+
+ return (rv);
+}
+
+int
+__mp_release_all_but_one(struct __mp_lock *mpl) {
+ int rv = mpl->mpl_count - 2;
+
+#ifdef MP_LOCKDEBUG
+ if (mpl->mpl_cpu != curcpu()) {
+ db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl);
+ Debugger();
+ }
+#endif
+
+ mpl->mpl_count = 2;
+
+ return (rv);
+}
+
+void
+__mp_acquire_count(struct __mp_lock *mpl, int count) {
+ while (count--)
+ __mp_lock(mpl);
+}
+
+int
+__mp_lock_held(struct __mp_lock *mpl) {
+ return mpl->mpl_cpu == curcpu();
+}
+
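
Unlike the inline header version, the i386 implementation spins on mpl_count
itself, which is why the count is biased by one. A walk-through of the
invariant (illustration only): mpl_count == 0 always means free, and
mpl_count == n + 1 means held with recursion depth n, so waiters never have
to read mpl_cpu while spinning.

	void
	invariant_example(struct __mp_lock *mpl)
	{
		/* free: mpl_count == 0, mpl_cpu == NULL */
		__mp_lock(mpl);		/* CAS 0 -> 1 claims it, then ++: count == 2 */
		__mp_lock(mpl);		/* recursive acquisition: count == 3 */
		__mp_unlock(mpl);	/* count == 2: still held */
		__mp_unlock(mpl);	/* count hits 1: owner cleared, count set to 0 */
	}
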
diff --git a/sys/arch/i386/include/mplock.h b/sys/arch/i386/include/mplock.h
new file mode 100644
index 00000000000..e1069d94e23
--- /dev/null
+++ b/sys/arch/i386/include/mplock.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+struct __mp_lock {
+ volatile struct cpu_info *mpl_cpu;
+ volatile int mpl_count;
+};
+
+#ifndef _LOCORE
+
+void __mp_lock_init(struct __mp_lock *);
+void __mp_lock(struct __mp_lock *);
+void __mp_unlock(struct __mp_lock *);
+int __mp_release_all(struct __mp_lock *);
+int __mp_release_all_but_one(struct __mp_lock *);
+void __mp_acquire_count(struct __mp_lock *, int);
+int __mp_lock_held(struct __mp_lock *);
+
+#endif
+
+#endif
diff --git a/sys/arch/macppc/include/mplock.h b/sys/arch/macppc/include/mplock.h
new file mode 100644
index 00000000000..8f87c895aae
--- /dev/null
+++ b/sys/arch/macppc/include/mplock.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: mplock.h,v 1.1 2007/11/26 17:15:29 art Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount; no fanciness allowed.
+ */
+
+struct __mp_lock {
+ __cpu_simple_lock_t mpl_lock;
+ cpuid_t mpl_cpu;
+ int mpl_count;
+};
+
+static __inline void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ __cpu_simple_lock_init(&lock->mpl_lock);
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* The spin timeout is CPU-dependent, so it needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+static __inline void
+__mp_lock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+#ifndef MP_LOCKDEBUG
+ __cpu_simple_lock(&lock->mpl_lock);
+#else
+ {
+ int got_it;
+ do {
+ int ticks = __mp_lock_spinout;
+
+ do {
+ got_it = __cpu_simple_lock_try(
+ &lock->mpl_lock);
+ } while (!got_it && ticks-- > 0);
+ if (!got_it) {
+ db_printf(
+ "__mp_lock(0x%x): lock spun out",
+ lock);
+ Debugger();
+ }
+ } while (!got_it);
+ }
+#endif
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+}
+
+/*
+ * Try to acquire the lock; if another CPU holds it, store that CPU's
+ * id in the call-by-reference cpu parameter. Return true if acquired.
+ */
+static __inline int
+__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+ if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
+ *cpu = lock->mpl_cpu;
+ splx(s);
+ return 0;
+ }
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+ return 1;
+}
+
+static __inline void
+__mp_unlock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ if (--lock->mpl_count == 0) {
+ lock->mpl_cpu = LK_NOCPU;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ }
+ splx(s);
+}
+
+static __inline int
+__mp_release_all(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ splx(s);
+ return (rv);
+}
+
+static __inline int
+__mp_release_all_but_one(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count - 1;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_count = 1;
+ splx(s);
+ return (rv);
+}
+
+static __inline void
+__mp_acquire_count(struct __mp_lock *lock, int count) {
+ int s = spllock();
+
+ while (count--)
+ __mp_lock(lock);
+ splx(s);
+}
+
+static __inline int
+__mp_lock_held(struct __mp_lock *lock) {
+ return lock->mpl_count && lock->mpl_cpu == cpu_number();
+}
+
+#endif /* !_MACHINE_MPLOCK_H_ */
diff --git a/sys/arch/mvme88k/include/mplock.h b/sys/arch/mvme88k/include/mplock.h
new file mode 100644
index 00000000000..8f87c895aae
--- /dev/null
+++ b/sys/arch/mvme88k/include/mplock.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: mplock.h,v 1.1 2007/11/26 17:15:29 art Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount; no fanciness allowed.
+ */
+
+struct __mp_lock {
+ __cpu_simple_lock_t mpl_lock;
+ cpuid_t mpl_cpu;
+ int mpl_count;
+};
+
+static __inline void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ __cpu_simple_lock_init(&lock->mpl_lock);
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* The spin timeout is CPU-dependent, so it needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+static __inline void
+__mp_lock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+#ifndef MP_LOCKDEBUG
+ __cpu_simple_lock(&lock->mpl_lock);
+#else
+ {
+ int got_it;
+ do {
+ int ticks = __mp_lock_spinout;
+
+ do {
+ got_it = __cpu_simple_lock_try(
+ &lock->mpl_lock);
+ } while (!got_it && ticks-- > 0);
+ if (!got_it) {
+ db_printf(
+ "__mp_lock(0x%x): lock spun out",
+ lock);
+ Debugger();
+ }
+ } while (!got_it);
+ }
+#endif
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+}
+
+/*
+ * Try to acquire the lock; if another CPU holds it, store that CPU's
+ * id in the call-by-reference cpu parameter. Return true if acquired.
+ */
+static __inline int
+__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+ if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
+ *cpu = lock->mpl_cpu;
+ splx(s);
+ return 0;
+ }
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+ return 1;
+}
+
+static __inline void
+__mp_unlock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ if (--lock->mpl_count == 0) {
+ lock->mpl_cpu = LK_NOCPU;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ }
+ splx(s);
+}
+
+static __inline int
+__mp_release_all(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ splx(s);
+ return (rv);
+}
+
+static __inline int
+__mp_release_all_but_one(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count - 1;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_count = 1;
+ splx(s);
+ return (rv);
+}
+
+static __inline void
+__mp_acquire_count(struct __mp_lock *lock, int count) {
+ int s = spllock();
+
+ while (count--)
+ __mp_lock(lock);
+ splx(s);
+}
+
+static __inline int
+__mp_lock_held(struct __mp_lock *lock) {
+ return lock->mpl_count && lock->mpl_cpu == cpu_number();
+}
+
+#endif /* !_MACHINE_MPLOCK_H_ */
diff --git a/sys/arch/sparc64/include/mplock.h b/sys/arch/sparc64/include/mplock.h
new file mode 100644
index 00000000000..8f87c895aae
--- /dev/null
+++ b/sys/arch/sparc64/include/mplock.h
@@ -0,0 +1,191 @@
+/* $OpenBSD: mplock.h,v 1.1 2007/11/26 17:15:29 art Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount; no fanciness allowed.
+ */
+
+struct __mp_lock {
+ __cpu_simple_lock_t mpl_lock;
+ cpuid_t mpl_cpu;
+ int mpl_count;
+};
+
+static __inline void
+__mp_lock_init(struct __mp_lock *lock)
+{
+ __cpu_simple_lock_init(&lock->mpl_lock);
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+extern void Debugger(void);
+extern int db_printf(const char *, ...)
+ __attribute__((__format__(__kprintf__,1,2)));
+
+/* The spin timeout is CPU-dependent, so it needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+static __inline void
+__mp_lock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+#ifndef MP_LOCKDEBUG
+ __cpu_simple_lock(&lock->mpl_lock);
+#else
+ {
+ int got_it;
+ do {
+ int ticks = __mp_lock_spinout;
+
+ do {
+ got_it = __cpu_simple_lock_try(
+ &lock->mpl_lock);
+ } while (!got_it && ticks-- > 0);
+ if (!got_it) {
+ db_printf(
+ "__mp_lock(0x%x): lock spun out",
+ lock);
+ Debugger();
+ }
+ } while (!got_it);
+ }
+#endif
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+}
+
+/*
+ * Try to acquire the lock; if another CPU holds it, store that CPU's
+ * id in the call-by-reference cpu parameter. Return true if acquired.
+ */
+static __inline int
+__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
+{
+ int s = spllock();
+
+ if (lock->mpl_cpu != cpu_number()) {
+ if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
+ *cpu = lock->mpl_cpu;
+ splx(s);
+ return 0;
+ }
+ lock->mpl_cpu = cpu_number();
+ }
+ lock->mpl_count++;
+ splx(s);
+ return 1;
+}
+
+static __inline void
+__mp_unlock(struct __mp_lock *lock)
+{
+ int s = spllock();
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ if (--lock->mpl_count == 0) {
+ lock->mpl_cpu = LK_NOCPU;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ }
+ splx(s);
+}
+
+static __inline int
+__mp_release_all(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_cpu = LK_NOCPU;
+ lock->mpl_count = 0;
+ __cpu_simple_unlock(&lock->mpl_lock);
+ splx(s);
+ return (rv);
+}
+
+static __inline int
+__mp_release_all_but_one(struct __mp_lock *lock) {
+ int s = spllock();
+ int rv = lock->mpl_count - 1;
+
+#ifdef MP_LOCKDEBUG
+ if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
+ db_printf(
+ "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
+ lock);
+ Debugger();
+ }
+#endif
+
+ lock->mpl_count = 1;
+ splx(s);
+ return (rv);
+}
+
+static __inline void
+__mp_acquire_count(struct __mp_lock *lock, int count) {
+ int s = spllock();
+
+ while (count--)
+ __mp_lock(lock);
+ splx(s);
+}
+
+static __inline int
+__mp_lock_held(struct __mp_lock *lock) {
+ return lock->mpl_count && lock->mpl_cpu == cpu_number();
+}
+
+#endif /* !_MACHINE_MPLOCK_H_ */
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index 83c7240c739..84a66fa7510 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.1 2007/10/10 15:53:53 art Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.2 2007/11/26 17:15:29 art Exp $ */
/*
* Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
*
@@ -175,7 +175,7 @@ sched_init_runqueues(void)
TAILQ_INIT(&sched_qs[i]);
#ifdef MULTIPROCESSOR
- SIMPLE_LOCK_INIT(&sched_lock);
+ __mp_lock_init(&sched_lock);
#endif
}
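
With sched_lock now a struct __mp_lock, callers pair it with spl protection.
A hedged sketch of such a wrapper (the EXAMPLE_* names are hypothetical; the
real SCHED_LOCK/SCHED_UNLOCK definitions live outside this diff):

	#ifdef MULTIPROCESSOR
	extern struct __mp_lock sched_lock;

	#define EXAMPLE_SCHED_LOCK(s)						\
	do {									\
		(s) = splsched();	/* block scheduling interrupts first */	\
		__mp_lock(&sched_lock);						\
	} while (0)

	#define EXAMPLE_SCHED_UNLOCK(s)						\
	do {									\
		__mp_unlock(&sched_lock);					\
		splx(s);		/* restore the previous spl */		\
	} while (0)
	#endif
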
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 1954f3a3ce7..80d418201ba 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.14 2007/10/11 10:34:08 art Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.15 2007/11/26 17:15:29 art Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -57,7 +57,9 @@
int lbolt; /* once a second sleep address */
int rrticks_init; /* # of hardclock ticks per roundrobin() */
-struct SIMPLELOCK sched_lock;
+#ifdef MULTIPROCESSOR
+struct __mp_lock sched_lock;
+#endif
void scheduler_start(void);
diff --git a/sys/sys/mplock.h b/sys/sys/mplock.h
index 0357c4ef48b..92dc8066bdf 100644
--- a/sys/sys/mplock.h
+++ b/sys/sys/mplock.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: mplock.h,v 1.8 2005/06/17 22:33:34 niklas Exp $ */
+/* $OpenBSD: mplock.h,v 1.9 2007/11/26 17:15:29 art Exp $ */
/*
* Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
@@ -27,181 +27,21 @@
#ifndef _MPLOCK_H_
#define _MPLOCK_H_
+#ifdef notyet
/*
- * Really simple spinlock implementation with recursive capabilities.
- * Correctness is paramount, no fancyness allowed.
+ * Enable the prototypes once the architectures stop playing around
+ * with inlines.
*/
-
-struct __mp_lock {
- __cpu_simple_lock_t mpl_lock;
- cpuid_t mpl_cpu;
- int mpl_count;
-};
-
-static __inline void __mp_lock_init(struct __mp_lock *);
-static __inline void __mp_lock(struct __mp_lock *);
-static __inline void __mp_unlock(struct __mp_lock *);
-static __inline int __mp_release_all(struct __mp_lock *);
-static __inline void __mp_acquire_count(struct __mp_lock *, int);
-static __inline int __mp_lock_held(struct __mp_lock *);
-
-/*
- * XXX Simplelocks macros used at "trusted" places.
- */
-#define SIMPLELOCK __mp_lock
-#define SIMPLE_LOCK_INIT __mp_lock_init
-#define SIMPLE_LOCK __mp_lock
-#define SIMPLE_UNLOCK __mp_unlock
-
-static __inline void
-__mp_lock_init(struct __mp_lock *lock)
-{
- __cpu_simple_lock_init(&lock->mpl_lock);
- lock->mpl_cpu = LK_NOCPU;
- lock->mpl_count = 0;
-}
-
-#if defined(MP_LOCKDEBUG)
-#ifndef DDB
-#error "MP_LOCKDEBUG requires DDB"
-#endif
-
-extern void Debugger(void);
-extern int db_printf(const char *, ...)
- __attribute__((__format__(__kprintf__,1,2)));
-
-/* CPU-dependent timing, needs this to be settable from ddb. */
-extern int __mp_lock_spinout;
-#endif
-
-static __inline void
-__mp_lock(struct __mp_lock *lock)
-{
- int s = spllock();
-
- if (lock->mpl_cpu != cpu_number()) {
-#ifndef MP_LOCKDEBUG
- __cpu_simple_lock(&lock->mpl_lock);
-#else
- {
- int got_it;
- do {
- int ticks = __mp_lock_spinout;
-
- do {
- got_it = __cpu_simple_lock_try(
- &lock->mpl_lock);
- } while (!got_it && ticks-- > 0);
- if (!got_it) {
- db_printf(
- "__mp_lock(0x%x): lock spun out",
- lock);
- Debugger();
- }
- } while (!got_it);
- }
-#endif
- lock->mpl_cpu = cpu_number();
- }
- lock->mpl_count++;
- splx(s);
-}
-
-/*
- * Try to acquire the lock, if another cpu has it, fill it in the
- * call-by-reference cpu parameter. Return true if acquired.
- */
-static __inline int
-__mp_lock_try(struct __mp_lock *lock, cpuid_t *cpu)
-{
- int s = spllock();
-
- if (lock->mpl_cpu != cpu_number()) {
- if (!__cpu_simple_lock_try(&lock->mpl_lock)) {
- *cpu = lock->mpl_cpu;
- splx(s);
- return 0;
- }
- lock->mpl_cpu = cpu_number();
- }
- lock->mpl_count++;
- splx(s);
- return 1;
-}
-
-static __inline void
-__mp_unlock(struct __mp_lock *lock)
-{
- int s = spllock();
-
-#ifdef MP_LOCKDEBUG
- if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
- db_printf("__mp_unlock(0x%x): releasing not locked lock\n",
- lock);
- Debugger();
- }
-#endif
-
- if (--lock->mpl_count == 0) {
- lock->mpl_cpu = LK_NOCPU;
- __cpu_simple_unlock(&lock->mpl_lock);
- }
- splx(s);
-}
-
-static __inline int
-__mp_release_all(struct __mp_lock *lock) {
- int s = spllock();
- int rv = lock->mpl_count;
-
-#ifdef MP_LOCKDEBUG
- if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
- db_printf(
- "__mp_release_all(0x%x): releasing not locked lock\n",
- lock);
- Debugger();
- }
-#endif
-
- lock->mpl_cpu = LK_NOCPU;
- lock->mpl_count = 0;
- __cpu_simple_unlock(&lock->mpl_lock);
- splx(s);
- return (rv);
-}
-
-static __inline int
-__mp_release_all_but_one(struct __mp_lock *lock) {
- int s = spllock();
- int rv = lock->mpl_count - 1;
-
-#ifdef MP_LOCKDEBUG
- if (lock->mpl_count == 0 || lock->mpl_cpu == LK_NOCPU) {
- db_printf(
- "__mp_release_all_but_one(0x%x): releasing not locked lock\n",
- lock);
- Debugger();
- }
+void __mp_lock_init(struct __mp_lock *);
+void __mp_lock(struct __mp_lock *);
+void __mp_unlock(struct __mp_lock *);
+int __mp_release_all(struct __mp_lock *);
+int __mp_release_all_but_one(struct __mp_lock *);
+void __mp_acquire_count(struct __mp_lock *, int);
+int __mp_lock_held(struct __mp_lock *);
#endif
- lock->mpl_count = 1;
- splx(s);
- return (rv);
-}
-
-static __inline void
-__mp_acquire_count(struct __mp_lock *lock, int count) {
- int s = spllock();
-
- while (count--)
- __mp_lock(lock);
- splx(s);
-}
-
-static __inline int
-__mp_lock_held(struct __mp_lock *lock) {
- return lock->mpl_count && lock->mpl_cpu == cpu_number();
-}
+#include <machine/mplock.h>
extern struct __mp_lock kernel_lock;
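
The machine-independent header is thus reduced to the kernel_lock declaration
plus the per-architecture include. A short sketch of how code can use the API
that every <machine/mplock.h> must now provide (illustrative only; the
KERNEL_LOCK()/KERNEL_UNLOCK() wrappers built on kernel_lock are defined
elsewhere and not shown in this diff):

	#include <sys/mplock.h>

	void
	example_with_kernel_lock(void)
	{
		__mp_lock(&kernel_lock);	/* recurses safely on the same CPU */
		KASSERT(__mp_lock_held(&kernel_lock));
		/* ... access data protected by the giant lock ... */
		__mp_unlock(&kernel_lock);
	}
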