author     Joel Sing <jsing@cvs.openbsd.org>    2010-03-25 14:26:22 +0000
committer  Joel Sing <jsing@cvs.openbsd.org>    2010-03-25 14:26:22 +0000
commit     a48499d79004ef87ba1b40b37b59d92a7ba04ef2 (patch)
tree       c636f26e8edd668bd0b20f1eb8b871a140ad1e5a /sys
parent     4a5e49e37289163dd98d7aa1d9bb91b541c9863c (diff)
Add MP lock support for OpenBSD/hppa.
ok kettenis@
Diffstat (limited to 'sys')
-rw-r--r--   sys/arch/hppa/conf/files.hppa          3
-rw-r--r--   sys/arch/hppa/hppa/lock_machdep.c    203
-rw-r--r--   sys/arch/hppa/include/mplock.h        56
3 files changed, 261 insertions, 1 deletions
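
PA-RISC has no native compare-and-swap instruction; the new lock_machdep.c below builds one out of ldcws ("load and clear word"), which atomically loads a 16-byte-aligned word and clears it to zero. That is why mpl_lock is a four-int (16-byte) array from which an aligned word is picked, and why MPL_LOCKED is 0 while MPL_UNLOCKED is 1. The following userland sketch, which is not part of this commit, illustrates that construction; the *_model names are hypothetical, and GCC/Clang __atomic builtins (C11) stand in for the real instruction and the kernel's sync barriers:

/*
 * Userland sketch of the __cpu_cas() construction in the diff below.
 * ldcws_model() stands in for the hppa "ldcws" instruction: atomically
 * load a 16-byte-aligned word and leave it zeroed (= locked).
 */
#include <stdio.h>

#define MPL_LOCKED	0
#define MPL_UNLOCKED	1

/* ldcws needs a 16-byte aligned word; mpl_lock[4] exists to provide one. */
static _Alignas(16) volatile int lock_word = MPL_UNLOCKED;

static int
ldcws_model(volatile int *w)
{
	/* Return the old value and leave the word at MPL_LOCKED. */
	return __atomic_exchange_n(w, MPL_LOCKED, __ATOMIC_ACQUIRE);
}

/*
 * Compare-and-swap built from the ldcws semaphore word: take the word,
 * do the compare-and-set of *addr, then release the word.  Returns 0 on
 * success and 1 on failure, mirroring __cpu_cas() below.
 */
static int
cas_model(volatile unsigned long *addr, unsigned long old, unsigned long new)
{
	int ret = 1;

	if (ldcws_model(&lock_word) != MPL_UNLOCKED)
		return ret;		/* semaphore word already held */

	if (*addr == old) {
		*addr = new;
		ret = 0;
	}
	__atomic_store_n(&lock_word, MPL_UNLOCKED, __ATOMIC_RELEASE);
	return ret;
}

int
main(void)
{
	volatile unsigned long count = 0;

	printf("cas 0->1: %s\n", cas_model(&count, 0, 1) == 0 ? "ok" : "fail");
	printf("cas 0->1 again: %s\n", cas_model(&count, 0, 1) == 0 ? "ok" : "fail");
	return 0;
}

A failed attempt simply returns 1 and lets the caller retry, which is exactly how __mp_lock() below uses __cpu_cas() inside its spin loop.
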
diff --git a/sys/arch/hppa/conf/files.hppa b/sys/arch/hppa/conf/files.hppa
index 88d641f032d..dfbb5dc7609 100644
--- a/sys/arch/hppa/conf/files.hppa
+++ b/sys/arch/hppa/conf/files.hppa
@@ -1,4 +1,4 @@
-# $OpenBSD: files.hppa,v 1.80 2008/07/16 16:32:08 miod Exp $
+# $OpenBSD: files.hppa,v 1.81 2010/03/25 14:26:21 jsing Exp $
 #
 # hppa-specific configuration info
 #
@@ -307,6 +307,7 @@ file arch/hppa/hppa/conf.c
 file arch/hppa/hppa/db_interface.c ddb
 file arch/hppa/hppa/db_disasm.c ddb
 file arch/hppa/hppa/disksubr.c disk
+file arch/hppa/hppa/lock_machdep.c multiprocessor
 file arch/hppa/hppa/machdep.c
 file arch/hppa/hppa/mutex.c
 file arch/hppa/hppa/pmap.c
diff --git a/sys/arch/hppa/hppa/lock_machdep.c b/sys/arch/hppa/hppa/lock_machdep.c
new file mode 100644
index 00000000000..1480ae45f6d
--- /dev/null
+++ b/sys/arch/hppa/hppa/lock_machdep.c
@@ -0,0 +1,203 @@
+/* $OpenBSD: lock_machdep.c,v 1.1 2010/03/25 14:26:21 jsing Exp $ */
+
+/*
+ * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/systm.h>
+
+#include <machine/atomic.h>
+#include <machine/intr.h>
+#include <machine/lock.h>
+#include <machine/psl.h>
+
+#include <ddb/db_output.h>
+
+static __inline int
+__cpu_cas(struct __mp_lock *mpl, volatile unsigned long *addr,
+    unsigned long old, unsigned long new)
+{
+	volatile int *lock = (int *)(((vaddr_t)mpl->mpl_lock + 0xf) & ~0xf);
+	volatile register_t locked = 0;
+	int ret = 1;
+
+	/* Note: lock must be 16-byte aligned. */
+	asm volatile (
+		"ldcws 0(%2), %0"
+		: "=&r" (locked), "+m" (lock)
+		: "r" (lock)
+	);
+
+	if (locked == MPL_UNLOCKED) {
+		if (*addr == old) {
+			*addr = new;
+			asm("sync" ::: "memory");
+			ret = 0;
+		}
+		*lock = MPL_UNLOCKED;
+	}
+
+	return ret;
+}
+
+void
+__mp_lock_init(struct __mp_lock *lock)
+{
+	lock->mpl_lock[0] = MPL_UNLOCKED;
+	lock->mpl_lock[1] = MPL_UNLOCKED;
+	lock->mpl_lock[2] = MPL_UNLOCKED;
+	lock->mpl_lock[3] = MPL_UNLOCKED;
+	lock->mpl_cpu = NULL;
+	lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+/* CPU-dependent timing, this needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+#define SPINLOCK_SPIN_HOOK /**/
+
+static __inline void
+__mp_lock_spin(struct __mp_lock *mpl)
+{
+#ifndef MP_LOCKDEBUG
+	while (mpl->mpl_count != 0)
+		SPINLOCK_SPIN_HOOK;
+#else
+	int ticks = __mp_lock_spinout;
+
+	while (mpl->mpl_count != 0 && ticks-- > 0)
+		SPINLOCK_SPIN_HOOK;
+
+	if (ticks == 0) {
+		db_printf("__mp_lock(0x%x): lock spun out", mpl);
+		Debugger();
+	}
+#endif
+}
+
+void
+__mp_lock(struct __mp_lock *mpl)
+{
+	int s;
+
+	/*
+	 * Please notice that mpl_count gets incremented twice for the
+	 * first lock. This is on purpose. The way we release the lock
+	 * in mp_unlock is to decrement the mpl_count and then check if
+	 * the lock should be released. Since mpl_count is what we're
+	 * spinning on, decrementing it in mpl_unlock to 0 means that
+	 * we can't clear mpl_cpu, because we're no longer holding the
+	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
+	 * safer to clear it and besides, setting mpl_count to 2 on the
+	 * first lock makes most of this code much simpler.
+	 */
+
+	while (1) {
+		s = hppa_intr_disable();
+		if (__cpu_cas(mpl, &mpl->mpl_count, 0, 1) == 0) {
+			__asm __volatile("sync" ::: "memory");
+			mpl->mpl_cpu = curcpu();
+		}
+		if (mpl->mpl_cpu == curcpu()) {
+			mpl->mpl_count++;
+			hppa_intr_enable(s);
+			break;
+		}
+		hppa_intr_enable(s);
+
+		__mp_lock_spin(mpl);
+	}
+}
+
+void
+__mp_unlock(struct __mp_lock *mpl)
+{
+	int s;
+
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_unlock(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	s = hppa_intr_disable();
+	if (--mpl->mpl_count == 1) {
+		mpl->mpl_cpu = NULL;
+		__asm __volatile("sync" ::: "memory");
+		mpl->mpl_count = 0;
+	}
+	hppa_intr_enable(s);
+}
+
+int
+__mp_release_all(struct __mp_lock *mpl)
+{
+	int rv = mpl->mpl_count - 1;
+	int s;
+
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_release_all(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	s = hppa_intr_disable();
+	mpl->mpl_cpu = NULL;
+	__asm __volatile("sync" ::: "memory");
+	mpl->mpl_count = 0;
+	hppa_intr_enable(s);
+
+	return (rv);
+}
+
+int
+__mp_release_all_but_one(struct __mp_lock *mpl)
+{
+	int rv = mpl->mpl_count - 2;
+
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	mpl->mpl_count = 2;
+
+	return (rv);
+}
+
+void
+__mp_acquire_count(struct __mp_lock *mpl, int count)
+{
+	while (count--)
+		__mp_lock(mpl);
+}
+
+int
+__mp_lock_held(struct __mp_lock *mpl)
+{
+	return mpl->mpl_cpu == curcpu();
+}
diff --git a/sys/arch/hppa/include/mplock.h b/sys/arch/hppa/include/mplock.h
new file mode 100644
index 00000000000..ec5bb03c16d
--- /dev/null
+++ b/sys/arch/hppa/include/mplock.h
@@ -0,0 +1,56 @@
+/* $OpenBSD: mplock.h,v 1.1 2010/03/25 14:26:21 jsing Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount, no fancyness allowed.
+ */
+
+#define MPL_LOCKED 0
+#define MPL_UNLOCKED 1
+
+struct __mp_lock {
+	volatile int mpl_lock[4];
+	volatile struct cpu_info *mpl_cpu;
+	volatile long mpl_count;
+};
+
+#ifndef _LOCORE
+
+void __mp_lock_init(struct __mp_lock *);
+void __mp_lock(struct __mp_lock *);
+void __mp_unlock(struct __mp_lock *);
+int __mp_release_all(struct __mp_lock *);
+int __mp_release_all_but_one(struct __mp_lock *);
+void __mp_acquire_count(struct __mp_lock *, int);
+int __mp_lock_held(struct __mp_lock *);
+
+#endif
+
+#endif /* !_MACHINE_MPLOCK_H */
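
For reference, the comment in __mp_lock() above explains the recursion scheme: the first acquisition leaves mpl_count at 2, so that __mp_unlock() can clear mpl_cpu while mpl_count is still nonzero (other CPUs keep spinning) and only then drop the count to 0. The single-threaded sketch below, not part of this commit, shows just that bookkeeping; struct mpl_model, lock_model() and unlock_model() are hypothetical stand-ins with no atomics or interrupt masking, so it illustrates the counting discipline only, not the synchronization:

/*
 * Sketch of the mpl_count bookkeeping described in the __mp_lock()
 * comment above.  "owner" stands in for mpl_cpu, "count" for mpl_count.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct mpl_model {
	const char *owner;	/* mpl_cpu */
	long count;		/* mpl_count */
};

static void
lock_model(struct mpl_model *m, const char *self)
{
	if (m->count == 0) {
		m->owner = self;	/* won the CAS 0 -> 1 in the real code */
		m->count = 1;
	}
	assert(m->owner == self);	/* otherwise the real code would spin */
	m->count++;			/* first acquisition ends at 2 */
}

static void
unlock_model(struct mpl_model *m, const char *self)
{
	assert(m->owner == self);
	if (--m->count == 1) {
		/* Still nonzero, so clearing the owner here is safe. */
		m->owner = NULL;
		m->count = 0;
	}
}

int
main(void)
{
	struct mpl_model m = { NULL, 0 };
	const char *cpu0 = "cpu0";

	lock_model(&m, cpu0);		/* count: 0 -> 2 */
	lock_model(&m, cpu0);		/* recursive: 2 -> 3 */
	printf("count after two locks: %ld\n", m.count);
	unlock_model(&m, cpu0);		/* 3 -> 2 */
	unlock_model(&m, cpu0);		/* 2 -> 1 -> released */
	printf("count after two unlocks: %ld, owner %s\n",
	    m.count, m.owner ? m.owner : "(none)");
	return 0;
}
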