diff options
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/amd64/amd64/lock_machdep.c | 95
-rw-r--r--  sys/arch/amd64/include/mplock.h     | 12
2 files changed, 51 insertions(+), 56 deletions(-)
diff --git a/sys/arch/amd64/amd64/lock_machdep.c b/sys/arch/amd64/amd64/lock_machdep.c index 641b973a83f..3afdf49e8f5 100644 --- a/sys/arch/amd64/amd64/lock_machdep.c +++ b/sys/arch/amd64/amd64/lock_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: lock_machdep.c,v 1.4 2013/12/05 01:28:45 uebayasi Exp $ */ +/* $OpenBSD: lock_machdep.c,v 1.5 2014/03/14 02:08:57 dlg Exp $ */ /* * Copyright (c) 2007 Artur Grabowski <art@openbsd.org> @@ -28,10 +28,11 @@ #include <ddb/db_output.h> void -__mp_lock_init(struct __mp_lock *lock) +__mp_lock_init(struct __mp_lock *mpl) { - lock->mpl_cpu = NULL; - lock->mpl_count = 0; + memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus)); + mpl->mpl_users = 0; + mpl->mpl_ticket = 0; } #if defined(MP_LOCKDEBUG) @@ -44,15 +45,15 @@ extern int __mp_lock_spinout; #endif static __inline void -__mp_lock_spin(struct __mp_lock *mpl) +__mp_lock_spin(struct __mp_lock *mpl, u_int me) { #ifndef MP_LOCKDEBUG - while (mpl->mpl_count != 0) + while (mpl->mpl_ticket != me) SPINLOCK_SPIN_HOOK; #else int ticks = __mp_lock_spinout; - while (mpl->mpl_count != 0 && --ticks > 0) + while (mpl->mpl_ticket != me && --ticks > 0) SPINLOCK_SPIN_HOOK; if (ticks == 0) { @@ -62,43 +63,35 @@ __mp_lock_spin(struct __mp_lock *mpl) #endif } +static inline u_int +fetch_and_add(u_int *var, u_int value) +{ + asm volatile("lock; xaddl %%eax, %2;" + : "=a" (value) + : "a" (value), "m" (*var) + : "memory"); + + return (value); +} + void __mp_lock(struct __mp_lock *mpl) { - /* - * Please notice that mpl_count gets incremented twice for the - * first lock. This is on purpose. The way we release the lock - * in mp_unlock is to decrement the mpl_count and then check if - * the lock should be released. Since mpl_count is what we're - * spinning on, decrementing it in mpl_unlock to 0 means that - * we can't clear mpl_cpu, because we're no longer holding the - * lock. 
In theory mpl_cpu doesn't need to be cleared, but it's - * safer to clear it and besides, setting mpl_count to 2 on the - * first lock makes most of this code much simpler. - */ - - while (1) { - long rf = read_rflags(); - - disable_intr(); - if (x86_atomic_cas_ul(&mpl->mpl_count, 0, 1) == 0) { - mpl->mpl_cpu = curcpu(); - } - - if (mpl->mpl_cpu == curcpu()) { - mpl->mpl_count++; - write_rflags(rf); - break; - } - write_rflags(rf); - - __mp_lock_spin(mpl); - } + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; + long rf = read_rflags(); + + disable_intr(); + if (cpu->mplc_depth++ == 0) + cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1); + write_rflags(rf); + + __mp_lock_spin(mpl, cpu->mplc_ticket); } void __mp_unlock(struct __mp_lock *mpl) { + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; long rf = read_rflags(); #ifdef MP_LOCKDEBUG @@ -109,29 +102,22 @@ __mp_unlock(struct __mp_lock *mpl) #endif disable_intr(); - if (--mpl->mpl_count == 1) { - mpl->mpl_cpu = NULL; - mpl->mpl_count = 0; - } + if (--cpu->mplc_depth == 0) + mpl->mpl_ticket++; write_rflags(rf); } int __mp_release_all(struct __mp_lock *mpl) { - int rv = mpl->mpl_count - 1; + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; long rf = read_rflags(); - -#ifdef MP_LOCKDEBUG - if (mpl->mpl_cpu != curcpu()) { - db_printf("__mp_release_all(%p): not held lock\n", mpl); - Debugger(); - } -#endif + int rv; disable_intr(); - mpl->mpl_cpu = NULL; - mpl->mpl_count = 0; + rv = cpu->mplc_depth; + cpu->mplc_depth = 0; + mpl->mpl_ticket++; write_rflags(rf); return (rv); @@ -140,7 +126,8 @@ __mp_release_all(struct __mp_lock *mpl) int __mp_release_all_but_one(struct __mp_lock *mpl) { - int rv = mpl->mpl_count - 2; + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; + int rv = cpu->mplc_depth - 1; #ifdef MP_LOCKDEBUG if (mpl->mpl_cpu != curcpu()) { @@ -149,7 +136,7 @@ __mp_release_all_but_one(struct __mp_lock *mpl) } #endif - mpl->mpl_count = 2; + cpu->mplc_depth = 1; return (rv); 
} @@ -164,6 +151,8 @@ __mp_acquire_count(struct __mp_lock *mpl, int count) int __mp_lock_held(struct __mp_lock *mpl) { - return mpl->mpl_cpu == curcpu(); + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; + + return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0); } diff --git a/sys/arch/amd64/include/mplock.h b/sys/arch/amd64/include/mplock.h index 82b0d3212f9..118fabe0a2a 100644 --- a/sys/arch/amd64/include/mplock.h +++ b/sys/arch/amd64/include/mplock.h @@ -1,4 +1,4 @@ -/* $OpenBSD: mplock.h,v 1.2 2007/11/26 23:50:03 art Exp $ */ +/* $OpenBSD: mplock.h,v 1.3 2014/03/14 02:08:57 dlg Exp $ */ /* * Copyright (c) 2004 Niklas Hallqvist. All rights reserved. @@ -27,9 +27,15 @@ #ifndef _MACHINE_MPLOCK_H_ #define _MACHINE_MPLOCK_H_ +struct __mp_lock_cpu { + u_int mplc_ticket; + u_int mplc_depth; +}; + struct __mp_lock { - volatile struct cpu_info *mpl_cpu; - volatile long mpl_count; + struct __mp_lock_cpu mpl_cpus[MAXCPUS]; + volatile u_int mpl_ticket; + u_int mpl_users; }; #ifndef _LOCORE |