summary | refs | log | tree | commit | diff
path: root/sys/arch/i386
diff options
context:
space:
mode:
author: David Gwynne <dlg@cvs.openbsd.org> 2014-03-14 03:01:40 +0000
committer: David Gwynne <dlg@cvs.openbsd.org> 2014-03-14 03:01:40 +0000
commit: f1678ffc1d41703135c2217a06c0bcc851d7dbca (patch)
tree: 3e8d1a58a100e07519917d38465b850cdad33368 /sys/arch/i386
parent: c00b06992a60bb17dba263c50905ab22e0f5211d (diff)
rework mplocks to use tickets instead of spinning. this provides
fairer access to the kernel lock between logical cpus, especially in multi socket systems. i first wrote this diff in 2011. ok n2k14 for post 5.5 (deraadt@ and kettenis@ in particular)
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/i386/lock_machdep.c  104
-rw-r--r--  sys/arch/i386/include/mplock.h      10
2 files changed, 51 insertions(+), 63 deletions(-)
diff --git a/sys/arch/i386/i386/lock_machdep.c b/sys/arch/i386/i386/lock_machdep.c
index 71b1810a311..32cfc2ee6f4 100644
--- a/sys/arch/i386/i386/lock_machdep.c
+++ b/sys/arch/i386/i386/lock_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock_machdep.c,v 1.13 2013/12/05 01:28:45 uebayasi Exp $ */
+/* $OpenBSD: lock_machdep.c,v 1.14 2014/03/14 03:01:38 dlg Exp $ */
/* $NetBSD: lock_machdep.c,v 1.1.2.3 2000/05/03 14:40:30 sommerfeld Exp $ */
/*-
@@ -53,10 +53,11 @@ rw_cas_486(volatile unsigned long *p, unsigned long o, unsigned long n)
#ifdef MULTIPROCESSOR
void
-__mp_lock_init(struct __mp_lock *lock)
+__mp_lock_init(struct __mp_lock *mpl)
{
- lock->mpl_cpu = NULL;
- lock->mpl_count = 0;
+ memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
+ mpl->mpl_users = 0;
+ mpl->mpl_ticket = 0;
}
#if defined(MP_LOCKDEBUG)
@@ -72,16 +73,27 @@ extern int db_printf(const char *, ...)
extern int __mp_lock_spinout;
#endif
+static inline u_int
+fetch_and_add(u_int *var, u_int value)
+{
+ asm volatile("lock; xaddl %%eax, %2;"
+ : "=a" (value)
+ : "a" (value), "m" (*var)
+ : "memory");
+
+ return (value);
+}
+
static __inline void
-__mp_lock_spin(struct __mp_lock *mpl)
+__mp_lock_spin(struct __mp_lock *mpl, u_int me)
{
#ifndef MP_LOCKDEBUG
- while (mpl->mpl_count != 0)
+ while (mpl->mpl_ticket != me)
SPINLOCK_SPIN_HOOK;
#else
int ticks = __mp_lock_spinout;
- while (mpl->mpl_count != 0 && --ticks > 0)
+ while (mpl->mpl_ticket != me && --ticks > 0)
SPINLOCK_SPIN_HOOK;
if (ticks == 0) {
@@ -94,40 +106,21 @@ __mp_lock_spin(struct __mp_lock *mpl)
void
__mp_lock(struct __mp_lock *mpl)
{
- /*
- * Please notice that mpl_count gets incremented twice for the
- * first lock. This is on purpose. The way we release the lock
- * in mp_unlock is to decrement the mpl_count and then check if
- * the lock should be released. Since mpl_count is what we're
- * spinning on, decrementing it in mpl_unlock to 0 means that
- * we can't clear mpl_cpu, because we're no longer holding the
- * lock. In theory mpl_cpu doesn't need to be cleared, but it's
- * safer to clear it and besides, setting mpl_count to 2 on the
- * first lock makes most of this code much simpler.
- */
-
- while (1) {
- int ef = read_eflags();
-
- disable_intr();
- if (i486_atomic_cas_int(&mpl->mpl_count, 0, 1) == 0) {
- mpl->mpl_cpu = curcpu();
- }
-
- if (mpl->mpl_cpu == curcpu()) {
- mpl->mpl_count++;
- write_eflags(ef);
- break;
- }
- write_eflags(ef);
-
- __mp_lock_spin(mpl);
- }
+ struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
+ long ef = read_eflags();
+
+ disable_intr();
+ if (cpu->mplc_depth++ == 0)
+ cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1);
+ write_eflags(ef);
+
+ __mp_lock_spin(mpl, cpu->mplc_ticket);
}
void
__mp_unlock(struct __mp_lock *mpl)
{
+ struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
int ef = read_eflags();
#ifdef MP_LOCKDEBUG
@@ -137,30 +130,23 @@ __mp_unlock(struct __mp_lock *mpl)
}
#endif
- disable_intr();
- if (--mpl->mpl_count == 1) {
- mpl->mpl_cpu = NULL;
- mpl->mpl_count = 0;
- }
+ disable_intr();
+ if (--cpu->mplc_depth == 0)
+ mpl->mpl_ticket++;
write_eflags(ef);
}
int
__mp_release_all(struct __mp_lock *mpl)
{
- int rv = mpl->mpl_count - 1;
+ struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
int ef = read_eflags();
-
-#ifdef MP_LOCKDEBUG
- if (mpl->mpl_cpu != curcpu()) {
- db_printf("__mp_release_all(%p): not held lock\n", mpl);
- Debugger();
- }
-#endif
+ int rv;
disable_intr();
- mpl->mpl_cpu = NULL;
- mpl->mpl_count = 0;
+ rv = cpu->mplc_depth;
+ cpu->mplc_depth = 0;
+ mpl->mpl_ticket++;
write_eflags(ef);
return (rv);
@@ -169,16 +155,10 @@ __mp_release_all(struct __mp_lock *mpl)
int
__mp_release_all_but_one(struct __mp_lock *mpl)
{
- int rv = mpl->mpl_count - 2;
+ struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
+ int rv = cpu->mplc_depth - 1;
-#ifdef MP_LOCKDEBUG
- if (mpl->mpl_cpu != curcpu()) {
- db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl);
- Debugger();
- }
-#endif
-
- mpl->mpl_count = 2;
+ cpu->mplc_depth = 1;
return (rv);
}
@@ -193,7 +173,9 @@ __mp_acquire_count(struct __mp_lock *mpl, int count)
int
__mp_lock_held(struct __mp_lock *mpl)
{
- return mpl->mpl_cpu == curcpu();
+ struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
+
+ return (cpu->mplc_ticket == mpl->mpl_ticket && cpu->mplc_depth > 0);
}
#endif
diff --git a/sys/arch/i386/include/mplock.h b/sys/arch/i386/include/mplock.h
index e1069d94e23..3f7d81fba96 100644
--- a/sys/arch/i386/include/mplock.h
+++ b/sys/arch/i386/include/mplock.h
@@ -17,9 +17,15 @@
#ifndef _MACHINE_MPLOCK_H_
#define _MACHINE_MPLOCK_H_
+struct __mp_lock_cpu {
+ u_int mplc_ticket;
+ u_int mplc_depth;
+};
+
struct __mp_lock {
- volatile struct cpu_info *mpl_cpu;
- volatile int mpl_count;
+ struct __mp_lock_cpu mpl_cpus[MAXCPUS];
+ volatile u_int mpl_ticket;
+ u_int mpl_users;
};
#ifndef _LOCORE