| author | Takuya ASADA <syuu@cvs.openbsd.org> | 2009-11-21 23:28:16 +0000 |
|---|---|---|
| committer | Takuya ASADA <syuu@cvs.openbsd.org> | 2009-11-21 23:28:16 +0000 |
| commit | 7e740391ceab0d63a1b796971df4ea68335dc9fb (patch) | |
| tree | c8f51648828d9b521e53392988882b096c39925f /sys/arch | |
| parent | bc0f5398bd77293c65c848f10f698710abc2a0d7 (diff) | |
mplock, rw_cas implemented
ok miod@
Diffstat (limited to 'sys/arch')
```
-rw-r--r--  sys/arch/mips64/include/lock.h      |  30
-rw-r--r--  sys/arch/mips64/mips64/clock.c      |   7
-rw-r--r--  sys/arch/mips64/mips64/interrupt.c  |   6
-rw-r--r--  sys/arch/mips64/mips64/softintr.c   |   4
-rw-r--r--  sys/arch/mips64/mips64/trap.c       |  53
-rw-r--r--  sys/arch/sgi/conf/files.sgi         |   3
-rw-r--r--  sys/arch/sgi/include/mplock.h       |  52
-rw-r--r--  sys/arch/sgi/sgi/lock_machdep.c     | 167
```
8 files changed, 309 insertions, 13 deletions
```diff
diff --git a/sys/arch/mips64/include/lock.h b/sys/arch/mips64/include/lock.h
index 7bd7cc71727..37b9961608b 100644
--- a/sys/arch/mips64/include/lock.h
+++ b/sys/arch/mips64/include/lock.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: lock.h,v 1.2 2009/09/15 04:54:31 syuu Exp $ */
+/* $OpenBSD: lock.h,v 1.3 2009/11/21 23:28:14 syuu Exp $ */
 
 /* public domain */
 
@@ -51,4 +51,32 @@ __cpu_simple_unlock(__cpu_simple_lock_t *l)
 	*l = __SIMPLELOCK_UNLOCKED;
 }
 
+#define rw_cas __cpu_cas
+static __inline int
+__cpu_cas(volatile unsigned long *addr, unsigned long old, unsigned long new)
+{
+	int success, scratch0, scratch1;
+
+	__asm volatile(
+		".set noreorder\n"
+		"1:\n"
+		"lld %0, (%5)\n"
+		"bne %0, %3, 2f\n"
+		"move %1, %4\n"
+		"scd %1, (%5)\n"
+		"beqz %1, 1b\n"
+		"move %2, $0\n"
+		"j 3f\n"
+		"nop\n"
+		"2:\n"
+		"daddi %2, $0, 1\n"
+		"3:\n"
+		".set reorder\n"
+		: "=&r"(scratch0), "=&r"(scratch1), "=&r"(success)
+		: "r"(old), "r"(new), "r"(addr)
+		: "memory");
+
+	return success;
+}
+
 #endif /* _MIPS64_LOCK_H_ */
```
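A note on the convention this hunk relies on: `__cpu_cas()` returns 0 when the `scd` store succeeds and 1 when `*addr` no longer equals `old`, which is exactly what the `__cpu_cas(&mpl->mpl_count, 0, 1) == 0` test in the new lock_machdep.c further down depends on. Below is a minimal single-threaded sketch of that calling convention in plain C; the `cas()` helper and the test program are hypothetical illustrations, not code from the tree.

```c
#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for __cpu_cas(): store "new" into *addr only if
 * it still holds "old".  This models only the return convention of the
 * lld/scd loop above: 0 = swap performed, non-zero = value had changed.
 */
static int
cas(volatile unsigned long *addr, unsigned long old, unsigned long new)
{
	if (*addr != old)
		return 1;	/* lost the race, no store performed */
	*addr = new;
	return 0;		/* swap succeeded */
}

int
main(void)
{
	volatile unsigned long word = 0;

	/* First claim: 0 -> 1 succeeds, so cas() reports 0. */
	assert(cas(&word, 0, 1) == 0);

	/* A second claim still expects 0, but the word is now 1: failure. */
	assert(cas(&word, 0, 1) != 0);

	printf("word = %lu\n", (unsigned long)word);	/* prints 1 */
	return 0;
}
```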
```diff
diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c
index 71474a3404e..68583c03b1a 100644
--- a/sys/arch/mips64/mips64/clock.c
+++ b/sys/arch/mips64/mips64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.26 2009/10/26 20:14:40 miod Exp $ */
+/* $OpenBSD: clock.c,v 1.27 2009/11/21 23:28:14 syuu Exp $ */
 
 /*
  * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -161,12 +161,15 @@ clock_int5(uint32_t mask, struct trap_frame *tf)
 	/*
 	 * Process clock interrupt unless it is currently masked.
 	 */
-	if (tf->ipl < IPL_CLOCK)
+	if (tf->ipl < IPL_CLOCK) {
+		KERNEL_LOCK();
 		while (pendingticks) {
 			clk_count.ec_count++;
 			hardclock(tf);
 			pendingticks--;
 		}
+		KERNEL_UNLOCK();
+	}
 
 	return CR_INT_5;	/* Clock is always on 5 */
 }
diff --git a/sys/arch/mips64/mips64/interrupt.c b/sys/arch/mips64/mips64/interrupt.c
index 3bb13031041..3fa043b939c 100644
--- a/sys/arch/mips64/mips64/interrupt.c
+++ b/sys/arch/mips64/mips64/interrupt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: interrupt.c,v 1.50 2009/11/19 20:15:04 miod Exp $ */
+/* $OpenBSD: interrupt.c,v 1.51 2009/11/21 23:28:14 syuu Exp $ */
 
 /*
  * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -115,6 +115,8 @@ interrupt(struct trap_frame *trapframe)
 	trapdebug_enter(trapframe, 0);
 #endif
 
+	if(ci->ci_ipl < IPL_SCHED)
+		__mp_lock(&kernel_lock);
 	uvmexp.intrs++;
 
 	/* Mask out interrupts from cause that are unmasked */
@@ -147,6 +149,8 @@ interrupt(struct trap_frame *trapframe)
 		ci->ci_ipl = s;	/* no-overhead splx */
 		__asm__ ("sync\n\t.set reorder\n");
 	}
+	if(ci->ci_ipl < IPL_SCHED)
+		__mp_unlock(&kernel_lock);
 }
 
 
diff --git a/sys/arch/mips64/mips64/softintr.c b/sys/arch/mips64/mips64/softintr.c
index f818219550b..d3401c4e219 100644
--- a/sys/arch/mips64/mips64/softintr.c
+++ b/sys/arch/mips64/mips64/softintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: softintr.c,v 1.5 2009/10/22 22:08:54 miod Exp $ */
+/* $OpenBSD: softintr.c,v 1.6 2009/11/21 23:28:14 syuu Exp $ */
 /* $NetBSD: softintr.c,v 1.2 2003/07/15 00:24:39 lukem Exp $ */
 
 /*
@@ -100,7 +100,9 @@ softintr_dispatch(int si)
 
 		mtx_leave(&siq->siq_mtx);
 
+		KERNEL_LOCK();
 		(*sih->sih_func)(sih->sih_arg);
+		KERNEL_UNLOCK();
 	}
 }
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index 2bdf703b990..d460179aa70 100644
--- a/sys/arch/mips64/mips64/trap.c
+++ b/sys/arch/mips64/mips64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.49 2009/11/19 20:16:27 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.50 2009/11/21 23:28:15 syuu Exp $ */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -172,7 +172,9 @@ ast()
 	p->p_md.md_astpending = 0;
 	if (p->p_flag & P_OWEUPC) {
+		KERNEL_PROC_LOCK(p);
 		ADDUPROF(p);
+		KERNEL_PROC_UNLOCK(p);
 	}
 	if (ci->ci_want_resched)
 		preempt(NULL);
 
@@ -293,7 +295,9 @@ trap(trapframe)
 		va = trunc_page((vaddr_t)trapframe->badvaddr);
 		onfault = p->p_addr->u_pcb.pcb_onfault;
 		p->p_addr->u_pcb.pcb_onfault = 0;
+		KERNEL_LOCK();
 		rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
+		KERNEL_UNLOCK();
 		p->p_addr->u_pcb.pcb_onfault = onfault;
 		if (rv == 0)
 			return;
@@ -349,6 +353,11 @@ fault_common:
 		onfault = p->p_addr->u_pcb.pcb_onfault;
 		p->p_addr->u_pcb.pcb_onfault = 0;
 
+		if (USERMODE(trapframe->sr))
+			KERNEL_PROC_LOCK(p);
+		else
+			KERNEL_LOCK();
+
 		rv = uvm_fault(map, trunc_page(va), 0, ftype);
 
 		p->p_addr->u_pcb.pcb_onfault = onfault;
@@ -365,6 +374,10 @@ fault_common:
 			else if (rv == EACCES)
 				rv = EFAULT;
 		}
+		if (USERMODE(trapframe->sr))
+			KERNEL_PROC_UNLOCK(p);
+		else
+			KERNEL_UNLOCK();
 		if (rv == 0) {
 			if (!USERMODE(trapframe->sr))
 				return;
@@ -500,11 +513,16 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
 			}
 		}
 #ifdef SYSCALL_DEBUG
+		KERNEL_PROC_LOCK(p);
 		scdebug_call(p, code, args.i);
+		KERNEL_PROC_UNLOCK(p);
 #endif
 #ifdef KTRACE
-		if (KTRPOINT(p, KTR_SYSCALL))
+		if (KTRPOINT(p, KTR_SYSCALL)) {
+			KERNEL_PROC_LOCK(p);
 			ktrsyscall(p, code, callp->sy_argsize, args.i);
+			KERNEL_PROC_UNLOCK(p);
+		}
 #endif
 		rval[0] = 0;
 		rval[1] = locr0->v1;
@@ -514,13 +532,22 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
 		else
 			trp[-1].code = code;
 #endif
+
 #if NSYSTRACE > 0
-		if (ISSET(p->p_flag, P_SYSTRACE))
+		if (ISSET(p->p_flag, P_SYSTRACE)) {
+			KERNEL_PROC_LOCK(p);
 			i = systrace_redirect(code, p, args.i, rval);
-		else
+			KERNEL_PROC_UNLOCK(p);
+		} else
 #endif
+		{
+			int nolock = (callp->sy_flags & SY_NOLOCK);
+			if(!nolock)
+				KERNEL_PROC_LOCK(p);
 			i = (*callp->sy_call)(p, &args, rval);
-
+			if(!nolock)
+				KERNEL_PROC_UNLOCK(p);
+		}
 		switch (i) {
 		case 0:
 			locr0->v0 = rval[0];
@@ -542,11 +569,16 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
 		if (code == SYS_ptrace)
 			Mips_SyncCache();
 #ifdef SYSCALL_DEBUG
+		KERNEL_PROC_LOCK(p);
 		scdebug_ret(p, code, i, rval);
+		KERNEL_PROC_UNLOCK(p);
 #endif
 #ifdef KTRACE
-		if (KTRPOINT(p, KTR_SYSRET))
+		if (KTRPOINT(p, KTR_SYSRET)) {
+			KERNEL_PROC_LOCK(p);
 			ktrsysret(p, code, i, rval[0]);
+			KERNEL_PROC_UNLOCK(p);
+		}
 #endif
 		goto out;
 	}
@@ -769,7 +801,9 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
 	p->p_md.md_regs->cause = trapframe->cause;
 	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
 	sv.sival_ptr = (void *)trapframe->badvaddr;
+	KERNEL_PROC_LOCK(p);
 	trapsignal(p, i, ucode, typ, sv);
+	KERNEL_PROC_UNLOCK(p);
 out:
 	/*
 	 * Note: we should only get here if returning to user mode.
@@ -789,12 +823,17 @@ child_return(arg)
 	trapframe->v1 = 1;
 	trapframe->a3 = 0;
 
+	KERNEL_PROC_UNLOCK(p);
+
 	userret(p);
 
 #ifdef KTRACE
-	if (KTRPOINT(p, KTR_SYSRET))
+	if (KTRPOINT(p, KTR_SYSRET)) {
+		KERNEL_PROC_LOCK(p);
 		ktrsysret(p, (p->p_flag & P_PPWAIT) ?
 		    SYS_vfork : SYS_fork, 0, 0);
+		KERNEL_PROC_UNLOCK(p);
+	}
 #endif
 }
```
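The clock.c, interrupt.c, softintr.c and trap.c hunks above all apply one pattern: bracket code that is not yet MP-safe (hardclock ticks, soft-interrupt handlers, uvm_fault, syscall dispatch, ktrace/systrace hooks, trapsignal) with the kernel big lock, and in interrupt() take it only when the interrupted IPL is below IPL_SCHED, presumably so higher-priority interrupts never spin on it. A hedged user-space sketch of that shape follows; the IPL constants, `mp_lock()`/`mp_unlock()` stubs and `handler()` are illustrative stand-ins, not the kernel primitives from this diff.

```c
#include <stdio.h>

/* Illustrative IPL levels; only the ordering matters for this sketch. */
enum { IPL_NONE, IPL_SOFT, IPL_CLOCK, IPL_SCHED, IPL_HIGH };

static void mp_lock(void)   { printf("  big lock taken\n"); }
static void mp_unlock(void) { printf("  big lock dropped\n"); }

/* Hypothetical handler that still expects the big lock to be held. */
static void handler(void) { printf("  handler body\n"); }

/*
 * Shape of the change: the dispatcher, not each handler, takes and
 * releases the lock, and skips it entirely at or above IPL_SCHED.
 */
static void
dispatch(int ipl)
{
	printf("interrupt at ipl %d:\n", ipl);
	if (ipl < IPL_SCHED) {
		mp_lock();
		handler();
		mp_unlock();
	} else {
		handler();	/* must not touch big-lock protected data */
	}
}

int
main(void)
{
	dispatch(IPL_CLOCK);	/* runs under the big lock */
	dispatch(IPL_HIGH);	/* runs without it */
	return 0;
}
```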
```diff
diff --git a/sys/arch/sgi/conf/files.sgi b/sys/arch/sgi/conf/files.sgi
index 8aa4b3993de..f3885c33ebd 100644
--- a/sys/arch/sgi/conf/files.sgi
+++ b/sys/arch/sgi/conf/files.sgi
@@ -1,4 +1,4 @@
-# $OpenBSD: files.sgi,v 1.41 2009/11/18 19:03:25 miod Exp $
+# $OpenBSD: files.sgi,v 1.42 2009/11/21 23:28:14 syuu Exp $
 #
 # maxpartitions must be first item in files.${ARCH}
 #
@@ -13,6 +13,7 @@ file	arch/sgi/sgi/autoconf.c
 file	arch/sgi/sgi/bus_dma.c
 file	arch/sgi/sgi/conf.c
 file	arch/sgi/sgi/disksubr.c		disk
+file	arch/sgi/sgi/lock_machdep.c	multiprocessor
 file	arch/sgi/sgi/ip27_machdep.c	tgt_origin
 file	arch/sgi/sgi/ip30_machdep.c	tgt_octane
 file	arch/sgi/sgi/ip32_machdep.c	tgt_o2
diff --git a/sys/arch/sgi/include/mplock.h b/sys/arch/sgi/include/mplock.h
new file mode 100644
index 00000000000..50dc8f797fd
--- /dev/null
+++ b/sys/arch/sgi/include/mplock.h
@@ -0,0 +1,52 @@
+/* $OpenBSD: mplock.h,v 1.1 2009/11/21 23:28:14 syuu Exp $ */
+
+/*
+ * Copyright (c) 2004 Niklas Hallqvist. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_MPLOCK_H_
+#define _MACHINE_MPLOCK_H_
+
+/*
+ * Really simple spinlock implementation with recursive capabilities.
+ * Correctness is paramount, no fancyness allowed.
+ */
+
+struct __mp_lock {
+	volatile struct cpu_info *mpl_cpu;
+	volatile long mpl_count;
+};
+
+#ifndef _LOCORE
+
+void	__mp_lock_init(struct __mp_lock *);
+void	__mp_lock(struct __mp_lock *);
+void	__mp_unlock(struct __mp_lock *);
+int	__mp_release_all(struct __mp_lock *);
+int	__mp_release_all_but_one(struct __mp_lock *);
+void	__mp_acquire_count(struct __mp_lock *, int);
+int	__mp_lock_held(struct __mp_lock *);
+
+#endif
+
+#endif /* !_MACHINE_MPLOCK_H */
```
```diff
diff --git a/sys/arch/sgi/sgi/lock_machdep.c b/sys/arch/sgi/sgi/lock_machdep.c
new file mode 100644
index 00000000000..2faa3b38fda
--- /dev/null
+++ b/sys/arch/sgi/sgi/lock_machdep.c
@@ -0,0 +1,167 @@
+/* $OpenBSD: lock_machdep.c,v 1.1 2009/11/21 23:28:14 syuu Exp $ */
+
+/*
+ * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/systm.h>
+
+#include <machine/atomic.h>
+#include <machine/lock.h>
+
+#include <ddb/db_output.h>
+
+void
+__mp_lock_init(struct __mp_lock *lock)
+{
+	lock->mpl_cpu = NULL;
+	lock->mpl_count = 0;
+}
+
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+/* CPU-dependent timing, needs this to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+#define SPINLOCK_SPIN_HOOK /**/
+
+static __inline void
+__mp_lock_spin(struct __mp_lock *mpl)
+{
+#ifndef MP_LOCKDEBUG
+	while (mpl->mpl_count != 0)
+		SPINLOCK_SPIN_HOOK;
+#else
+	int ticks = __mp_lock_spinout;
+
+	while (mpl->mpl_count != 0 && ticks-- > 0)
+		SPINLOCK_SPIN_HOOK;
+
+	if (ticks == 0) {
+		db_printf("__mp_lock(0x%x): lock spun out", mpl);
+		Debugger();
+	}
+#endif
+}
+
+void
+__mp_lock(struct __mp_lock *mpl)
+{
+	uint32_t sr;
+
+	/*
+	 * Please notice that mpl_count gets incremented twice for the
+	 * first lock. This is on purpose. The way we release the lock
+	 * in mp_unlock is to decrement the mpl_count and then check if
+	 * the lock should be released. Since mpl_count is what we're
+	 * spinning on, decrementing it in mpl_unlock to 0 means that
+	 * we can't clear mpl_cpu, because we're no longer holding the
+	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
+	 * safer to clear it and besides, setting mpl_count to 2 on the
+	 * first lock makes most of this code much simpler.
+	 */
+	while (1) {
+		sr = disableintr();
+		if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0)
+			mpl->mpl_cpu = curcpu();
+
+		if (mpl->mpl_cpu == curcpu()) {
+			mpl->mpl_count++;
+			setsr(sr);
+			break;
+		}
+		setsr(sr);
+
+		__mp_lock_spin(mpl);
+	}
+}
+
+void
+__mp_unlock(struct __mp_lock *mpl)
+{
+	uint32_t sr;
+
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_unlock(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	sr = disableintr();
+	if (--mpl->mpl_count == 1) {
+		mpl->mpl_cpu = NULL;
+		mpl->mpl_count = 0;
+	}
+
+	setsr(sr);
+}
+
+int
+__mp_release_all(struct __mp_lock *mpl)
+{
+	int rv = mpl->mpl_count - 1;
+	uint32_t sr;
+
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_release_all(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	sr = disableintr();
+	mpl->mpl_cpu = NULL;
+	mpl->mpl_count = 0;
+	setsr(sr);
+
+	return (rv);
+}
+
+int
+__mp_release_all_but_one(struct __mp_lock *mpl)
+{
+	int rv = mpl->mpl_count - 2;
+#ifdef MP_LOCKDEBUG
+	if (mpl->mpl_cpu != curcpu()) {
+		db_printf("__mp_release_all_but_one(%p): not held lock\n", mpl);
+		Debugger();
+	}
+#endif
+
+	mpl->mpl_count = 2;
+
+	return (rv);
+}
+
+void
+__mp_acquire_count(struct __mp_lock *mpl, int count)
+{
+	while (count--)
+		__mp_lock(mpl);
+}
+
+int
+__mp_lock_held(struct __mp_lock *mpl)
+{
+	return mpl->mpl_cpu == curcpu();
+}
```
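The comment in `__mp_lock()` is the key to the bookkeeping: `mpl_count` is incremented twice on the first acquisition, so a free lock sits at 0, a lock held to recursion depth n sits at n + 1, and `__mp_unlock()` knows the last reference is gone when the decrement lands on 1. A small single-threaded user-space model of just that counting follows; it drops the CAS and the `disableintr()`/`setsr()` interrupt masking, and the names mirror the diff only for readability — this is a sketch, not the kernel code.

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct cpu { int id; };
static struct cpu cpu0 = { 0 };
static struct cpu *curcpu(void) { return &cpu0; }

/* Same two fields as struct __mp_lock in the diff, modeled in user space. */
struct mp_lock {
	struct cpu *mpl_cpu;	/* owner, NULL when free */
	long mpl_count;		/* 0 = free, held at depth n = n + 1 */
};

static void
mp_lock(struct mp_lock *mpl)
{
	/* First acquisition: claim the count word, then record the owner. */
	if (mpl->mpl_count == 0) {
		mpl->mpl_count = 1;
		mpl->mpl_cpu = curcpu();
	}
	assert(mpl->mpl_cpu == curcpu());	/* held by us (or just taken) */
	mpl->mpl_count++;			/* 2 on first lock, +1 per recursion */
}

static void
mp_unlock(struct mp_lock *mpl)
{
	/* Dropping to 1 means the outermost hold is gone: fully release. */
	if (--mpl->mpl_count == 1) {
		mpl->mpl_cpu = NULL;
		mpl->mpl_count = 0;
	}
}

int
main(void)
{
	struct mp_lock l = { NULL, 0 };

	mp_lock(&l);
	assert(l.mpl_count == 2);		/* first lock: counted twice */
	mp_lock(&l);
	assert(l.mpl_count == 3);		/* recursive lock */
	mp_unlock(&l);
	assert(l.mpl_count == 2);		/* still held */
	mp_unlock(&l);
	assert(l.mpl_count == 0 && l.mpl_cpu == NULL);

	printf("count bookkeeping matches the __mp_lock() comment\n");
	return 0;
}
```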