author     Mark Kettenis <kettenis@cvs.openbsd.org>    2007-09-22 20:04:52 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>    2007-09-22 20:04:52 +0000
commit     d96617953c9b7867562a8d0f28f0015a790661a4 (patch)
tree       36760a047fd11b88372a5368c4c2d58b212bf4f3 /sys/arch/sparc64
parent     d81c73aadcfecca27a7e1d817b23fa830bd80300 (diff)
Add kernel locking.
Diffstat (limited to 'sys/arch/sparc64')
-rw-r--r--  sys/arch/sparc64/sparc64/intr.c      23
-rw-r--r--  sys/arch/sparc64/sparc64/locore.s    17
-rw-r--r--  sys/arch/sparc64/sparc64/trap.c     101
3 files changed, 122 insertions(+), 19 deletions(-)
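
The change follows OpenBSD's big-kernel-lock convention: trap-handling paths that may sleep or deliver a signal are bracketed by KERNEL_PROC_LOCK()/KERNEL_PROC_UNLOCK() (or KERNEL_LOCK()/KERNEL_UNLOCK() for faults taken in kernel mode), interrupt dispatch takes the lock via two new helpers in intr.c, and the syscall path holds it across the system-call function itself. As a rough guide to the diff below, here is a minimal sketch of the recurring trap.c pattern; it is not part of the patch, the WITH_KERNEL_LOCK() helper is hypothetical, and only KERNEL_PROC_LOCK/KERNEL_PROC_UNLOCK and trapsignal() are names that actually appear in the diff.

	/*
	 * Illustrative sketch only, not part of the patch: the pattern
	 * applied throughout trap.c is to take the per-process kernel
	 * lock around signal delivery (and anything else that may
	 * sleep), and to drop it again before the fast return to
	 * userland.
	 */
	#ifdef MULTIPROCESSOR
	#define WITH_KERNEL_LOCK(p, stmt) do {				\
		KERNEL_PROC_LOCK(p);	/* acquire big lock */		\
		stmt;							\
		KERNEL_PROC_UNLOCK(p);	/* release big lock */		\
	} while (0)
	#else
	#define WITH_KERNEL_LOCK(p, stmt) do { stmt; } while (0)
	#endif

	/* In spirit, the T_BREAKPOINT case below becomes: */
	/*   WITH_KERNEL_LOCK(p, trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv)); */
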
diff --git a/sys/arch/sparc64/sparc64/intr.c b/sys/arch/sparc64/sparc64/intr.c
index e488b25708e..64cabbed82a 100644
--- a/sys/arch/sparc64/sparc64/intr.c
+++ b/sys/arch/sparc64/sparc64/intr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intr.c,v 1.28 2007/09/17 01:33:33 krw Exp $ */
+/* $OpenBSD: intr.c,v 1.29 2007/09/22 20:04:51 kettenis Exp $ */
/* $NetBSD: intr.c,v 1.39 2001/07/19 23:38:11 eeh Exp $ */
/*
@@ -356,3 +356,24 @@ splassert_check(int wantipl, const char *func)
}
}
#endif
+
+#ifdef MULTIPROCESSOR
+
+void sparc64_intlock(struct trapframe64 *);
+void sparc64_intunlock(struct trapframe64 *);
+
+void
+sparc64_intlock(struct trapframe64 *tf)
+{
+ if(tf->tf_pil < PIL_SCHED)
+ __mp_lock(&kernel_lock);
+}
+
+void
+sparc64_intunlock(struct trapframe64 *tf)
+{
+ if(tf->tf_pil < PIL_SCHED)
+ __mp_unlock(&kernel_lock);
+}
+
+#endif
diff --git a/sys/arch/sparc64/sparc64/locore.s b/sys/arch/sparc64/sparc64/locore.s
index 35372f2eed0..9219ab09dba 100644
--- a/sys/arch/sparc64/sparc64/locore.s
+++ b/sys/arch/sparc64/sparc64/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.81 2007/09/10 21:33:16 kettenis Exp $ */
+/* $OpenBSD: locore.s,v 1.82 2007/09/22 20:04:51 kettenis Exp $ */
/* $NetBSD: locore.s,v 1.137 2001/08/13 06:10:10 jdolecek Exp $ */
/*
@@ -3504,8 +3504,16 @@ sparc_intr_retry:
casxa [%l4] ASI_N, %l2, %l7 ! Grab the entire list
cmp %l7, %l2
bne,pn %icc, 1b
- add %sp, CC64FSZ+BIAS, %o2 ! tf = %sp + CC64FSZ + BIAS
+ nop
+
+#ifdef MULTIPROCESSOR
+ call _C_LABEL(sparc64_intlock)
+ add %sp, CC64FSZ+BIAS, %o0 ! tf = %sp + CC64FSZ + BIAS
+#endif
+
2:
+ add %sp, CC64FSZ+BIAS, %o2 ! tf = %sp + CC64FSZ + BIAS
+
ldx [%l2 + IH_PEND], %l7 ! Load next pending
ldx [%l2 + IH_FUN], %o4 ! ih->ih_fun
ldx [%l2 + IH_ARG], %o0 ! ih->ih_arg
@@ -3544,6 +3552,11 @@ sparc_intr_retry:
brnz,pn %l7, 2b ! 'Nother?
mov %l7, %l2
+#ifdef MULTIPROCESSOR
+ call _C_LABEL(sparc64_intunlock)
+ add %sp, CC64FSZ+BIAS, %o0 ! tf = %sp + CC64FSZ + BIAS
+#endif
+
intrcmplt:
/*
* Re-read SOFTINT to see if any new pending interrupts
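
In C terms, the two locore.s hunks wrap the handler loop at sparc_intr_retry in the new lock helpers: the lock is taken once before the loop at label 2: and dropped once after the last handler has run, with the add %sp, CC64FSZ+BIAS, %o0 in each delay slot passing the trapframe pointer as the argument. The sketch below is illustrative only; sparc_interrupt_c() and dispatch_handlers() are hypothetical stand-ins for the assembly, while sparc64_intlock()/sparc64_intunlock() are the helpers added to intr.c above.

	#ifdef MULTIPROCESSOR
	void
	sparc_interrupt_c(struct trapframe64 *tf)
	{
		sparc64_intlock(tf);	/* kernel lock, unless tf_pil >= PIL_SCHED */
		dispatch_handlers(tf);	/* the loop at 2:, calling ih->ih_fun(ih->ih_arg) */
		sparc64_intunlock(tf);	/* drop the lock before intrcmplt */
	}
	#endif

Interrupts at or above PIL_SCHED skip the kernel lock entirely, as the tf_pil check in sparc64_intlock() above shows.
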
diff --git a/sys/arch/sparc64/sparc64/trap.c b/sys/arch/sparc64/sparc64/trap.c
index 48680a60d22..319e7d7ea66 100644
--- a/sys/arch/sparc64/sparc64/trap.c
+++ b/sys/arch/sparc64/sparc64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.53 2007/09/08 17:13:18 kettenis Exp $ */
+/* $OpenBSD: trap.c,v 1.54 2007/09/22 20:04:51 kettenis Exp $ */
/* $NetBSD: trap.c,v 1.73 2001/08/09 01:03:01 eeh Exp $ */
/*
@@ -468,7 +468,9 @@ dopanic:
#if defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32)
badtrap:
#endif
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGILL, type, ILL_ILLOPC, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
#if defined(COMPAT_SVR4) || defined(COMPAT_SVR4_32)
@@ -493,7 +495,9 @@ badtrap:
case T_AST:
p->p_md.md_astpending = 0;
if (p->p_flag & P_OWEUPC) {
+ KERNEL_PROC_LOCK(p);
ADDUPROF(p);
+ KERNEL_PROC_UNLOCK(p);
}
if (curcpu()->ci_want_resched)
preempt(NULL);
@@ -505,7 +509,9 @@ badtrap:
if (copyin((caddr_t)pc, &ins, sizeof(ins)) != 0) {
/* XXX Can this happen? */
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
}
if (ins.i_any.i_op == IOP_mem &&
@@ -524,25 +530,31 @@ badtrap:
ADVANCE;
break;
}
- trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv); /* XXX code?? */
+ KERNEL_PROC_LOCK(p);
+ trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv); /* XXX code? */
+ KERNEL_PROC_UNLOCK(p);
break;
}
case T_INST_EXCEPT:
case T_TEXTFAULT:
case T_PRIVINST:
- trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv); /* XXX code?? */
+ KERNEL_PROC_LOCK(p);
+ trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv); /* XXX code? */
+ KERNEL_PROC_UNLOCK(p);
break;
case T_FPDISABLED: {
struct fpstate64 *fs = p->p_md.md_fpstate;
if (fs == NULL) {
+ KERNEL_PROC_LOCK(p);
/* NOTE: fpstate must be 64-bit aligned */
fs = malloc((sizeof *fs), M_SUBPROC, M_WAITOK);
*fs = initfpstate;
fs->fs_qsize = 0;
p->p_md.md_fpstate = fs;
+ KERNEL_PROC_UNLOCK(p);
}
/*
@@ -575,7 +587,9 @@ badtrap:
if (copyin((caddr_t)pc, &ins, sizeof(ins)) != 0) {
/* XXX Can this happen? */
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
}
if (ins.i_any.i_op == IOP_mem &&
@@ -585,8 +599,11 @@ badtrap:
ins.i_op3.i_op3 == IOP3_STQFA)) {
if (emul_qf(ins.i_int, p, sv, tf))
ADVANCE;
- } else
+ } else {
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
+ KERNEL_PROC_UNLOCK(p);
+ }
break;
}
@@ -600,7 +617,9 @@ badtrap:
* core from the interrupt stack is not a good idea.
* It causes random crashes.
*/
+ KERNEL_PROC_LOCK(p);
sigexit(p, SIGKILL);
+ /* NOTREACHED */
break;
case T_ALIGN:
@@ -629,7 +648,9 @@ badtrap:
break;
}
/* XXX sv.sival_ptr should be the fault address! */
- trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv); /* XXX code?? */
+ KERNEL_PROC_LOCK(p);
+ trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv); /* XXX code? */
+ KERNEL_PROC_UNLOCK(p);
break;
case T_FP_IEEE_754:
@@ -662,16 +683,22 @@ badtrap:
break;
case T_TAGOF:
- trapsignal(p, SIGEMT, 0, EMT_TAGOVF, sv); /* XXX code?? */
+ KERNEL_PROC_LOCK(p);
+ trapsignal(p, SIGEMT, 0, EMT_TAGOVF, sv); /* XXX code? */
+ KERNEL_PROC_UNLOCK(p);
break;
case T_BREAKPOINT:
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
case T_DIV0:
ADVANCE;
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGFPE, 0, FPE_INTDIV, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
case T_CLEANWIN:
@@ -687,7 +714,9 @@ badtrap:
case T_RANGECHECK:
ADVANCE;
- trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv); /* XXX code?? */
+ KERNEL_PROC_LOCK(p);
+ trapsignal(p, SIGILL, 0, ILL_ILLOPN, sv); /* XXX code? */
+ KERNEL_PROC_UNLOCK(p);
break;
case T_FIXALIGN:
@@ -702,7 +731,9 @@ badtrap:
case T_INTOF:
uprintf("T_INTOF\n"); /* XXX */
ADVANCE;
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGFPE, FPE_INTOVF_TRAP, FPE_INTOVF, sv);
+ KERNEL_PROC_UNLOCK(p);
break;
}
userret(p);
@@ -823,7 +854,8 @@ data_access_fault(tf, type, pc, addr, sfva, sfsr)
access_type = (sfsr & SFSR_W) ? VM_PROT_READ|VM_PROT_WRITE
: VM_PROT_READ;
}
- if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
+ if (tstate & TSTATE_PRIV) {
+ KERNEL_LOCK();
#ifdef DDB
extern char Lfsprobe[];
/*
@@ -844,12 +876,16 @@ data_access_fault(tf, type, pc, addr, sfva, sfsr)
if (!(addr & TLB_TAG_ACCESS_CTX)) {
/* CTXT == NUCLEUS */
rv = uvm_fault(kernel_map, va, 0, access_type);
- if (rv == 0)
+ if (rv == 0) {
+ KERNEL_UNLOCK();
return;
+ }
goto kfault;
}
- } else
+ } else {
+ KERNEL_PROC_LOCK(p);
p->p_md.md_tf = tf;
+ }
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
@@ -877,7 +913,7 @@ data_access_fault(tf, type, pc, addr, sfva, sfsr)
* address. Any other page fault in kernel, die; if user
* fault, deliver SIGSEGV.
*/
- if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
+ if (tstate & TSTATE_PRIV) {
kfault:
onfault = p->p_addr ?
(long)p->p_addr->u_pcb.pcb_onfault : 0;
@@ -891,6 +927,7 @@ kfault:
}
tf->tf_pc = onfault;
tf->tf_npc = onfault + 4;
+ KERNEL_UNLOCK();
return;
}
@@ -909,9 +946,13 @@ kfault:
trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
}
}
+
if ((tstate & TSTATE_PRIV) == 0) {
+ KERNEL_PROC_UNLOCK(p);
userret(p);
share_fpu(p, tf);
+ } else {
+ KERNEL_UNLOCK();
}
}
@@ -955,7 +996,7 @@ data_access_error(tf, type, afva, afsr, sfva, sfsr)
goto out; /* No fault. Why were we called? */
}
- if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
+ if (tstate & TSTATE_PRIV) {
if (!onfault) {
extern int trap_trace_dis;
@@ -981,8 +1022,11 @@ data_access_error(tf, type, afva, afsr, sfva, sfsr)
return;
}
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGSEGV, VM_PROT_READ|VM_PROT_WRITE, SEGV_MAPERR, sv);
+ KERNEL_PROC_UNLOCK(p);
out:
+
if ((tstate & TSTATE_PRIV) == 0) {
userret(p);
share_fpu(p, tf);
@@ -1021,7 +1065,7 @@ text_access_fault(tf, type, pc, sfsr)
/* Now munch on protections... */
access_type = VM_PROT_EXECUTE;
- if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)) {
+ if (tstate & TSTATE_PRIV) {
extern int trap_trace_dis;
trap_trace_dis = 1; /* Disable traptrace for printf */
(void) splhigh();
@@ -1030,6 +1074,8 @@ text_access_fault(tf, type, pc, sfsr)
} else
p->p_md.md_tf = tf;
+ KERNEL_PROC_LOCK(p);
+
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = uvm_fault(&vm->vm_map, va, 0, access_type);
@@ -1061,6 +1107,9 @@ text_access_fault(tf, type, pc, sfsr)
}
trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
}
+
+ KERNEL_PROC_UNLOCK(p);
+
if ((tstate & TSTATE_PRIV) == 0) {
userret(p);
share_fpu(p, tf);
@@ -1108,11 +1157,13 @@ text_access_error(tf, type, pc, sfsr, afva, afsr)
type, sfsr, pc, afsr, afva, tf);
trap_trace_dis--; /* Reenable traptrace for printf */
- if (tstate & (PSTATE_PRIV<<TSTATE_PSTATE_SHIFT))
+ if (tstate & TSTATE_PRIV)
panic("text_access_error: kernel memory error");
/* User fault -- Berr */
+ KERNEL_PROC_LOCK(p);
trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv);
+ KERNEL_PROC_UNLOCK(p);
}
if ((sfsr & SFSR_FV) == 0 || (sfsr & SFSR_FT) == 0)
@@ -1131,6 +1182,8 @@ text_access_error(tf, type, pc, sfsr, afva, afsr)
} else
p->p_md.md_tf = tf;
+ KERNEL_PROC_LOCK(p);
+
vm = p->p_vmspace;
/* alas! must call the horrible vm code */
rv = uvm_fault(&vm->vm_map, va, 0, access_type);
@@ -1164,6 +1217,9 @@ text_access_error(tf, type, pc, sfsr, afva, afsr)
}
trapsignal(p, SIGSEGV, access_type, SEGV_MAPERR, sv);
}
+
+ KERNEL_PROC_UNLOCK(p);
+
out:
if ((tstate & TSTATE_PRIV) == 0) {
userret(p);
@@ -1298,6 +1354,8 @@ syscall(tf, code, pc)
error = EFAULT;
goto bad;
}
+
+ KERNEL_PROC_LOCK(p);
#ifdef SYSCALL_DEBUG
scdebug_call(p, code, args);
#endif
@@ -1309,6 +1367,7 @@ syscall(tf, code, pc)
else
#endif
error = (*callp->sy_call)(p, args, rval);
+ KERNEL_PROC_UNLOCK(p);
switch (error) {
vaddr_t dest;
@@ -1350,12 +1409,17 @@ syscall(tf, code, pc)
}
#ifdef SYSCALL_DEBUG
+ KERNEL_PROC_LOCK(p);
scdebug_ret(p, code, error, rval);
+ KERNEL_PROC_UNLOCK(p);
#endif
userret(p);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
+ if (KTRPOINT(p, KTR_SYSRET)) {
+ KERNEL_PROC_LOCK(p);
ktrsysret(p, code, error, rval[0]);
+ KERNEL_PROC_UNLOCK(p);
+ }
#endif
share_fpu(p, tf);
}
@@ -1377,11 +1441,16 @@ child_return(arg)
tf->tf_out[1] = 0;
tf->tf_tstate &= ~(((int64_t)(ICC_C|XCC_C))<<TSTATE_CCR_SHIFT);
+ KERNEL_PROC_UNLOCK(p);
+
userret(p);
#ifdef KTRACE
- if (KTRPOINT(p, KTR_SYSRET))
+ if (KTRPOINT(p, KTR_SYSRET)) {
+ KERNEL_PROC_LOCK(p);
ktrsysret(p,
(p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
+ KERNEL_PROC_UNLOCK(p);
+ }
#endif
}
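
Finally, the syscall() and child_return() hunks pin down where the lock is held on the system-call path: it is taken before scdebug_call() and the dispatch through callp->sy_call, released right after the call returns, and re-taken only briefly around scdebug_ret() and ktrsysret(). A newly forked child enters child_return() with the lock already held (the matching KERNEL_PROC_LOCK is taken on the fork side, outside this diff) and releases it before userret(). A compressed sketch of the resulting syscall flow, not part of the patch and with argument fetching and error handling omitted:

	/* sketch only: lock held across the system call proper */
	KERNEL_PROC_LOCK(p);
	error = (*callp->sy_call)(p, args, rval);
	KERNEL_PROC_UNLOCK(p);

	/* ... translate error into return registers as before ... */

	userret(p);		/* runs without the kernel lock */
	share_fpu(p, tf);
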