author     Miod Vallat <miod@cvs.openbsd.org>    2009-10-22 22:08:55 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>    2009-10-22 22:08:55 +0000
commit     96629079d3f1a0da94f72c0045f030432bddc51d
tree       91639a4e0d7c5dc462c850a9bed7dc553484768d    /sys/arch
parent     94815ddee781a683d8d1d9258ec9c8959763443f
Completely overhaul interrupt handling on sgi. Cpu state now only stores a
logical IPL level, and per-platform (IP27/IP30/IP32) code will compute the
necessary hardware mask registers from it.
This allows the use of more than one interrupt mask register. Also, the
generic (platform-independent) interrupt code shrinks a lot, and the actual
interrupt handler chains and masking information are now per-platform private
data.
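
As a rough illustration of the new scheme, here is a minimal, standalone sketch (not the kernel code itself; `cpu0`, `fake_imask` and `platform_setintrmask()` are made-up stand-ins) of how a logical IPL stored in `struct cpu_info` can be raised and restored, with a per-platform hook turning the level back into a hardware mask write, much as `mace_setintrmask()` and its siblings do in the diff below:

```c
#include <stdio.h>
#include <stdint.h>

enum { IPL_NONE, IPL_SOFTINT, IPL_BIO, IPL_NET, IPL_TTY, IPL_VM,
       IPL_CLOCK, IPL_HIGH, NIPLS };

struct cpu_info {
	int ci_ipl;		/* software IPL, as in the new struct cpu_info */
};

static struct cpu_info cpu0 = { IPL_NONE };

/* per-platform: which hardware sources are blocked at each logical level */
static uint64_t fake_imask[NIPLS];

/* stand-in for a per-platform hook such as mace_setintrmask() */
static void
platform_setintrmask(int level)
{
	printf("hw mask register <- enabled & ~imask[%d] (imask = 0x%llx)\n",
	    level, (unsigned long long)fake_imask[level]);
}

static int
splraise(int newipl)
{
	int oldipl = cpu0.ci_ipl;

	if (oldipl < newipl)		/* raise only, never lower */
		cpu0.ci_ipl = newipl;
	return oldipl;
}

static void
splx(int newipl)
{
	/* the real splx() dispatches through a per-platform splx_hand hook */
	cpu0.ci_ipl = newipl;
	platform_setintrmask(newipl);
}

static int
spllower(int newipl)
{
	int oldipl = cpu0.ci_ipl;

	splx(newipl);
	return oldipl;
}

int
main(void)
{
	int s;

	fake_imask[IPL_BIO] = 0x00ff;	/* pretend disk interrupts use these bits */

	s = splraise(IPL_BIO);		/* block disk interrupts */
	/* ... critical section ... */
	splx(s);			/* restore previous level, update hw mask */
	(void)spllower(IPL_NONE);
	return 0;
}
```

The point is that generic code only ever stores and compares small integer levels; only the platform hook knows which hardware mask bits correspond to each level.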
Interrupt dispatching is generated from a template; more routines will be
added to the template to reduce platform-specific changes and share as much
code as possible.
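
A minimal sketch of the template idea follows. The real tree instantiates the dispatcher by defining macros such as INTR_FUNCTIONNAME and INTR_GETMASKS and then doing `#include <sgi/sgi/intr_template.c>` (see the macebus.c, ip27_machdep.c and xheart.c hunks below); to keep this sketch compilable as a single userland file, the shared body is expressed here as a function-generating macro instead, the "registers" and platform names are invented, and the per-IPL masking and pending bookkeeping of the real template are omitted:

```c
#include <stdio.h>
#include <stdint.h>

struct intrhand {
	struct intrhand	*ih_next;
	int		(*ih_fun)(void *);
	void		*ih_arg;
};

/*
 * Shared dispatcher body, written once and parameterized: read status and
 * mask registers, mask what fired, run the handler chain for every pending
 * bit, then restore the mask so serviced sources are re-enabled.
 */
#define DEFINE_DISPATCHER(name, getmasks, maskpending, handler, maskrestore) \
uint32_t								\
name(uint32_t hwpend)							\
{									\
	uint64_t imr, isr;						\
	struct intrhand *ih;						\
	int bit;							\
									\
	getmasks(isr, imr);						\
	isr &= imr;							\
	if (isr == 0)							\
		return 0;		/* not for us */		\
	maskpending(imr, isr);		/* mask pending sources */	\
	for (bit = 63; bit >= 0; bit--)					\
		if (isr & (1ULL << bit))				\
			for (ih = handler(bit); ih != NULL;		\
			    ih = ih->ih_next)				\
				(void)(*ih->ih_fun)(ih->ih_arg);	\
	maskrestore(imr);		/* re-enable serviced sources */\
	return hwpend;							\
}

/* "Platform A": fake memory-mapped interrupt registers. */
static uint64_t a_stat = 0x1, a_mask = 0xf;
static struct intrhand *a_intrhand[64];

#define A_GETMASKS(isr, imr)	do { (isr) = a_stat; (imr) = a_mask; } while (0)
#define A_MASKPENDING(imr, isr)	(a_mask = (imr) & ~(isr))
#define A_HANDLER(bit)		a_intrhand[bit]
#define A_MASKRESTORE(imr)	(a_mask = (imr))

DEFINE_DISPATCHER(platform_a_iointr, A_GETMASKS, A_MASKPENDING,
    A_HANDLER, A_MASKRESTORE)

static int
hello(void *arg)
{
	printf("handler for source %d ran\n", *(int *)arg);
	return 1;
}

int
main(void)
{
	static int source0 = 0;
	static struct intrhand ih0 = { NULL, hello, &source0 };

	a_intrhand[0] = &ih0;
	(void)platform_a_iointr(1);
	return 0;
}
```

Each platform only supplies the register-access glue; the dispatch loop itself stays in one place, which is exactly what the new intr_template.c in this commit provides.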
Tested on IP27, IP30, IP32 and IP35.
Diffstat (limited to 'sys/arch')
24 files changed, 787 insertions, 667 deletions
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h index ad44f2270a2..a1eb25d6591 100644 --- a/sys/arch/mips64/include/cpu.h +++ b/sys/arch/mips64/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.40 2009/10/22 20:59:22 miod Exp $ */ +/* $OpenBSD: cpu.h,v 1.41 2009/10/22 22:08:52 miod Exp $ */ /*- * Copyright (c) 1992, 1993 @@ -374,7 +374,7 @@ struct cpu_info { int ci_want_resched; /* need_resched() invoked */ cpuid_t ci_cpuid; /* our CPU ID */ uint32_t ci_randseed; /* per cpu random seed */ - uint32_t ci_cpl; + int ci_ipl; /* software IPL */ uint32_t ci_softpending; /* pending soft interrupts */ #ifdef MULTIPROCESSOR u_long ci_flags; /* flags; see below */ diff --git a/sys/arch/mips64/include/cpustate.h b/sys/arch/mips64/include/cpustate.h index 7512fdbf9c5..80f642bcc1e 100644 --- a/sys/arch/mips64/include/cpustate.h +++ b/sys/arch/mips64/include/cpustate.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpustate.h,v 1.7 2009/10/07 08:35:47 syuu Exp $ */ +/* $OpenBSD: cpustate.h,v 1.8 2009/10/22 22:08:52 miod Exp $ */ /* * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -80,7 +80,7 @@ SAVE_REG(sp, SP, frame, bo) ;\ PTR_ADDU a0, frame, bo ;\ GET_CPU_INFO(v0, v1) ;\ - lw a2, CI_CPL(v0) ;\ + lw a2, CI_IPL(v0) ;\ SAVE_REG(a2, CPL, frame, bo) /* diff --git a/sys/arch/mips64/include/frame.h b/sys/arch/mips64/include/frame.h index baac64d131c..874fcdb566f 100644 --- a/sys/arch/mips64/include/frame.h +++ b/sys/arch/mips64/include/frame.h @@ -1,4 +1,4 @@ -/* $OpenBSD: frame.h,v 1.4 2004/09/27 17:42:23 pefo Exp $ */ +/* $OpenBSD: frame.h,v 1.5 2009/10/22 22:08:52 miod Exp $ */ /* * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se) @@ -76,7 +76,7 @@ struct trap_frame { register_t cause; register_t pc; register_t ic; - register_t cpl; + register_t ipl; /* From here and on, only saved user processes. */ diff --git a/sys/arch/mips64/include/trap.h b/sys/arch/mips64/include/trap.h index 8095310ad01..413a7c1ef6a 100644 --- a/sys/arch/mips64/include/trap.h +++ b/sys/arch/mips64/include/trap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: trap.h,v 1.9 2008/04/07 22:37:16 miod Exp $ */ +/* $OpenBSD: trap.h,v 1.10 2009/10/22 22:08:52 miod Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -91,7 +91,7 @@ struct trapdebug { /* trap history buffer for debugging */ u_long ra; u_long sp; u_int code; - u_int cpl; + u_int ipl; }; #define trapdebug_enter(x, cd) { \ @@ -102,7 +102,7 @@ struct trapdebug { /* trap history buffer for debugging */ trp->pc = x->pc; \ trp->sp = x->sp; \ trp->ra = x->ra; \ - trp->cpl = x->cpl; \ + trp->ipl = x->ipl; \ trp->code = cd; \ if (++trp == &trapdebug[TRAPSIZE]) \ trp = trapdebug; \ diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c index 1db4204d7e6..765b87eeed0 100644 --- a/sys/arch/mips64/mips64/clock.c +++ b/sys/arch/mips64/mips64/clock.c @@ -1,4 +1,4 @@ -/* $OpenBSD: clock.c,v 1.24 2009/10/22 20:05:27 miod Exp $ */ +/* $OpenBSD: clock.c,v 1.25 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -161,13 +161,12 @@ clock_int5(uint32_t mask, struct trap_frame *tf) /* * Process clock interrupt unless it is currently masked. 
*/ - if ((tf->cpl & SPL_CLOCKMASK) == 0) { + if (tf->ipl < IPL_CLOCK) while (pendingticks) { clk_count.ec_count++; hardclock(tf); pendingticks--; } - } return CR_INT_5; /* Clock is always on 5 */ } diff --git a/sys/arch/mips64/mips64/context.S b/sys/arch/mips64/mips64/context.S index a6f26117414..cf05fc6188b 100644 --- a/sys/arch/mips64/mips64/context.S +++ b/sys/arch/mips64/mips64/context.S @@ -1,4 +1,4 @@ -/* $OpenBSD: context.S,v 1.29 2009/10/22 18:46:48 miod Exp $ */ +/* $OpenBSD: context.S,v 1.30 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -61,7 +61,7 @@ LEAF(savectx, 0) REG_S ra, PCB_CONTEXT+10*REGSZ(a0) REG_S v0, PCB_CONTEXT+11*REGSZ(a0) GET_CPU_INFO(t0, t1) - lw t0, CI_CPL(t0) + lw t0, CI_IPL(t0) #ifdef RM7000_ICR cfc0 t1, COP_0_ICR REG_S t1, PCB_CONTEXT+12*REGSZ(a0) # save status register @@ -117,7 +117,7 @@ NON_LEAF(cpu_switchto, FRAMESZ(CF_SZ), ra) beqz a0, 1f mfc0 v0, COP_0_STATUS_REG - lw t0, CI_CPL(t1) + lw t0, CI_IPL(t1) REG_S s0, PCB_CONTEXT+0*REGSZ(t3) # do a 'savectx()' REG_S s1, PCB_CONTEXT+1*REGSZ(t3) REG_S s2, PCB_CONTEXT+2*REGSZ(t3) diff --git a/sys/arch/mips64/mips64/exception.S b/sys/arch/mips64/mips64/exception.S index f4d69d3fe59..3aafc6637a6 100644 --- a/sys/arch/mips64/mips64/exception.S +++ b/sys/arch/mips64/mips64/exception.S @@ -1,4 +1,4 @@ -/* $OpenBSD: exception.S,v 1.25 2009/10/22 18:46:48 miod Exp $ */ +/* $OpenBSD: exception.S,v 1.26 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2002-2003 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -330,7 +330,7 @@ NNON_LEAF(u_intr, FRAMESZ(CF_SZ), ra) GET_CPU_INFO(k1, k0) PTR_L k0, CI_CURPROCPADDR(k1) RESTORE_REG(a3, CPL, k0, 0) - sw a3, CI_CPL(k1) + sw a3, CI_IPL(k1) .set noat RESTORE_REG(a0, PC, k0, 0) #ifdef RM7000_ICR @@ -491,7 +491,7 @@ NNON_LEAF(u_general, FRAMESZ(CF_SZ), ra) GET_CPU_INFO(k1, k0) PTR_L k0, CI_CURPROCPADDR(k1) RESTORE_REG(a3, CPL, k0, 0) - sw a3, CI_CPL(k1) + sw a3, CI_IPL(k1) .set noat RESTORE_CPU_SREG(k0, 0) RESTORE_REG(a0, PC, k0, 0) diff --git a/sys/arch/mips64/mips64/interrupt.c b/sys/arch/mips64/mips64/interrupt.c index aa94dd46b3b..8ec207ab116 100644 --- a/sys/arch/mips64/mips64/interrupt.c +++ b/sys/arch/mips64/mips64/interrupt.c @@ -1,4 +1,4 @@ -/* $OpenBSD: interrupt.c,v 1.48 2009/10/22 20:59:24 miod Exp $ */ +/* $OpenBSD: interrupt.c,v 1.49 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -29,26 +29,16 @@ #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> -#include <sys/signalvar.h> #include <sys/user.h> -#include <sys/malloc.h> -#include <sys/device.h> -#ifdef KTRACE -#include <sys/ktrace.h> -#endif -#include <machine/trap.h> +#include <uvm/uvm_extern.h> + #include <machine/cpu.h> #include <machine/intr.h> -#include <machine/autoconf.h> #include <machine/frame.h> -#include <machine/regnum.h> -#include <machine/atomic.h> #include <mips64/rm7000.h> -#include <mips64/archtype.h> - #ifdef DDB #include <mips64/db_machdep.h> #include <ddb/db_sym.h> @@ -60,8 +50,6 @@ void interrupt(struct trap_frame *); static struct evcount soft_count; static int soft_irq = 0; -uint32_t imask[NIPLS]; - uint32_t idle_mask; int last_low_int; @@ -98,7 +86,7 @@ int_f *splx_hand = &dummy_splx; */ /* - * Handle an interrupt. Both kernel and user mode is handled here. + * Handle an interrupt. Both kernel and user mode are handled here. * * The interrupt handler is called with the CR_INT bits set that * were given when the handler was registered. 
@@ -111,9 +99,7 @@ interrupt(struct trap_frame *trapframe) { struct cpu_info *ci = curcpu(); u_int32_t pending; - u_int32_t cause; - int i; - uint32_t xcpl; + int i, s; /* * Paranoic? Perhaps. But if we got here with the enable @@ -133,83 +119,69 @@ interrupt(struct trap_frame *trapframe) /* Mask out interrupts from cause that are unmasked */ pending = trapframe->cause & CR_IPEND & trapframe->sr; - cause = pending; - if (cause & SOFT_INT_MASK_0) { + if (pending & SOFT_INT_MASK_0) { clearsoftintr0(); soft_count.ec_count++; } #ifdef RM7K_PERFCNTR - if (cause & CR_INT_PERF) { + if (pending & CR_INT_PERF) rm7k_perfintr(trapframe); - cause &= ~CR_INT_PERF; - } #endif for (i = 0; i <= last_low_int; i++) { uint32_t active; active = cpu_int_tab[i].int_mask & pending; - if (active) { - cause &= ~(*cpu_int_tab[i].int_hand)(active, trapframe); - } + if (active != 0) + (*cpu_int_tab[i].int_hand)(active, trapframe); } /* - * Reenable all non served hardware levels. + * Dispatch soft interrupts if current ipl allows them. */ -#if 0 - /* XXX the following should, when req., change the IC reg as well */ - setsr((trapframe->sr & ~pending) | SR_INT_ENAB); -#endif - - xcpl = splsoft(); - if (ci->ci_softpending & ~xcpl) { - dosoftint(xcpl); + if (ci->ci_ipl < IPL_SOFTINT && ci->ci_softpending != 0) { + s = splsoft(); + dosoftint(); + __asm__ (".set noreorder\n"); + ci->ci_ipl = s; /* no-overhead splx */ + __asm__ ("sync\n\t.set reorder\n"); } - - __asm__ (" .set noreorder\n"); - ci->ci_cpl = xcpl; - __asm__ (" sync\n .set reorder\n"); } /* - * Set up handler for external interrupt events. - * Use CR_INT_<n> to select the proper interrupt - * condition to dispatch on. We also enable the - * software ints here since they are always on. + * Set up handler for external interrupt events. + * Use CR_INT_<n> to select the proper interrupt condition to dispatch on. + * We also enable the software ints here since they are always on. */ void set_intr(int pri, uint32_t mask, - uint32_t (*int_hand)(uint32_t, struct trap_frame *)) + uint32_t (*int_hand)(uint32_t, struct trap_frame *)) { if ((idle_mask & SOFT_INT_MASK) == 0) - evcount_attach(&soft_count, "soft", (void *)&soft_irq, &evcount_intr); - if (pri < 0 || pri >= NLOWINT) { - panic("set_intr: to high priority"); - } + evcount_attach(&soft_count, "soft", (void *)&soft_irq, + &evcount_intr); + if (pri < 0 || pri >= NLOWINT) + panic("set_intr: too high priority (%d), increase NLOWINT", + pri); if (pri > last_low_int) last_low_int = pri; - if ((mask & ~CR_IPEND) != 0) { + if ((mask & ~CR_IPEND) != 0) panic("set_intr: invalid mask 0x%x", mask); - } if (cpu_int_tab[pri].int_mask != 0 && (cpu_int_tab[pri].int_mask != mask || - cpu_int_tab[pri].int_hand != int_hand)) { + cpu_int_tab[pri].int_hand != int_hand)) panic("set_intr: int already set at pri %d", pri); - } cpu_int_tab[pri].int_hand = int_hand; cpu_int_tab[pri].int_mask = mask; idle_mask |= mask | SOFT_INT_MASK; } -struct intrhand *intrhand[INTMASKSIZE]; - void dummy_splx(int newcpl) { @@ -233,7 +205,7 @@ splinit() /* * Update proc0 pcb to contain proper values. 
*/ - pcb->pcb_context.val[13] = 0; /* IPL_NONE */ + pcb->pcb_context.val[13] = IPL_NONE; #ifdef RM7000_ICR pcb->pcb_context.val[12] = (idle_mask << 8) & IC_INT_MASK; #endif @@ -247,31 +219,35 @@ splinit() } int -splraise(int newcpl) +splraise(int newipl) { struct cpu_info *ci = curcpu(); - int oldcpl; - - __asm__ (" .set noreorder\n"); - oldcpl = ci->ci_cpl; - ci->ci_cpl = oldcpl | newcpl; - __asm__ (" sync\n .set reorder\n"); - return (oldcpl); + int oldipl; + + __asm__ (".set noreorder\n"); + oldipl = ci->ci_ipl; + if (oldipl < newipl) { + /* XXX to kill warning about dla being used in a delay slot */ + __asm__("nop"); + ci->ci_ipl = newipl; + } + __asm__ ("sync\n\t.set reorder\n"); + return oldipl; } void -splx(int newcpl) +splx(int newipl) { - (*splx_hand)(newcpl); + (*splx_hand)(newipl); } int -spllower(int newcpl) +spllower(int newipl) { struct cpu_info *ci = curcpu(); - int oldcpl; + int oldipl; - oldcpl = ci->ci_cpl; - splx(newcpl); - return (oldcpl); + oldipl = ci->ci_ipl; + splx(newipl); + return oldipl; } diff --git a/sys/arch/mips64/mips64/process_machdep.c b/sys/arch/mips64/mips64/process_machdep.c index 505ecf55e1c..bdd3609d288 100644 --- a/sys/arch/mips64/mips64/process_machdep.c +++ b/sys/arch/mips64/mips64/process_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: process_machdep.c,v 1.10 2009/05/22 20:37:53 miod Exp $ */ +/* $OpenBSD: process_machdep.c,v 1.11 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 1994 Adam Glass @@ -40,7 +40,7 @@ * From: * Id: procfs_i386.c,v 4.1 1993/12/17 10:47:45 jsp Rel * - * $Id: process_machdep.c,v 1.10 2009/05/22 20:37:53 miod Exp $ + * $Id: process_machdep.c,v 1.11 2009/10/22 22:08:54 miod Exp $ */ /* @@ -103,7 +103,7 @@ process_write_regs(p, regs) struct proc *p; struct reg *regs; { - register_t sr, ic, cpl; + register_t sr, ic, ipl; extern struct proc *machFPCurProcPtr; if (p == machFPCurProcPtr) { @@ -114,11 +114,11 @@ process_write_regs(p, regs) } sr = p->p_md.md_regs->sr; ic = p->p_md.md_regs->ic; - cpl = p->p_md.md_regs->cpl; + ipl = p->p_md.md_regs->ipl; bcopy((caddr_t)regs, (caddr_t)p->p_md.md_regs, REGSIZE); p->p_md.md_regs->sr = sr; p->p_md.md_regs->ic = ic; - p->p_md.md_regs->cpl = cpl; + p->p_md.md_regs->ipl = ipl; return (0); } diff --git a/sys/arch/mips64/mips64/softintr.c b/sys/arch/mips64/mips64/softintr.c index 748657f0ccd..f818219550b 100644 --- a/sys/arch/mips64/mips64/softintr.c +++ b/sys/arch/mips64/mips64/softintr.c @@ -1,4 +1,4 @@ -/* $OpenBSD: softintr.c,v 1.4 2009/10/22 20:59:24 miod Exp $ */ +/* $OpenBSD: softintr.c,v 1.5 2009/10/22 22:08:54 miod Exp $ */ /* $NetBSD: softintr.c,v 1.2 2003/07/15 00:24:39 lukem Exp $ */ /* @@ -200,12 +200,12 @@ netintr(void) } void -dosoftint(uint32_t xcpl) +dosoftint() { struct cpu_info *ci = curcpu(); int sir, q, mask; - while ((sir = (ci->ci_softpending & ~xcpl)) != 0) { + while ((sir = ci->ci_softpending) != 0) { atomic_clearbits_int(&ci->ci_softpending, sir); for (q = SI_NQUEUES - 1; q >= 0; q--) { diff --git a/sys/arch/mips64/mips64/vm_machdep.c b/sys/arch/mips64/mips64/vm_machdep.c index 8ac4a8ffdcd..a73db7dfd0a 100644 --- a/sys/arch/mips64/mips64/vm_machdep.c +++ b/sys/arch/mips64/mips64/vm_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vm_machdep.c,v 1.19 2009/10/22 18:46:48 miod Exp $ */ +/* $OpenBSD: vm_machdep.c,v 1.20 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 1988 University of Utah. 
* Copyright (c) 1992, 1993 @@ -104,11 +104,11 @@ cpu_fork(p1, p2, stack, stacksize, func, arg) /* * Copy the process control block to the new proc and * create a clean stack for exit through trampoline. - * pcb_context has s0-s7, sp, s8, ra, sr, icr, cpl. + * pcb_context has s0-s7, sp, s8, ra, sr, icr, ipl. */ if (p1 != curproc) { - pcb->pcb_context.val[13] = 0; + pcb->pcb_context.val[13] = IPL_NONE; #ifdef RM7000_ICR pcb->pcb_context.val[12] = (idle_mask << 8) & IC_INT_MASK; #endif diff --git a/sys/arch/sgi/include/intr.h b/sys/arch/sgi/include/intr.h index 33cbd8aec22..1829fc7dd22 100644 --- a/sys/arch/sgi/include/intr.h +++ b/sys/arch/sgi/include/intr.h @@ -1,4 +1,4 @@ -/* $OpenBSD: intr.h,v 1.33 2009/10/22 20:39:17 miod Exp $ */ +/* $OpenBSD: intr.h,v 1.34 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -30,14 +30,19 @@ #define _MACHINE_INTR_H_ /* - * The interrupt mask cpl is a mask which is used with an external - * HW mask register. - * The CPU mask is never changed from the value it gets when interrupt - * dispatchers are registered. + * The interrupt level ipl is a logical level; per-platform interrupt + * code will turn it into the appropriate hardware interrupt masks + * values. * - * Clock interrupts are always allowed to happen but will not be serviced + * Interrupt sources on the CPU are kept enabled regardless of the + * current ipl value; individual hardware sources interrupting while + * logically masked are masked on the fly, remembered as pending, and + * unmasked at the first splx() opportunity. + * + * An exception to this rule is the clock interrupt. Clock interrupts + * are always allowed to happen, but will (of course!) not be serviced * if logically masked. The reason for this is that clocks usually sit on - * INT5 and cannot be easily masked if external HW masking is used. + * INT5 and cannot be easily masked if external hardware masking is used. */ /* Interrupt priority `levels'; not mutually exclusive. */ @@ -58,12 +63,9 @@ #define IST_EDGE 2 /* edge-triggered */ #define IST_LEVEL 3 /* level-triggered */ -#define SINTBIT(q) (31 - (q)) +#define SINTBIT(q) (q) #define SINTMASK(q) (1 << SINTBIT(q)) -#define SPL_CLOCK SINTBIT(SI_NQUEUES) -#define SPL_CLOCKMASK SINTMASK(SI_NQUEUES) - /* Soft interrupt masks. 
*/ #define IPL_SOFT 0 @@ -76,8 +78,6 @@ #define SI_SOFTNET 2 /* for IPL_SOFTNET */ #define SI_SOFTTTY 3 /* for IPL_SOFTTTY */ -#define SINT_ALLMASK (SINTMASK(SI_SOFT) | SINTMASK(SI_SOFTCLOCK) | \ - SINTMASK(SI_SOFTNET) | SINTMASK(SI_SOFTTTY)) #define SI_NQUEUES 4 #ifndef _LOCORE @@ -110,45 +110,32 @@ extern struct soft_intrhand *softnet_intrhand; #define setsoftnet() softintr_schedule(softnet_intrhand) -#define splsoft() splraise(imask[IPL_SOFTINT]) -#define splbio() splraise(imask[IPL_BIO]) -#define splnet() splraise(imask[IPL_NET]) -#define spltty() splraise(imask[IPL_TTY]) -#define splaudio() splraise(imask[IPL_AUDIO]) -#define splclock() splraise(imask[IPL_CLOCK]) -#define splvm() splraise(imask[IPL_VM]) -#define splsoftclock() splraise(SINTMASK(SI_SOFTCLOCK) | \ - SINTMASK(SI_SOFT)) -#define splsoftnet() splraise(SINTMASK(SI_SOFTNET) | \ - SINTMASK(SI_SOFTCLOCK) | \ - SINTMASK(SI_SOFT)) -#define splstatclock() splhigh() -#define splsched() splhigh() -#define spllock() splhigh() -#define splhigh() splraise(-1) -#define spl0() spllower(0) +#define splsoft() splraise(IPL_SOFTINT) +#define splbio() splraise(IPL_BIO) +#define splnet() splraise(IPL_NET) +#define spltty() splraise(IPL_TTY) +#define splaudio() splraise(IPL_AUDIO) +#define splclock() splraise(IPL_CLOCK) +#define splvm() splraise(IPL_VM) +#define splhigh() splraise(IPL_HIGH) + +#define splsoftclock() splsoft() +#define splsoftnet() splsoft() +#define splstatclock() splhigh() + +#define splsched() splhigh() +#define spllock() splhigh() +#define spl0() spllower(0) void splinit(void); #define splassert(X) #define splsoftassert(X) -/* - * Schedule prioritys for base interrupts (CPU) - */ -#define INTPRI_CLOCK 1 -#define INTPRI_MACEIO 2 /* O2 I/O interrupt */ -#define INTPRI_XBOWMUX 2 /* Origin 200/2000 I/O interrupt */ -#define INTPRI_MACEAUX 3 - -#define INTMASKSIZE 32 - -extern uint32_t imask[NIPLS]; - /* Inlines */ static __inline void register_splx_handler(void (*)(int)); -typedef void (int_f) (int); +typedef void (int_f)(int); extern int_f *splx_hand; static __inline void @@ -169,32 +156,32 @@ int spllower(int); #include <sys/evcount.h> struct intrhand { - struct intrhand *ih_next; - int (*ih_fun)(void *); - void *ih_arg; - int ih_level; - int ih_irq; - void *frame; - struct evcount ih_count; + struct intrhand *ih_next; + int (*ih_fun)(void *); + void *ih_arg; + int ih_level; + int ih_irq; + void *frame; + struct evcount ih_count; }; -extern struct intrhand *intrhand[INTMASKSIZE]; - /* * Low level interrupt dispatcher registration data. 
*/ -#define NLOWINT 16 /* Number of low level registrations possible */ -struct trap_frame; +/* Schedule priorities for base interrupts (CPU) */ +#define INTPRI_CLOCK 0 +/* other values are system-specific */ + +#define NLOWINT 16 /* Number of low level registrations possible */ extern uint32_t idle_mask; -extern int last_low_int; -void set_intr(int, uint32_t, uint32_t(*)(uint32_t, struct trap_frame *)); +struct trap_frame; +void set_intr(int, uint32_t, uint32_t(*)(uint32_t, struct trap_frame *)); -void hw_setintrmask(uint32_t); -u_int32_t updateimask(uint32_t); -void dosoftint(uint32_t); +uint32_t updateimask(uint32_t); +void dosoftint(void); #endif /* _LOCORE */ diff --git a/sys/arch/sgi/include/mutex.h b/sys/arch/sgi/include/mutex.h index 858d27c6a8f..df505d80ba8 100644 --- a/sys/arch/sgi/include/mutex.h +++ b/sys/arch/sgi/include/mutex.h @@ -1,4 +1,4 @@ -/* $OpenBSD: mutex.h,v 1.3 2007/05/14 17:32:15 miod Exp $ */ +/* $OpenBSD: mutex.h,v 1.4 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2004 Artur Grabowski <art@openbsd.org> @@ -34,7 +34,7 @@ struct mutex { int mtx_lock; int mtx_wantipl; - int mtx_oldcpl; + int mtx_oldipl; }; void mtx_init(struct mutex *, int); @@ -56,6 +56,6 @@ void mtx_init(struct mutex *, int); #define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0) #endif -#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldcpl +#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl #endif diff --git a/sys/arch/sgi/localbus/crimebus.h b/sys/arch/sgi/localbus/crimebus.h index 902669355da..db7c6bf19bc 100644 --- a/sys/arch/sgi/localbus/crimebus.h +++ b/sys/arch/sgi/localbus/crimebus.h @@ -1,4 +1,4 @@ -/* $OpenBSD: crimebus.h,v 1.7 2007/10/31 13:59:53 jsing Exp $ */ +/* $OpenBSD: crimebus.h,v 1.8 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se). @@ -84,6 +84,8 @@ #define CRIME_INT_SOFT_2 0x40000000 /* ??? */ #define CRIME_INT_VICE 0x80000000 /* Video Image Compression Engine */ +#define CRIME_NINTS 32 + /* * Watchdog? diff --git a/sys/arch/sgi/localbus/macebus.c b/sys/arch/sgi/localbus/macebus.c index 39ff5fdc706..98e91dbddcd 100644 --- a/sys/arch/sgi/localbus/macebus.c +++ b/sys/arch/sgi/localbus/macebus.c @@ -1,4 +1,4 @@ -/* $OpenBSD: macebus.c,v 1.50 2009/10/22 20:59:24 miod Exp $ */ +/* $OpenBSD: macebus.c,v 1.51 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se) @@ -57,6 +57,7 @@ void macebus_intr_makemasks(void); void macebus_splx(int); uint32_t macebus_iointr(uint32_t, struct trap_frame *); uint32_t macebus_aux(uint32_t, struct trap_frame *); +void mace_setintrmask(int); u_int8_t mace_read_1(bus_space_tag_t, bus_space_handle_t, bus_size_t); u_int16_t mace_read_2(bus_space_tag_t, bus_space_handle_t, bus_size_t); @@ -157,6 +158,20 @@ struct machine_bus_dma_tag mace_bus_dma_tag = { }; /* + * CRIME/MACE interrupt handling declarations: 32 CRIME sources, 32 MACE + * sources (unmanaged); 1 level. + * We define another level for periodic tasks as well. + */ + +struct intrhand *mace_intrhand[CRIME_NINTS]; + +#define INTPRI_MACEIO (INTPRI_CLOCK + 1) +#define INTPRI_MACEAUX (INTPRI_MACEIO + 1) + +uint64_t mace_intem; +uint64_t mace_imask[NIPLS]; + +/* * Match bus only to targets which have this bus. */ int @@ -237,12 +252,12 @@ macebusattach(struct device *parent, struct device *self, void *aux) * Map and setup MACE ISA control registers. 
*/ if (bus_space_map(&macebus_tag, MACE_ISA_OFFS, 0x400, 0, &mace_h)) { - printf("%s: can't map MACE ISA control registers\n", + printf("%s: can't map MACE control registers\n", self->dv_xname); return; } - /* Turn on all interrupts except for MACE compare/timer. */ + /* Turn on all MACE interrupts except for MACE compare/timer. */ bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_INT_MASK, 0xffffffff & ~MACE_ISA_INT_TIMER); bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_INT_STAT, 0); @@ -430,93 +445,56 @@ macebus_device_to_pa(bus_addr_t addr) * Macebus interrupt handler driver. */ -uint64_t mace_intem = 0x0; -static uint32_t intrtype[INTMASKSIZE]; -static uint32_t intrmask[INTMASKSIZE]; -static uint32_t intrlevel[INTMASKSIZE]; - -static int fakeintr(void *); -static int fakeintr(void *a) {return 0;} - /* * Establish an interrupt handler called from the dispatcher. * The interrupt function established should return zero if there was nothing * to serve (no int) and non-zero when an interrupt was serviced. + * * Interrupts are numbered from 1 and up where 1 maps to HW int 0. + * XXX There is no reason to keep this... except for hardcoded interrupts + * XXX in kernel configuration files... */ void * macebus_intr_establish(void *icp, u_long irq, int type, int level, int (*ih_fun)(void *), void *ih_arg, const char *ih_what) { struct intrhand **p, *q, *ih; - static struct intrhand fakehand = {NULL, fakeintr}; - int edge; - extern int cold; - static int initialized = 0; - - if (!initialized) { - /*INIT CODE HERE*/ - initialized = 1; - } + int s; - if (irq > SPL_CLOCK || irq < 1) { +#ifdef DIAGNOSTIC + if (irq > CRIME_NINTS || irq < 1) panic("intr_establish: illegal irq %d", irq); - } +#endif + irq -= 1; /* Adjust for 1 being first (0 is no int) */ - /* No point in sleeping unless someone can free memory. */ - ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK); + ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT); if (ih == NULL) - panic("intr_establish: can't malloc handler info"); + return NULL; - if (type == IST_NONE || type == IST_PULSE) - panic("intr_establish: bogus type"); - - switch (intrtype[irq]) { - case IST_EDGE: - case IST_LEVEL: - if (type == intrtype[irq]) - break; - } + ih->ih_next = NULL; + ih->ih_fun = ih_fun; + ih->ih_arg = ih_arg; + ih->ih_level = level; + ih->ih_irq = irq + 1; + evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq, + &evcount_intr); - switch (type) { - case IST_EDGE: - edge |= 1 << irq; - break; - case IST_LEVEL: - edge &= ~(1 << irq); - break; - } + s = splhigh(); /* * Figure out where to put the handler. * This is O(N^2), but we want to preserve the order, and N is * generally small. */ - for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next) + for (p = &mace_intrhand[irq]; (q = *p) != NULL; p = &q->ih_next) ; + *p = ih; - /* - * Actually install a fake handler momentarily, since we might be doing - * this with interrupts enabled and don't want the real routine called - * until masking is set up. - */ - fakehand.ih_level = level; - *p = &fakehand; - + mace_intem |= 1UL << irq; macebus_intr_makemasks(); - /* - * Poke the real handler in now. 
- */ - ih->ih_fun = ih_fun; - ih->ih_arg = ih_arg; - ih->ih_next = NULL; - ih->ih_level = level; - ih->ih_irq = irq + 1; - evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq, - &evcount_intr); - *p = ih; + splx(s); /* causes hw mask update */ return (ih); } @@ -536,165 +514,85 @@ macebus_intr_makemasks(void) { int irq, level; struct intrhand *q; + uint intrlevel[CRIME_NINTS]; /* First, figure out which levels each IRQ uses. */ - for (irq = 0; irq < INTMASKSIZE; irq++) { - int levels = 0; - for (q = intrhand[irq]; q; q = q->ih_next) + for (irq = 0; irq < CRIME_NINTS; irq++) { + uint levels = 0; + for (q = mace_intrhand[irq]; q; q = q->ih_next) levels |= 1 << q->ih_level; intrlevel[irq] = levels; } /* Then figure out which IRQs use each level. */ - for (level = IPL_NONE; level < NIPLS; level++) { - int irqs = 0; - for (irq = 0; irq < INTMASKSIZE; irq++) + for (level = IPL_NONE; level < IPL_HIGH; level++) { + uint64_t irqs = 0; + for (irq = 0; irq < CRIME_NINTS; irq++) if (intrlevel[irq] & (1 << level)) - irqs |= 1 << irq; - if (level != IPL_NONE) - irqs |= SINT_ALLMASK; - imask[level] = irqs; + irqs |= 1UL << irq; + mace_imask[level] = irqs; } /* * There are tty, network and disk drivers that use free() at interrupt - * time, so imp > (tty | net | bio). + * time, so vm > (tty | net | bio). * * Enforce a hierarchy that gives slow devices a better chance at not * dropping data. */ - imask[IPL_NET] |= imask[IPL_BIO]; - imask[IPL_TTY] |= imask[IPL_NET]; - imask[IPL_VM] |= imask[IPL_TTY]; - imask[IPL_CLOCK] |= imask[IPL_VM] | SPL_CLOCKMASK; + mace_imask[IPL_NET] |= mace_imask[IPL_BIO]; + mace_imask[IPL_TTY] |= mace_imask[IPL_NET]; + mace_imask[IPL_VM] |= mace_imask[IPL_TTY]; + mace_imask[IPL_CLOCK] |= mace_imask[IPL_VM]; /* * These are pseudo-levels. */ - imask[IPL_NONE] = 0; - imask[IPL_HIGH] = -1; - - /* And eventually calculate the complete masks. */ - for (irq = 0; irq < INTMASKSIZE; irq++) { - int irqs = 1 << irq; - for (q = intrhand[irq]; q; q = q->ih_next) - irqs |= imask[q->ih_level]; - intrmask[irq] = irqs | SINT_ALLMASK; - } - - /* Lastly, determine which IRQs are actually in use. */ - irq = 0; - for (level = 0; level < INTMASKSIZE; level++) { - if (intrhand[level]) { - irq |= 1 << level; - } - } - mace_intem = irq & 0x0000ffff; - hw_setintrmask(0); + mace_imask[IPL_NONE] = 0; + mace_imask[IPL_HIGH] = -1UL; } void -macebus_splx(int newcpl) +macebus_splx(int newipl) { struct cpu_info *ci = curcpu(); - /* Update masks to new cpl. Order highly important! */ - __asm__ (" .set noreorder\n"); - ci->ci_cpl = newcpl; - __asm__ (" sync\n .set reorder\n"); - hw_setintrmask(newcpl); + /* Update masks to new ipl. Order highly important! */ + __asm__ (".set noreorder\n"); + ci->ci_ipl = newipl; + __asm__ ("sync\n\t.set reorder\n"); + mace_setintrmask(newipl); /* If we still have softints pending trigger processing. */ - if (ci->ci_softpending & ~newcpl) + if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT) setsoftintr0(); } /* - * Process interrupts. The parameter pending has non-masked interrupts. + * Crime interrupt handler. 
*/ -uint32_t -macebus_iointr(uint32_t hwpend, struct trap_frame *cf) -{ - struct intrhand *ih; - uint32_t caught, vm; - int v; - uint32_t pending; - u_int64_t intstat, isastat, mask; -#ifdef DIAGNOSTIC - static int spurious = 0; -#endif - struct cpu_info *ci = curcpu(); - - intstat = bus_space_read_8(&crimebus_tag, crime_h, CRIME_INT_STAT); - intstat &= 0xffff; - - isastat = bus_space_read_8(&macebus_tag, mace_h, MACE_ISA_INT_STAT); - caught = 0; - - /* Mask off masked interrupts and save them as pending. */ - if (intstat & cf->cpl) { - mask = bus_space_read_8(&crimebus_tag, crime_h, CRIME_INT_MASK); - bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_MASK, mask); - caught++; - } - - /* Scan all unmasked. Scan the first 16 for now. */ - pending = intstat & ~cf->cpl; - - for (v = 0, vm = 1; pending != 0 && v < 16 ; v++, vm <<= 1) { - if (pending & vm) { - ih = intrhand[v]; - - while (ih) { - ih->frame = cf; - if ((*ih->ih_fun)(ih->ih_arg)) { - caught |= vm; - ih->ih_count.ec_count++; - } - ih = ih->ih_next; - } - } - } - - if (caught) { -#ifdef DIAGNOSTIC - spurious = 0; -#endif - return CR_INT_0; - } - -#ifdef DIAGNOSTIC - if (pending != 0) { - intstat = bus_space_read_8(&crimebus_tag, crime_h, - CRIME_INT_STAT) & - bus_space_read_8(&crimebus_tag, crime_h, CRIME_INT_MASK); - isastat = bus_space_read_8(&macebus_tag, mace_h, - MACE_ISA_INT_STAT) & - bus_space_read_8(&macebus_tag, mace_h, MACE_ISA_INT_MASK); - - if (intstat != 0 || isastat != 0) { - printf("stray interrupt, mace mask %lx stat %lx\n" - "crime mask %lx stat %lx hard %lx " - "(pending %lx caught %lx)\n", - bus_space_read_8(&macebus_tag, mace_h, - MACE_ISA_INT_MASK), - bus_space_read_8(&macebus_tag, mace_h, - MACE_ISA_INT_STAT), - bus_space_read_8(&crimebus_tag, crime_h, - CRIME_INT_MASK), - bus_space_read_8(&crimebus_tag, crime_h, - CRIME_INT_STAT), - bus_space_read_8(&crimebus_tag, crime_h, - CRIME_INT_HARD), - pending, caught); - if (++spurious >= 10) - panic("too many stray interrupts"); - } - } -#endif - - return 0; /* Not found here. */ -} +#define INTR_FUNCTIONNAME macebus_iointr +#define INTR_LOCAL_DECLS +#define INTR_GETMASKS \ +do { \ + isr = bus_space_read_8(&crimebus_tag, crime_h, CRIME_INT_STAT); \ + imr = bus_space_read_8(&crimebus_tag, crime_h, CRIME_INT_MASK); \ + bit = 63; \ +} while (0) +#define INTR_MASKPENDING \ + bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_MASK, imr & ~isr) +#define INTR_IMASK(ipl) mace_imask[ipl] +#define INTR_HANDLER(bit) mace_intrhand[bit] +#define INTR_SPURIOUS(bit) \ +do { \ + /* XXX +1 because of -1 in intr_establish() */ \ + if (bit != 4) \ + printf("spurious crime interrupt %d\n", bit + 1); \ +} while (0) +#define INTR_MASKRESTORE \ + bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_MASK, imr) + +#include <sgi/sgi/intr_template.c> /* * Macebus auxilary functions run each clock interrupt. @@ -720,12 +618,12 @@ macebus_aux(uint32_t hwpend, struct trap_frame *cf) } bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_MISC_REG, mask); - return 0; /* Real clock int handler registers. */ + return 0; /* Real clock int handler will claim the interrupt. 
*/ } void -hw_setintrmask(uint32_t m) +mace_setintrmask(int level) { *(volatile uint64_t *)(PHYS_TO_XKPHYS(CRIMEBUS_BASE, CCA_NC) + - CRIME_INT_MASK) = mace_intem & ~((uint64_t)m); + CRIME_INT_MASK) = mace_intem & ~mace_imask[level]; } diff --git a/sys/arch/sgi/sgi/genassym.cf b/sys/arch/sgi/sgi/genassym.cf index d619bccd0a6..5833c78c70b 100644 --- a/sys/arch/sgi/sgi/genassym.cf +++ b/sys/arch/sgi/sgi/genassym.cf @@ -1,4 +1,4 @@ -# $OpenBSD: genassym.cf,v 1.16 2009/10/22 20:59:24 miod Exp $ +# $OpenBSD: genassym.cf,v 1.17 2009/10/22 22:08:54 miod Exp $ # # Copyright (c) 1997 Per Fogelstrom / Opsycon AB # @@ -58,7 +58,7 @@ member pcb_segtab struct cpu_info member ci_curproc member ci_curprocpaddr -member ci_cpl +member ci_ipl export VM_MIN_KERNEL_ADDRESS export SIGFPE diff --git a/sys/arch/sgi/sgi/intr_template.c b/sys/arch/sgi/sgi/intr_template.c new file mode 100644 index 00000000000..30ca5ceb517 --- /dev/null +++ b/sys/arch/sgi/sgi/intr_template.c @@ -0,0 +1,125 @@ +/* $OpenBSD: intr_template.c,v 1.1 2009/10/22 22:08:54 miod Exp $ */ + +/* + * Copyright (c) 2009 Miodrag Vallat. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Common interrupt dispatcher bowels. + * + * This file is not a standalone file; to use it, define the following + * macros and #include <sgi/sgi/intr_template.c>: + * + * INTR_FUNCTIONNAME interrupt handler function name + * INTR_GETMASKS logic to get `imr', `isr', and initialize `bit' + * INTR_HANDLER(bit) logic to access intrhand array head for `bit' + * INTR_IMASK(ipl) logic to access imask array for `ipl' + * INTR_LOCAL_DECLS local declarations (may be empty) + * INTR_MASKPENDING logic to mask `isr' + * INTR_MASKRESTORE logic to reset `imr' + * INTR_SPURIOUS(bit) print a spurious interrupt message for `bit' + */ + +uint32_t +INTR_FUNCTIONNAME(uint32_t hwpend, struct trap_frame *frame) +{ + struct cpu_info *ci = curcpu(); + uint64_t imr, isr, mask; + int ipl; + int bit; + struct intrhand *ih; + int rc; + INTR_LOCAL_DECLS + + INTR_GETMASKS; + + isr &= imr; + if (isr == 0) + return 0; /* not for us */ + + /* + * Mask all pending interrupts. + */ + INTR_MASKPENDING; + + /* + * If interrupts are spl-masked, mask them and wait for splx() + * to reenable them when necessary. + */ + if ((mask = isr & INTR_IMASK(frame->ipl)) != 0) { + isr &= ~mask; + imr &= ~mask; + } + + /* + * Now process allowed interrupts. 
+ */ + if (isr != 0) { + int lvl, bitno; + uint64_t tmpisr; + + __asm__ (".set noreorder\n"); + ipl = ci->ci_ipl; + __asm__ ("sync\n\t.set reorder\n"); + + /* Service higher level interrupts first */ + for (lvl = IPL_HIGH - 1; lvl != IPL_NONE; lvl--) { + tmpisr = isr & (INTR_IMASK(lvl) ^ INTR_IMASK(lvl - 1)); + if (tmpisr == 0) + continue; + for (bitno = bit, mask = 1UL << bitno; tmpisr != 0; + bitno--, mask >>= 1) { + if ((tmpisr & mask) == 0) + continue; + + rc = 0; + for (ih = INTR_HANDLER(bitno); ih != NULL; + ih = ih->ih_next) { + splraise(ih->ih_level); + ih->frame = frame; + if ((*ih->ih_fun)(ih->ih_arg) != 0) { + rc = 1; + ih->ih_count.ec_count++; + } + __asm__ (".set noreorder\n"); + ci->ci_ipl = ipl; + __asm__ ("sync\n\t.set reorder\n"); + } + if (rc == 0) + INTR_SPURIOUS(bitno); + + isr ^= mask; + if ((tmpisr ^= mask) == 0) + break; + } + } + + /* + * Reenable interrupts which have been serviced. + */ + INTR_MASKRESTORE; + } + + return hwpend; +} + +#undef INTR_FUNCTIONNAME +#undef INTR_GETMASKS +#undef INTR_HANDLER +#undef INTR_IMASK +#undef INTR_LOCAL_DECLS +#undef INTR_MASKPENDING +#undef INTR_MASKRESTORE +#undef INTR_SPURIOUS diff --git a/sys/arch/sgi/sgi/ip27_machdep.c b/sys/arch/sgi/sgi/ip27_machdep.c index 77d547a8758..1c4bfc70bb8 100644 --- a/sys/arch/sgi/sgi/ip27_machdep.c +++ b/sys/arch/sgi/sgi/ip27_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ip27_machdep.c,v 1.27 2009/10/22 20:59:24 miod Exp $ */ +/* $OpenBSD: ip27_machdep.c,v 1.28 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2008, 2009 Miodrag Vallat. @@ -70,14 +70,38 @@ int ip27_hub_intr_register(int, int, int *); int ip27_hub_intr_establish(int (*)(void *), void *, int, int, const char *); void ip27_hub_intr_disestablish(int); -uint32_t ip27_hub_intr_handler(uint32_t, struct trap_frame *); +uint32_t hubpi_intr0(uint32_t, struct trap_frame *); +uint32_t hubpi_intr1(uint32_t, struct trap_frame *); void ip27_hub_intr_makemasks(void); +void ip27_hub_setintrmask(int); void ip27_hub_splx(int); void ip27_attach_node(struct device *, int16_t); int ip27_print(void *, const char *); void ip27_nmi(void *); +/* + * IP27 interrupt handling declarations: 128 hw sources, plus timers and + * hub error sources; 5 levels. + */ + +struct intrhand *hubpi_intrhand0[HUBPI_NINTS]; +struct intrhand *hubpi_intrhand1[HUBPI_NINTS]; + +#ifdef notyet +#define INTPRI_XBOW_HUB (INTPRI_CLOCK + 1) /* HUB errors */ +#define INTPRI_XBOW_TIMER (INTPRI_XBOW_HUB + 1) /* prof timer */ +#define INTPRI_XBOW_CLOCK (INTPRI_XBOW_TIMER + 1) /* RTC */ +#define INTPRI_XBOW_HW1 (INTPRI_XBOW_CLOCK + 1) /* HW level 1 */ +#else +#define INTPRI_XBOW_HW1 (INTPRI_CLOCK + 1) /* HW level 1 */ +#endif +#define INTPRI_XBOW_HW0 (INTPRI_XBOW_HW1 + 1) /* HW level 0 */ + +struct { + uint64_t hw[2]; +} hubpi_intem, hubpi_imask[NIPLS]; + void ip27_setup() { @@ -230,7 +254,8 @@ ip27_setup() xbow_intr_widget_intr_establish = ip27_hub_intr_establish; xbow_intr_widget_intr_disestablish = ip27_hub_intr_disestablish; - set_intr(INTPRI_XBOWMUX, CR_INT_0, ip27_hub_intr_handler); + set_intr(INTPRI_XBOW_HW1, CR_INT_1, hubpi_intr1); + set_intr(INTPRI_XBOW_HW0, CR_INT_0, hubpi_intr0); register_splx_handler(ip27_hub_splx); /* @@ -560,8 +585,6 @@ ip27_halt(int howto) * Local HUB interrupt handling routines */ -uint64_t ip27_hub_intrmask; - /* * Find a suitable interrupt bit for the given interrupt. 
*/ @@ -571,18 +594,27 @@ ip27_hub_intr_register(int widget, int level, int *intrbit) int bit; /* - * All interrupts will be serviced at hardware level 0, - * so the `level' argument can be ignored. - * On HUB, the low 7 bits of the level 0 interrupt register - * are reserved. + * Try to allocate a bit on hardware level 0 first. */ - for (bit = SPL_CLOCK - 1; bit >= 7; bit--) - if ((ip27_hub_intrmask & (1 << bit)) == 0) - break; + for (bit = HUBPI_INTR0_WIDGET_MAX; bit >= HUBPI_INTR0_WIDGET_MIN; bit--) + if ((hubpi_intem.hw[0] & (1UL << bit)) == 0) + goto found; - if (bit < 7) - return EINVAL; +#ifdef notyet + /* + * If all level 0 sources are in use, try to allocate a bit on + * level 1. + */ + for (bit = HUBPI_INTR1_WIDGET_MAX; bit >= HUBPI_INTR1_WIDGET_MIN; bit--) + if ((hubpi_intem.hw[1] & (1UL << bit)) == 0) { + bit += HUBPI_NINTS; + goto found; + } +#endif + + return EINVAL; +found: *intrbit = bit; return 0; } @@ -594,18 +626,27 @@ int ip27_hub_intr_establish(int (*func)(void *), void *arg, int intrbit, int level, const char *name) { - struct intrhand *ih; + struct intrhand *ih, **anchor; + int s; #ifdef DIAGNOSTIC - if (intrbit < 0 || intrbit >= SPL_CLOCK) + if (intrbit < 0 || intrbit >= HUBPI_NINTS + HUBPI_NINTS) return EINVAL; #endif /* * Widget interrupts are not supposed to be shared - the interrupt - * mask is large enough for all widgets. + * mask is supposedly large enough for all interrupt sources. + * + * XXX On systems with many widgets and/or nodes, this assumption + * XXX will no longer stand; we'll need to implement interrupt + * XXX sharing at some point. */ - if (intrhand[intrbit] != NULL) + if (intrbit >= HUBPI_NINTS) + anchor = &hubpi_intrhand1[intrbit % HUBPI_NINTS]; + else + anchor = &hubpi_intrhand0[intrbit]; + if (*anchor != NULL) return EEXIST; ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT); @@ -620,15 +661,15 @@ ip27_hub_intr_establish(int (*func)(void *), void *arg, int intrbit, if (name != NULL) evcount_attach(&ih->ih_count, name, &ih->ih_level, &evcount_intr); - intrhand[intrbit] = ih; - ip27_hub_intrmask |= 1UL << intrbit; + s = splhigh(); + + *anchor = ih; + + hubpi_intem.hw[intrbit / HUBPI_NINTS] |= 1UL << (intrbit % HUBPI_NINTS); ip27_hub_intr_makemasks(); - /* XXX this assumes we run on cpu0 */ - IP27_LHUB_S(HUBPI_CPU0_IMR0, - IP27_LHUB_L(HUBPI_CPU0_IMR0) | (1UL << intrbit)); - (void)IP27_LHUB_L(HUBPI_IR0); + splx(s); /* causes hw mask update */ return 0; } @@ -636,34 +677,35 @@ ip27_hub_intr_establish(int (*func)(void *), void *arg, int intrbit, void ip27_hub_intr_disestablish(int intrbit) { - struct intrhand *ih; + struct intrhand *ih, **anchor; int s; #ifdef DIAGNOSTIC - if (intrbit < 0 || intrbit >= SPL_CLOCK) + if (intrbit < 0 || intrbit >= HUBPI_NINTS + HUBPI_NINTS) return; #endif + if (intrbit >= HUBPI_NINTS) + anchor = &hubpi_intrhand1[intrbit % HUBPI_NINTS]; + else + anchor = &hubpi_intrhand0[intrbit]; + s = splhigh(); - if ((ih = intrhand[intrbit]) == NULL) { + if ((ih = *anchor) == NULL) { splx(s); return; } - /* XXX this assumes we run on cpu0 */ - IP27_LHUB_S(HUBPI_CPU0_IMR0, - IP27_LHUB_L(HUBPI_CPU0_IMR0) & ~(1UL << intrbit)); - (void)IP27_LHUB_L(HUBPI_IR0); + *anchor = NULL; - intrhand[intrbit] = NULL; - - ip27_hub_intrmask &= ~(1UL << intrbit); + hubpi_intem.hw[intrbit / HUBPI_NINTS] &= + ~(1UL << (intrbit % HUBPI_NINTS)); ip27_hub_intr_makemasks(); - free(ih, M_DEVBUF); - splx(s); + + free(ih, M_DEVBUF); } /* @@ -672,27 +714,38 @@ ip27_hub_intr_disestablish(int intrbit) void ip27_hub_intr_makemasks() { - int irq, level; + int irq, level, 
i; struct intrhand *q; - uint32_t intrlevel[INTMASKSIZE]; + uint intrlevel[HUBPI_NINTS + HUBPI_NINTS]; /* First, figure out which levels each IRQ uses. */ - for (irq = 0; irq < INTMASKSIZE; irq++) { - int levels = 0; - for (q = intrhand[irq]; q; q = q->ih_next) + for (irq = 0; irq < HUBPI_NINTS; irq++) { + uint levels = 0; + for (q = hubpi_intrhand0[irq]; q; q = q->ih_next) + levels |= 1 << q->ih_level; + for (q = hubpi_intrhand1[irq]; q; q = q->ih_next) levels |= 1 << q->ih_level; intrlevel[irq] = levels; } - /* Then figure out which IRQs use each level. */ - for (level = IPL_NONE; level < NIPLS; level++) { - int irqs = 0; - for (irq = 0; irq < INTMASKSIZE; irq++) + /* + * Then figure out which IRQs use each level. + * Note that we make sure never to overwrite imask[IPL_HIGH], in + * case an interrupt occurs during intr_disestablish() and causes + * an unfortunate splx() while we are here recomputing the masks. + */ + for (level = IPL_NONE; level < IPL_HIGH; level++) { + uint64_t irqs = 0; + for (irq = 0; irq < HUBPI_NINTS; irq++) if (intrlevel[irq] & (1 << level)) - irqs |= 1 << irq; - if (level != IPL_NONE) - irqs |= SINT_ALLMASK; - imask[level] = irqs; + irqs |= 1UL << irq; + hubpi_imask[level].hw[0] = irqs; + + irqs = 0; + for (irq = 0; irq < HUBPI_NINTS; irq++) + if (intrlevel[HUBPI_NINTS + irq] & (1 << level)) + irqs |= 1UL << irq; + hubpi_imask[level].hw[1] = irqs; } /* @@ -702,119 +755,106 @@ ip27_hub_intr_makemasks() * Enforce a hierarchy that gives slow devices a better chance at not * dropping data. */ - imask[IPL_NET] |= imask[IPL_BIO]; - imask[IPL_TTY] |= imask[IPL_NET]; - imask[IPL_VM] |= imask[IPL_TTY]; - imask[IPL_CLOCK] |= imask[IPL_VM] | SPL_CLOCKMASK; - - /* - * These are pseudo-levels. - */ - imask[IPL_NONE] = 0; - imask[IPL_HIGH] = -1; + for (i = 0; i < 2; i++) { + hubpi_imask[IPL_NET].hw[i] |= hubpi_imask[IPL_BIO].hw[i]; + hubpi_imask[IPL_TTY].hw[i] |= hubpi_imask[IPL_NET].hw[i]; + hubpi_imask[IPL_VM].hw[i] |= hubpi_imask[IPL_TTY].hw[i]; + hubpi_imask[IPL_CLOCK].hw[i] |= hubpi_imask[IPL_VM].hw[i]; - if(CPU_IS_PRIMARY(curcpu())) - hw_setintrmask(0); + /* + * These are pseudo-levels. + */ + hubpi_imask[IPL_NONE].hw[i] = 0; + hubpi_imask[IPL_HIGH].hw[i] = -1; + } } void -ip27_hub_splx(int newcpl) +ip27_hub_splx(int newipl) { struct cpu_info *ci = curcpu(); - /* Update masks to new cpl. Order highly important! */ - __asm__ (" .set noreorder\n"); - ci->ci_cpl = newcpl; - __asm__ (" sync\n .set reorder\n"); + /* Update masks to new ipl. Order highly important! */ + __asm__ (".set noreorder\n"); + ci->ci_ipl = newipl; + __asm__ ("sync\n\t.set reorder\n"); if (CPU_IS_PRIMARY(ci)) - hw_setintrmask(newcpl); + ip27_hub_setintrmask(newipl); /* If we still have softints pending trigger processing. */ - if (ci->ci_softpending & ~newcpl) + if (ci->ci_softpending && newipl < IPL_SOFTINT) setsoftintr0(); } -uint32_t -ip27_hub_intr_handler(uint32_t hwpend, struct trap_frame *frame) -{ - uint64_t imr, isr; - int icpl; - int bit; - uint32_t mask; - struct intrhand *ih; - int rc; - struct cpu_info *ci = curcpu(); - - /* XXX this assumes we run on cpu0 */ - isr = IP27_LHUB_L(HUBPI_IR0); - imr = IP27_LHUB_L(HUBPI_CPU0_IMR0); - - isr &= imr; - if (isr == 0) - return 0; /* not for us */ - - /* - * Mask all pending interrupts. - */ - IP27_LHUB_S(HUBPI_CPU0_IMR0, imr & ~isr); - (void)IP27_LHUB_L(HUBPI_IR0); - - /* - * If interrupts are spl-masked, mark them as pending only. 
- */ - if ((mask = isr & frame->cpl) != 0) { - isr &= ~mask; - imr &= ~mask; - } - - /* - * Now process unmasked interrupts. - */ - if (isr != 0) { - __asm__ (" .set noreorder\n"); - icpl = ci->ci_cpl; - __asm__ (" sync\n .set reorder\n"); - - /* XXX Rework this to dispatch in decreasing levels */ - for (bit = SPL_CLOCK - 1, mask = 1 << bit; bit >= 7; - bit--, mask >>= 1) { - if ((isr & mask) == 0) - continue; - - rc = 0; - for (ih = intrhand[bit]; ih != NULL; ih = ih->ih_next) { - splraise(imask[ih->ih_level]); - ih->frame = frame; - if ((*ih->ih_fun)(ih->ih_arg) != 0) { - rc = 1; - ih->ih_count.ec_count++; - } - } - if (rc == 0) - printf("spurious interrupt, source %d\n", bit); - - if ((isr ^= mask) == 0) - break; - } - - /* - * Reenable interrupts which have been serviced. - */ - IP27_LHUB_S(HUBPI_CPU0_IMR0, imr); - (void)IP27_LHUB_L(HUBPI_IR0); - - __asm__ (" .set noreorder\n"); - ci->ci_cpl = icpl; - __asm__ (" sync\n .set reorder\n"); - } +/* + * Level 0 and level 1 interrupt dispatchers. + */ - return CR_INT_0; -} +#define INTR_FUNCTIONNAME hubpi_intr0 +#define INTR_LOCAL_DECLS +#define INTR_GETMASKS \ +do { \ + /* XXX this assumes we run on cpu0 */ \ + isr = IP27_LHUB_L(HUBPI_IR0); \ + imr = IP27_LHUB_L(HUBPI_CPU0_IMR0); \ + bit = HUBPI_INTR0_WIDGET_MAX; \ +} while (0) +#define INTR_MASKPENDING \ +do { \ + IP27_LHUB_S(HUBPI_CPU0_IMR0, imr & ~isr); \ + (void)IP27_LHUB_L(HUBPI_IR0); \ +} while (0) +#define INTR_IMASK(ipl) hubpi_imask[ipl].hw[0] +#define INTR_HANDLER(bit) hubpi_intrhand0[bit] +#define INTR_SPURIOUS(bit) \ +do { \ + printf("spurious interrupt, source %d\n", bit); \ +} while (0) +#define INTR_MASKRESTORE \ +do { \ + IP27_LHUB_S(HUBPI_CPU0_IMR0, imr); \ + (void)IP27_LHUB_L(HUBPI_IR0); \ +} while (0) + +#include <sgi/sgi/intr_template.c> + +#define INTR_FUNCTIONNAME hubpi_intr1 +#define INTR_LOCAL_DECLS +#define INTR_GETMASKS \ +do { \ + /* XXX this assumes we run on cpu0 */ \ + isr = IP27_LHUB_L(HUBPI_IR1); \ + imr = IP27_LHUB_L(HUBPI_CPU0_IMR1); \ + bit = HUBPI_INTR1_WIDGET_MAX; \ +} while (0) +#define INTR_MASKPENDING \ +do { \ + IP27_LHUB_S(HUBPI_CPU0_IMR1, imr & ~isr); \ + (void)IP27_LHUB_L(HUBPI_IR1); \ +} while (0) +#define INTR_IMASK(ipl) hubpi_imask[ipl].hw[1] +#define INTR_HANDLER(bit) hubpi_intrhand1[bit] +#define INTR_SPURIOUS(bit) \ +do { \ + printf("spurious interrupt, source %d\n", bit + HUBPI_NINTS); \ +} while (0) +#define INTR_MASKRESTORE \ +do { \ + IP27_LHUB_S(HUBPI_CPU0_IMR1, imr); \ + (void)IP27_LHUB_L(HUBPI_IR1); \ +} while (0) + +#include <sgi/sgi/intr_template.c> void -hw_setintrmask(uint32_t m) +ip27_hub_setintrmask(int level) { - IP27_LHUB_S(HUBPI_CPU0_IMR0, ip27_hub_intrmask & ~((uint64_t)m)); + /* XXX this assumes we run on cpu0 */ + IP27_LHUB_S(HUBPI_CPU0_IMR0, + hubpi_intem.hw[0] & ~hubpi_imask[level].hw[0]); (void)IP27_LHUB_L(HUBPI_IR0); + IP27_LHUB_S(HUBPI_CPU0_IMR1, + hubpi_intem.hw[1] & ~hubpi_imask[level].hw[1]); + (void)IP27_LHUB_L(HUBPI_IR1); } void diff --git a/sys/arch/sgi/sgi/ip30_machdep.c b/sys/arch/sgi/sgi/ip30_machdep.c index e8ad964a40f..ac05828741e 100644 --- a/sys/arch/sgi/sgi/ip30_machdep.c +++ b/sys/arch/sgi/sgi/ip30_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: ip30_machdep.c,v 1.12 2009/10/22 20:05:28 miod Exp $ */ +/* $OpenBSD: ip30_machdep.c,v 1.13 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2008, 2009 Miodrag Vallat. 
@@ -198,14 +198,3 @@ ip30_widget_id(int16_t nasid, u_int widget, uint32_t *wid) return 0; } - -void -hw_setintrmask(uint32_t m) -{ - extern uint64_t heart_intem; - - paddr_t heart; - heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); - *(volatile uint64_t *)(heart + HEART_IMR(0)) = - heart_intem & ~((uint64_t)m); -} diff --git a/sys/arch/sgi/sgi/mutex.c b/sys/arch/sgi/sgi/mutex.c index 8e9202d3588..9b431010b5b 100644 --- a/sys/arch/sgi/sgi/mutex.c +++ b/sys/arch/sgi/sgi/mutex.c @@ -1,4 +1,4 @@ -/* $OpenBSD: mutex.c,v 1.6 2009/08/13 17:06:05 miod Exp $ */ +/* $OpenBSD: mutex.c,v 1.7 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2004 Artur Grabowski <art@openbsd.org> @@ -43,16 +43,15 @@ void mtx_init(struct mutex *mtx, int wantipl) { mtx->mtx_lock = 0; - /* We can't access imask[] here, since MUTEX_INITIALIZER can't. */ mtx->mtx_wantipl = wantipl; - mtx->mtx_oldcpl = IPL_NONE; + mtx->mtx_oldipl = IPL_NONE; } void mtx_enter(struct mutex *mtx) { if (mtx->mtx_wantipl != IPL_NONE) - mtx->mtx_oldcpl = splraise(imask[mtx->mtx_wantipl]); + mtx->mtx_oldipl = splraise(mtx->mtx_wantipl); MUTEX_ASSERT_UNLOCKED(mtx); mtx->mtx_lock = 1; @@ -62,7 +61,7 @@ int mtx_enter_try(struct mutex *mtx) { if (mtx->mtx_wantipl != IPL_NONE) - mtx->mtx_oldcpl = splraise(imask[mtx->mtx_wantipl]); + mtx->mtx_oldipl = splraise(mtx->mtx_wantipl); MUTEX_ASSERT_UNLOCKED(mtx); mtx->mtx_lock = 1; @@ -75,5 +74,5 @@ mtx_leave(struct mutex *mtx) MUTEX_ASSERT_LOCKED(mtx); mtx->mtx_lock = 0; if (mtx->mtx_wantipl != IPL_NONE) - splx(mtx->mtx_oldcpl); + splx(mtx->mtx_oldipl); } diff --git a/sys/arch/sgi/xbow/hub.h b/sys/arch/sgi/xbow/hub.h index e80ea8dd508..63dfed9fb54 100644 --- a/sys/arch/sgi/xbow/hub.h +++ b/sys/arch/sgi/xbow/hub.h @@ -1,4 +1,4 @@ -/* $OpenBSD: hub.h,v 1.5 2009/10/14 20:21:16 miod Exp $ */ +/* $OpenBSD: hub.h,v 1.6 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2009 Miodrag Vallat. @@ -109,6 +109,60 @@ #define HUBPI_OFFSET 0x00200000 /* + * ISR bit assignments. + */ + +/** Level 1 interrupt */ +/* ?? MSC panic */ +#define HUBPI_ISR1_MSC_ERROR 63 +/* NI interface error */ +#define HUBPI_ISR1_NI_ERROR 62 +/* MD correctable error */ +#define HUBPI_ISR1_MD_COR_ERROR 61 +/* cpu correctable error B */ +#define HUBPI_ISR1_COR_ERROR_B 60 +/* cpu correctable error A */ +#define HUBPI_ISR1_COR_ERROR_A 59 +/* clock error */ +#define HUBPI_ISR1_CLOCK_ERROR 58 +/* IP35 NACK interrupts */ +#define HUBPI_ISR1_NACK_B 57 +#define HUBPI_ISR1_NACK_A 56 +/* IP35 LB error */ +#define HUBPI_ISR1_LB 55 +/* IP35 XB error */ +#define HUBPI_ISR1_XB 54 +/* 53-45 used by PROM */ +/* 44-43 available */ +/* 42-41 LLP errors */ +/* NI broadcast errors */ +#define HUBPI_ISR1_NI_ERROR_B 40 +#define HUBPI_ISR1_NI_ERROR_A 39 +/* 38-36 used by IP35 PROM */ +/* 35-0 available */ + +/** Level 0 interrupt */ +/* 63-7 available */ +/* IPI interrupts */ +#define HUBPI_ISR0_IPI_B 6 +#define HUBPI_ISR0_IPI_A 5 +/* ? 
*/ +#define HUBPI_ISR0_UART 4 +/* page migration interrupt */ +#define HUBPI_ISR0_PAGE_MIGRATION 3 +/* graphics->cpu interrupts */ +#define HUBPI_ISR0_GFX_B 2 +#define HUBPI_ISR0_GFX_A 1 +/* 0 reserved */ + +#define HUBPI_INTR1_WIDGET_MAX 35 +#define HUBPI_INTR1_WIDGET_MIN 0 +#define HUBPI_INTR0_WIDGET_MAX 63 +#define HUBPI_INTR0_WIDGET_MIN 7 + +#define HUBPI_NINTS 64 /* per register */ + +/* * HUB MD - Memory/Directory */ diff --git a/sys/arch/sgi/xbow/xbridge.c b/sys/arch/sgi/xbow/xbridge.c index 1201db1f927..e454d79926b 100644 --- a/sys/arch/sgi/xbow/xbridge.c +++ b/sys/arch/sgi/xbow/xbridge.c @@ -1,4 +1,4 @@ -/* $OpenBSD: xbridge.c,v 1.54 2009/10/22 19:55:45 miod Exp $ */ +/* $OpenBSD: xbridge.c,v 1.55 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2008, 2009 Miodrag Vallat. @@ -1072,11 +1072,18 @@ xbridge_intr_handler(void *v) spurious = 0; LIST_FOREACH(xih, &xi->xi_handlers, xih_nxt) { - splraise(imask[xih->xih_level]); + splraise(xih->xih_level); if ((*xih->xih_func)(xih->xih_arg) != 0) { xih->xih_count.ec_count++; rc = 1; } + /* + * No need to lower spl here, as our caller will lower + * spl upon our return. + * However that splraise() is necessary so that interrupt + * handler code calling splx() will not cause our interrupt + * source to be unmasked. + */ } if (rc == 0 && spurious == 0) printf("%s: spurious irq %d\n", DEVNAME(xb), xi->xi_intrbit); diff --git a/sys/arch/sgi/xbow/xheart.c b/sys/arch/sgi/xbow/xheart.c index 34ba7f2b72d..ed51dfd4b1e 100644 --- a/sys/arch/sgi/xbow/xheart.c +++ b/sys/arch/sgi/xbow/xheart.c @@ -1,4 +1,4 @@ -/* $OpenBSD: xheart.c,v 1.12 2009/10/22 20:59:24 miod Exp $ */ +/* $OpenBSD: xheart.c,v 1.13 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2008 Miodrag Vallat. @@ -43,8 +43,6 @@ struct xheart_softc { struct device sc_dev; struct onewire_bus sc_bus; - - uint64_t sc_intrmask; }; int xheart_match(struct device *, void *, void *); @@ -69,9 +67,30 @@ int xheart_intr_register(int, int, int *); int xheart_intr_establish(int (*)(void *), void *, int, int, const char *); void xheart_intr_disestablish(int); uint32_t xheart_intr_handler(uint32_t, struct trap_frame *); -void xheart_intr_makemasks(struct xheart_softc *); +void xheart_intr_makemasks(void); +void xheart_setintrmask(int); void xheart_splx(int); +/* + * HEART interrupt handling declarations: 64 sources; 5 levels. + */ + +struct intrhand *xheart_intrhand[HEART_NINTS]; + +#ifdef notyet +#define INTPRI_HEART_4 (INTPRI_CLOCK + 1) +#define INTPRI_HEART_3 (INTPRI_HEART_4 + 1) +#define INTPRI_HEART_2 (INTPRI_HEART_3 + 1) +#define INTPRI_HEART_1 (INTPRI_HEART_2 + 1) +#define INTPRI_HEART_0 (INTPRI_HEART_1 + 1) +#else +#define INTPRI_HEART_2 (INTPRI_CLOCK + 1) +#define INTPRI_HEART_0 (INTPRI_HEART_2 + 1) +#endif + +uint64_t xheart_intem; +uint64_t xheart_imask[NIPLS]; + int xheart_match(struct device *parent, void *match, void *aux) { @@ -118,7 +137,7 @@ xheart_attach(struct device *parent, struct device *self, void *aux) xbow_intr_widget_intr_register = xheart_intr_register; xbow_intr_widget_intr_establish = xheart_intr_establish; xbow_intr_widget_intr_disestablish = xheart_intr_disestablish; - sc->sc_intrmask = 0; + xheart_intem = 0; /* * Acknowledge and disable all interrupts. 
@@ -131,7 +150,15 @@ xheart_attach(struct device *parent, struct device *self, void *aux) *(volatile uint64_t*)(heart + HEART_IMR(2)) = 0UL; *(volatile uint64_t*)(heart + HEART_IMR(3)) = 0UL; - set_intr(INTPRI_XBOWMUX, CR_INT_0, xheart_intr_handler); +#ifdef notyet + set_intr(INTPRI_HEART_4, CR_INT_4, xheart_intr_handler); + set_intr(INTPRI_HEART_3, CR_INT_3, xheart_intr_handler); +#endif + set_intr(INTPRI_HEART_2, CR_INT_2, xheart_intr_handler); +#ifdef notyet + set_intr(INTPRI_HEART_1, CR_INT_1, xheart_intr_handler); +#endif + set_intr(INTPRI_HEART_0, CR_INT_0, xheart_intr_handler); register_splx_handler(xheart_splx); } } @@ -232,7 +259,6 @@ xheart_ow_pulse(struct xheart_softc *sc, int pulse, int data) int xheart_intr_register(int widget, int level, int *intrbit) { - struct xheart_softc *sc = (void *)xheart_cd.cd_devs[0]; int bit; /* @@ -240,12 +266,12 @@ xheart_intr_register(int widget, int level, int *intrbit) * so the `level' argument can be ignored. */ for (bit = HEART_INTR_WIDGET_MAX; bit >= HEART_INTR_WIDGET_MIN; bit--) - if ((sc->sc_intrmask & (1 << bit)) == 0) - break; + if ((xheart_intem & (1UL << bit)) == 0) + goto found; - if (bit < HEART_INTR_WIDGET_MIN) - return EINVAL; + return EINVAL; +found: *intrbit = bit; return 0; } @@ -257,12 +283,11 @@ int xheart_intr_establish(int (*func)(void *), void *arg, int intrbit, int level, const char *name) { - struct xheart_softc *sc = (void *)xheart_cd.cd_devs[0]; struct intrhand *ih; - paddr_t heart; + int s; #ifdef DIAGNOSTIC - if (intrbit < HEART_INTR_MIN || intrbit > HEART_INTR_MAX) + if (intrbit < 0 || intrbit >= HEART_NINTS) return EINVAL; #endif @@ -270,7 +295,7 @@ xheart_intr_establish(int (*func)(void *), void *arg, int intrbit, * HEART interrupts are not supposed to be shared - the interrupt * mask is large enough for all widgets. */ - if (intrhand[intrbit] != NULL) + if (xheart_intrhand[intrbit] != NULL) return EEXIST; ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT); @@ -285,14 +310,15 @@ xheart_intr_establish(int (*func)(void *), void *arg, int intrbit, if (name != NULL) evcount_attach(&ih->ih_count, name, &ih->ih_level, &evcount_intr); - intrhand[intrbit] = ih; - sc->sc_intrmask |= 1UL << intrbit; - xheart_intr_makemasks(sc); + s = splhigh(); + + xheart_intrhand[intrbit] = ih; + + xheart_intem |= 1UL << intrbit; + xheart_intr_makemasks(); - /* XXX this assumes we run on cpu0 */ - heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); - *(volatile uint64_t *)(heart + HEART_IMR(0)) |= 1UL << intrbit; + splx(s); /* causes hw mask update */ return 0; } @@ -300,70 +326,61 @@ xheart_intr_establish(int (*func)(void *), void *arg, int intrbit, void xheart_intr_disestablish(int intrbit) { - struct xheart_softc *sc = (void *)xheart_cd.cd_devs[0]; struct intrhand *ih; - paddr_t heart; int s; #ifdef DIAGNOSTIC - if (intrbit < HEART_INTR_MIN || intrbit > HEART_INTR_MAX) + if (intrbit < 0 || intrbit >= HEART_NINTS) return; #endif s = splhigh(); - if ((ih = intrhand[intrbit]) == NULL) { + if ((ih = xheart_intrhand[intrbit]) == NULL) { splx(s); return; } - /* XXX this assumes we run on cpu0 */ - heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); - *(volatile uint64_t *)(heart + HEART_IMR(0)) &= ~(1UL << intrbit); + xheart_intrhand[intrbit] = NULL; - intrhand[intrbit] = NULL; + xheart_intem &= ~(1UL << intrbit); + xheart_intr_makemasks(); - sc->sc_intrmask &= ~(1UL << intrbit); - xheart_intr_makemasks(sc); + splx(s); free(ih, M_DEVBUF); - - splx(s); } /* - * Xheart interrupt handler driver. 
- */ - -uint64_t heart_intem = 0; - -/* * Recompute interrupt masks. */ void -xheart_intr_makemasks(struct xheart_softc *sc) +xheart_intr_makemasks() { int irq, level; struct intrhand *q; - uint32_t intrlevel[INTMASKSIZE]; + uint intrlevel[HEART_NINTS]; /* First, figure out which levels each IRQ uses. */ - for (irq = 0; irq < INTMASKSIZE; irq++) { - int levels = 0; - for (q = intrhand[irq]; q; q = q->ih_next) + for (irq = 0; irq < HEART_NINTS; irq++) { + uint levels = 0; + for (q = xheart_intrhand[irq]; q; q = q->ih_next) levels |= 1 << q->ih_level; intrlevel[irq] = levels; } - /* Then figure out which IRQs use each level. */ - for (level = IPL_NONE; level < NIPLS; level++) { - int irqs = 0; - for (irq = 0; irq < INTMASKSIZE; irq++) + /* + * Then figure out which IRQs use each level. + * Note that we make sure never to overwrite imask[IPL_HIGH], in + * case an interrupt occurs during intr_disestablish() and causes + * an unfortunate splx() while we are here recomputing the masks. + */ + for (level = IPL_NONE; level < IPL_HIGH; level++) { + uint64_t irqs = 0; + for (irq = 0; irq < HEART_NINTS; irq++) if (intrlevel[irq] & (1 << level)) - irqs |= 1 << irq; - if (level != IPL_NONE) - irqs |= SINT_ALLMASK; - imask[level] = irqs; + irqs |= 1UL << irq; + xheart_imask[level] = irqs; } /* @@ -373,106 +390,87 @@ xheart_intr_makemasks(struct xheart_softc *sc) * Enforce a hierarchy that gives slow devices a better chance at not * dropping data. */ - imask[IPL_NET] |= imask[IPL_BIO]; - imask[IPL_TTY] |= imask[IPL_NET]; - imask[IPL_VM] |= imask[IPL_TTY]; - imask[IPL_CLOCK] |= imask[IPL_VM] | SPL_CLOCKMASK; + xheart_imask[IPL_NET] |= xheart_imask[IPL_BIO]; + xheart_imask[IPL_TTY] |= xheart_imask[IPL_NET]; + xheart_imask[IPL_VM] |= xheart_imask[IPL_TTY]; + xheart_imask[IPL_CLOCK] |= xheart_imask[IPL_VM]; /* * These are pseudo-levels. */ - imask[IPL_NONE] = 0; - imask[IPL_HIGH] = -1; - - heart_intem = sc->sc_intrmask; - if(CPU_IS_PRIMARY(curcpu())) - hw_setintrmask(0); + xheart_imask[IPL_NONE] = 0; + xheart_imask[IPL_HIGH] = -1UL; } void -xheart_splx(int newcpl) +xheart_splx(int newipl) { struct cpu_info *ci = curcpu(); - /* Update masks to new cpl. Order highly important! */ - __asm__ (" .set noreorder\n"); - ci->ci_cpl = newcpl; - __asm__ (" sync\n .set reorder\n"); + /* Update masks to new ipl. Order highly important! */ + __asm__ (".set noreorder\n"); + ci->ci_ipl = newipl; + __asm__ ("sync\n\t.set reorder\n"); if (CPU_IS_PRIMARY(ci)) - hw_setintrmask(newcpl); + xheart_setintrmask(newipl); /* If we still have softints pending trigger processing. */ - if (ci->ci_softpending & ~newcpl) + if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT) setsoftintr0(); } -uint32_t -xheart_intr_handler(uint32_t hwpend, struct trap_frame *frame) -{ - struct cpu_info *ci = curcpu(); - paddr_t heart; - uint64_t imr, isr; - int icpl; - int bit; - uint32_t mask; - struct intrhand *ih; - int rc; - - heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); - isr = *(volatile uint64_t *)(heart + HEART_ISR); - imr = *(volatile uint64_t *)(heart + HEART_IMR(0)); - - isr &= imr; - if (isr == 0) - return 0; /* not for us */ - - /* - * Mask all pending interrupts. - */ - *(volatile uint64_t *)(heart + HEART_IMR(0)) &= ~isr; - - /* - * If interrupts are spl-masked, mark them as pending only. - */ - if ((mask = isr & frame->cpl) != 0) { - isr &= ~mask; - } - - /* - * Now process unmasked interrupts. 
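/*
 * Condensed model of the mask computation above: each source's IPL is
 * folded into one 64-bit mask per level, then the levels are cascaded
 * so that e.g. IPL_TTY also blocks every IPL_NET and IPL_BIO source.
 * IPL_NONE blocks nothing and IPL_HIGH blocks everything; as in the
 * code above, IPL_HIGH is never recomputed.  Softint levels are
 * omitted, the sample sources in main() are invented, and one handler
 * per source is assumed.
 */
#include <stdio.h>
#include <stdint.h>

enum { IPL_NONE, IPL_BIO, IPL_NET, IPL_TTY, IPL_VM, IPL_CLOCK, IPL_HIGH, NIPLS };
#define NINTS	64

static int srclevel[NINTS];	/* IPL of the handler on each source, or IPL_NONE */
static uint64_t imask[NIPLS];

static void
makemasks(void)
{
	int irq, level;

	/* sources whose handler runs at exactly this level */
	for (level = IPL_NONE + 1; level < IPL_HIGH; level++) {
		uint64_t irqs = 0;

		for (irq = 0; irq < NINTS; irq++)
			if (srclevel[irq] == level)
				irqs |= (uint64_t)1 << irq;
		imask[level] = irqs;
	}

	/* cascade: each level also blocks what the levels below it block */
	imask[IPL_NET] |= imask[IPL_BIO];
	imask[IPL_TTY] |= imask[IPL_NET];
	imask[IPL_VM] |= imask[IPL_TTY];
	imask[IPL_CLOCK] |= imask[IPL_VM];

	/* pseudo-levels */
	imask[IPL_NONE] = 0;
	imask[IPL_HIGH] = ~(uint64_t)0;
}

int
main(void)
{
	srclevel[3] = IPL_BIO;	/* e.g. a disk controller on bit 3 */
	srclevel[5] = IPL_NET;	/* e.g. a network interface on bit 5 */
	makemasks();
	printf("imask[IPL_BIO]=%#llx imask[IPL_NET]=%#llx imask[IPL_TTY]=%#llx\n",
	    (unsigned long long)imask[IPL_BIO],
	    (unsigned long long)imask[IPL_NET],
	    (unsigned long long)imask[IPL_TTY]);
	return 0;
}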
- */ - if (isr != 0) { - __asm__ (" .set noreorder\n"); - icpl = ci->ci_cpl; - __asm__ (" sync\n .set reorder\n"); - - /* XXX Rework this to dispatch in decreasing levels */ - for (bit = HEART_INTR_MAX, mask = 1 << bit; - bit >= HEART_INTR_MIN; bit--, mask >>= 1) { - if ((isr & mask) == 0) - continue; - - rc = 0; - for (ih = intrhand[bit]; ih != NULL; ih = ih->ih_next) { - splraise(imask[ih->ih_level]); - ih->frame = frame; - if ((*ih->ih_fun)(ih->ih_arg) != 0) { - rc = 1; - ih->ih_count.ec_count++; - } - } - if (rc == 0) - printf("spurious interrupt, source %d\n", bit); - } - - /* - * Reenable interrupts which have been serviced. - */ - *(volatile uint64_t *)(heart + HEART_IMR(0)) |= isr; +/* + * Heart interrupt handler. Can be registered at any hardware interrupt level. + */ - __asm__ (" .set noreorder\n"); - ci->ci_cpl = icpl; - __asm__ (" sync\n .set reorder\n"); - } +#define INTR_FUNCTIONNAME xheart_intr_handler +#define INTR_LOCAL_DECLS \ + paddr_t heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); +#define INTR_GETMASKS \ +do { \ + isr = *(volatile uint64_t *)(heart + HEART_ISR); \ + imr = *(volatile uint64_t *)(heart + HEART_IMR(0)); \ + switch (hwpend) { \ + case CR_INT_0: \ + isr &= HEART_ISR_LVL0_MASK; \ + bit = HEART_ISR_LVL0_MAX; \ + break; \ + case CR_INT_1: \ + isr &= HEART_ISR_LVL1_MASK; \ + bit = HEART_ISR_LVL1_MAX; \ + break; \ + case CR_INT_2: \ + isr &= HEART_ISR_LVL2_MASK; \ + bit = HEART_ISR_LVL2_MAX; \ + break; \ + case CR_INT_3: \ + isr &= HEART_ISR_LVL3_MASK; \ + bit = HEART_ISR_LVL3_MAX; \ + break; \ + case CR_INT_4: \ + isr &= HEART_ISR_LVL4_MASK; \ + bit = HEART_ISR_LVL4_MAX; \ + break; \ + default: \ + return 0; /* can't happen */ \ + } \ +} while (0) +#define INTR_MASKPENDING \ + *(volatile uint64_t *)(heart + HEART_IMR(0)) &= ~isr +#define INTR_IMASK(ipl) xheart_imask[ipl] +#define INTR_HANDLER(bit) xheart_intrhand[bit] +#define INTR_SPURIOUS(bit) \ +do { \ + printf("spurious xheart interrupt %d\n", bit); \ +} while (0) +#define INTR_MASKRESTORE \ + *(volatile uint64_t *)(heart + HEART_IMR(0)) = imr + +#include <sgi/sgi/intr_template.c> - return CR_INT_0; +void +xheart_setintrmask(int level) +{ + paddr_t heart = PHYS_TO_XKPHYS(HEART_PIU_BASE, CCA_NC); + *(volatile uint64_t *)(heart + HEART_IMR(0)) = + xheart_intem & ~xheart_imask[level]; } diff --git a/sys/arch/sgi/xbow/xheartreg.h b/sys/arch/sgi/xbow/xheartreg.h index 51110690bb5..1b3ba2968c7 100644 --- a/sys/arch/sgi/xbow/xheartreg.h +++ b/sys/arch/sgi/xbow/xheartreg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: xheartreg.h,v 1.2 2009/04/18 14:48:09 miod Exp $ */ +/* $OpenBSD: xheartreg.h,v 1.3 2009/10/22 22:08:54 miod Exp $ */ /* * Copyright (c) 2008 Miodrag Vallat. @@ -46,13 +46,59 @@ #define HEART_ISR 0x00010030 /* - * ISR bit assignments (partial). + * ISR bit assignments. 
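/*
 * Sketch of what the INTR_GETMASKS hook above selects: each of the five
 * MIPS interrupt lines fed by HEART owns a fixed slice of the 64-bit
 * ISR, so the generated dispatcher only examines the bits belonging to
 * the line that fired.  The LVL masks are the values defined just below
 * in xheartreg.h; the CR_INT_* values here are placeholders, not the
 * real cause-register bits.
 */
#include <stdio.h>
#include <stdint.h>

enum { CR_INT_0, CR_INT_1, CR_INT_2, CR_INT_3, CR_INT_4 };	/* placeholders */

#define HEART_ISR_LVL0_MASK	0x000000000000ffffUL	/* bits 15..0  */
#define HEART_ISR_LVL1_MASK	0x00000000ffff0000UL	/* bits 31..16 */
#define HEART_ISR_LVL2_MASK	0x0003ffff00000000UL	/* bits 49..32 */
#define HEART_ISR_LVL3_MASK	0x0004000000000000UL	/* bit 50      */
#define HEART_ISR_LVL4_MASK	0xfff8000000000000UL	/* bits 63..51 */

static uint64_t
isr_slice(int hwpend, uint64_t isr)
{
	switch (hwpend) {
	case CR_INT_0:	return isr & HEART_ISR_LVL0_MASK;
	case CR_INT_1:	return isr & HEART_ISR_LVL1_MASK;
	case CR_INT_2:	return isr & HEART_ISR_LVL2_MASK;
	case CR_INT_3:	return isr & HEART_ISR_LVL3_MASK;
	case CR_INT_4:	return isr & HEART_ISR_LVL4_MASK;
	default:	return 0;	/* can't happen */
	}
}

int
main(void)
{
	/* a widget interrupt (bit 5) and the HEART timer (bit 50) pending */
	uint64_t isr = ((uint64_t)1 << 5) | ((uint64_t)1 << 50);

	printf("CR_INT_0 slice: %#llx\n",
	    (unsigned long long)isr_slice(CR_INT_0, isr));
	printf("CR_INT_3 slice: %#llx\n",
	    (unsigned long long)isr_slice(CR_INT_3, isr));
	return 0;
}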
 */
-#define HEART_INTR_ACFAIL 15
-#define HEART_INTR_POWER 14
-#define HEART_INTR_WIDGET_MAX 13
-#define HEART_INTR_WIDGET_MIN 0
+/** Level 4 interrupt: hardware error */
+#define HEART_ISR_LVL4_MASK 0xfff8000000000000UL
+#define HEART_ISR_LVL4_MAX 63
+/* Heart (widget 8) error */
+#define HEART_ISR_WID08_ERROR 63
+/* CPU bus error */
+#define HEART_ISR_CPU_BUSERR(c) (59 + (c))
+/* Crossbow (widget 0) error */
+#define HEART_ISR_WID00_ERROR 58
+/* Widget error */
+#define HEART_ISR_WID0F_ERROR 57
+#define HEART_ISR_WID0E_ERROR 56
+#define HEART_ISR_WID0D_ERROR 55
+#define HEART_ISR_WID0C_ERROR 54
+#define HEART_ISR_WID0B_ERROR 53
+#define HEART_ISR_WID0A_ERROR 52
+#define HEART_ISR_WID09_ERROR 51
-#define HEART_INTR_MAX 15
-#define HEART_INTR_MIN 0
+#define HEART_ISR_WID_ERROR(w) \
+	((w) == 0 ? HEART_ISR_WID00_ERROR : \
+	 (w) == 8 ? HEART_ISR_WID08_ERROR : HEART_ISR_WID09_ERROR + (w) - 9)
+
+/** Level 3 interrupt: heart counter/timer */
+#define HEART_ISR_LVL3_MASK 0x0004000000000000UL
+#define HEART_ISR_LVL3_MAX 50
+/* Crossbow clock */
+#define HEART_ISR_HEARTCLOCK 50
+
+/** Level 2 interrupt */
+#define HEART_ISR_LVL2_MASK 0x0003ffff00000000UL
+#define HEART_ISR_LVL2_MAX 49
+/* IPI */
+#define HEART_ISR_IPI(c) (46 + (c))
+/* Debugger interrupts */
+#define HEART_ISR_DBG(c) (42 + (c))
+/* Power switch */
+#define HEART_ISR_POWER 41
+/* 40-32 freely available */
+
+/** Level 1 interrupt */
+#define HEART_ISR_LVL1_MASK 0x00000000ffff0000UL
+#define HEART_ISR_LVL1_MAX 31
+/* 31-16 freely available */
+
+/** Level 0 interrupt */
+#define HEART_ISR_LVL0_MASK 0x000000000000ffffUL
+#define HEART_ISR_LVL0_MAX 15
+/* 15-3 freely available */
+
+#define HEART_INTR_WIDGET_MAX 15
+#define HEART_INTR_WIDGET_MIN 3
+
+#define HEART_NINTS 64
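/*
 * Quick self-check of the bit layout defined above: the five per-level
 * masks must cover all 64 ISR bits without overlapping, and the widget
 * error macro must map widget 0 to bit 58, widget 8 to bit 63 and
 * widgets 9-15 to bits 51-57.  Stand-alone test code, not kernel code.
 */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define HEART_ISR_LVL4_MASK	0xfff8000000000000UL
#define HEART_ISR_LVL3_MASK	0x0004000000000000UL
#define HEART_ISR_LVL2_MASK	0x0003ffff00000000UL
#define HEART_ISR_LVL1_MASK	0x00000000ffff0000UL
#define HEART_ISR_LVL0_MASK	0x000000000000ffffUL

#define HEART_ISR_WID08_ERROR	63
#define HEART_ISR_WID00_ERROR	58
#define HEART_ISR_WID09_ERROR	51

#define HEART_ISR_WID_ERROR(w) \
	((w) == 0 ? HEART_ISR_WID00_ERROR : \
	 (w) == 8 ? HEART_ISR_WID08_ERROR : HEART_ISR_WID09_ERROR + (w) - 9)

int
main(void)
{
	const uint64_t masks[] = {
		HEART_ISR_LVL0_MASK, HEART_ISR_LVL1_MASK, HEART_ISR_LVL2_MASK,
		HEART_ISR_LVL3_MASK, HEART_ISR_LVL4_MASK
	};
	uint64_t seen = 0;
	int i;

	for (i = 0; i < 5; i++) {
		assert((seen & masks[i]) == 0);	/* levels do not overlap */
		seen |= masks[i];
	}
	assert(seen == ~(uint64_t)0);		/* all 64 bits are assigned */

	assert(HEART_ISR_WID_ERROR(0) == 58);
	assert(HEART_ISR_WID_ERROR(8) == 63);
	assert(HEART_ISR_WID_ERROR(15) == 57);

	printf("HEART ISR level masks partition bits 0-63\n");
	return 0;
}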