author    | Miod Vallat <miod@cvs.openbsd.org> | 2009-03-20 18:41:08 +0000
committer | Miod Vallat <miod@cvs.openbsd.org> | 2009-03-20 18:41:08 +0000
commit    | 98bde83089acb78a73d619de34da67c678906f5c (patch)
tree      | 731ff679e20fe149baacabafff4f0979aeb9619a /sys/arch/mips64
parent    | de205eb784cd13060a4e090ea170659a2d3fcb40 (diff)
Switch sgi to __HAVE_GENERIC_SOFT_INTERRUPTS.
Diffstat (limited to 'sys/arch/mips64')
-rw-r--r-- | sys/arch/mips64/conf/files.mips64  |  12
-rw-r--r-- | sys/arch/mips64/include/_types.h   |   3
-rw-r--r-- | sys/arch/mips64/mips64/clock.c     |   4
-rw-r--r-- | sys/arch/mips64/mips64/interrupt.c | 312
-rw-r--r-- | sys/arch/mips64/mips64/softintr.c  | 215
5 files changed, 229 insertions(+), 317 deletions(-)
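The interesting part of this change is the MI-style soft interrupt API in the new sys/arch/mips64/mips64/softintr.c below: a handler is registered once with softintr_establish() and later marked pending with softintr_schedule(), which queues it and sets the matching bit in ipending so that dosoftint() runs it once the IPL allows. What follows is a minimal usage sketch, not part of the commit: the foo_* driver, its softc and the hardware interrupt handler are hypothetical, only the softintr_*() calls and IPL_SOFTNET come from the code added here.

```c
/*
 * Hypothetical driver sketch showing the generic soft interrupt API
 * added by this commit.  Only softintr_establish()/softintr_schedule()
 * and IPL_SOFTNET are taken from softintr.c; everything foo_* is made up.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <machine/intr.h>

struct foo_softc {
	void	*sc_sih;		/* soft interrupt handle */
};

void
foo_softintr(void *arg)
{
	/* Deferred work runs here, at IPL_SOFTNET rather than device IPL. */
}

int
foo_attach_softintr(struct foo_softc *sc)
{
	/* Register the handler once, typically at attach time. */
	sc->sc_sih = softintr_establish(IPL_SOFTNET, foo_softintr, sc);
	if (sc->sc_sih == NULL)
		return (ENOMEM);
	return (0);
}

int
foo_hardintr(void *arg)			/* hardware interrupt handler */
{
	struct foo_softc *sc = arg;

	/*
	 * Defer the bulk of the work: softintr_schedule() queues the
	 * handler and sets its SINTMASK() bit in ipending, so dosoftint()
	 * will call foo_softintr() when the IPL drops far enough.
	 */
	softintr_schedule(sc->sc_sih);
	return (1);
}
```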
diff --git a/sys/arch/mips64/conf/files.mips64 b/sys/arch/mips64/conf/files.mips64
index 3c3ed75da03..9d33ef565ae 100644
--- a/sys/arch/mips64/conf/files.mips64
+++ b/sys/arch/mips64/conf/files.mips64
@@ -1,18 +1,18 @@
-# $OpenBSD: files.mips64,v 1.9 2007/06/21 20:17:12 miod Exp $
-
-file arch/mips64/mips64/mem.c
-file arch/mips64/mips64/process_machdep.c
-file arch/mips64/mips64/sys_machdep.c
-file arch/mips64/mips64/vm_machdep.c
+# $OpenBSD: files.mips64,v 1.10 2009/03/20 18:41:05 miod Exp $
 
 file arch/mips64/mips64/arcbios.c        arcbios
 file arch/mips64/mips64/clock.c
 file arch/mips64/mips64/cpu.c
 file arch/mips64/mips64/interrupt.c
 file arch/mips64/mips64/mainbus.c
+file arch/mips64/mips64/mem.c
 file arch/mips64/mips64/pmap.c
+file arch/mips64/mips64/process_machdep.c
 file arch/mips64/mips64/sendsig.c
+file arch/mips64/mips64/softintr.c
+file arch/mips64/mips64/sys_machdep.c
 file arch/mips64/mips64/trap.c
+file arch/mips64/mips64/vm_machdep.c
 
 file arch/mips64/mips64/cache_r5k.S
 file arch/mips64/mips64/cache_r10k.S
diff --git a/sys/arch/mips64/include/_types.h b/sys/arch/mips64/include/_types.h
index 4531dfd5efb..e0a6e038263 100644
--- a/sys/arch/mips64/include/_types.h
+++ b/sys/arch/mips64/include/_types.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: _types.h,v 1.5 2008/07/21 20:50:54 martynas Exp $ */
+/* $OpenBSD: _types.h,v 1.6 2009/03/20 18:41:06 miod Exp $ */
 
 /*-
  * Copyright (c) 1990, 1993
@@ -134,6 +134,7 @@ typedef struct label_t {
 #define __SWAP_BROKEN
 
 /* Feature test macros */
+#define __HAVE_GENERIC_SOFT_INTERRUPTS
 #define __HAVE_TIMECOUNTER
 
 #endif /* _MIPS64__TYPES_H_ */
diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c
index 80ffa6166f3..86a4d1fad15 100644
--- a/sys/arch/mips64/mips64/clock.c
+++ b/sys/arch/mips64/mips64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.21 2008/09/23 04:33:07 miod Exp $ */
+/* $OpenBSD: clock.c,v 1.22 2009/03/20 18:41:06 miod Exp $ */
 
 /*
  * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -159,7 +159,7 @@ clock_int5(intrmask_t mask, struct trap_frame *tf)
         cp0_set_compare(cpu_counter_last);
     }
 
-    if ((tf->cpl & SPL_CLOCKMASK) == 0) {
+    if (clock_started && (tf->cpl & SPL_CLOCKMASK) == 0) {
         while (pendingticks) {
             clk_count.ec_count++;
             hardclock(tf);
diff --git a/sys/arch/mips64/mips64/interrupt.c b/sys/arch/mips64/mips64/interrupt.c
index 336901af8d0..80897b852ac 100644
--- a/sys/arch/mips64/mips64/interrupt.c
+++ b/sys/arch/mips64/mips64/interrupt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: interrupt.c,v 1.33 2008/02/20 19:13:38 miod Exp $ */
+/* $OpenBSD: interrupt.c,v 1.34 2009/03/20 18:41:06 miod Exp $ */
 
 /*
  * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -36,7 +36,6 @@
 #ifdef KTRACE
 #include <sys/ktrace.h>
 #endif
-#include <net/netisr.h>
 
 #include <machine/trap.h>
 #include <machine/psl.h>
@@ -77,8 +76,6 @@ void dummy_do_pending_int(int);
 
 int_f *pending_hand = &dummy_do_pending_int;
 
-int netisr;
-
 /*
  * Modern versions of MIPS processors have extended interrupt
  * capabilities. How these are handled differs from implementation
@@ -191,30 +188,11 @@ interrupt(struct trap_frame *trapframe)
         setsr((trapframe->sr & ~pending) | SR_INT_ENAB);
 #endif
 
-        xcpl = splsoftnet();
-        if ((ipending & SINT_CLOCKMASK) & ~xcpl) {
-            atomic_clearbits_int(&ipending, SINT_CLOCKMASK);
-            softclock();
-        }
-        if ((ipending & SINT_NETMASK) & ~xcpl) {
-            extern int netisr;
-            int isr;
-
-            atomic_clearbits_int(&ipending, SINT_NETMASK);
-            while ((isr = netisr) != 0) {
-                atomic_clearbits_int(&netisr, isr);
-
-#define DONETISR(b,f) if (isr & (1 << (b))) f();
-#include <net/netisr_dispatch.h>
-            }
+        xcpl = splsoft();
+        if ((ipending & SINT_ALLMASK) & ~xcpl) {
+            dosoftint(xcpl);
         }
-#ifdef notyet
-        if ((ipending & SINT_TTYMASK) & ~xcpl) {
-            atomic_clearbits_int(&ipending, SINT_TTYMASK);
-            compoll(NULL);
-        }
-#endif
         __asm__ (" .set noreorder\n");
         cpl = xcpl;
         __asm__ (" sync\n .set reorder\n");
 
@@ -280,252 +258,8 @@ softintr()
     p->p_cpu->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
 }
 
-
-intrmask_t intem = 0x0;
-intrmask_t intrtype[INTMASKSIZE], intrmask[INTMASKSIZE], intrlevel[INTMASKSIZE];
 struct intrhand *intrhand[INTMASKSIZE];
 
-/*======================================================================*/
-
-#if 0
-
-/*
- * Generic interrupt handling code.
- * ================================
- *
- * This code can be used for interrupt models where only the
- * processor status register has to be changed to mask/unmask.
- * HW specific setup can be done in a MD function that can then
- * call this function to use the generic interrupt code.
- */
-static int fakeintr(void *);
-static int fakeintr(void *a) {return 0;}
-
-/*
- * Establish an interrupt handler called from the dispatcher.
- * The interrupt function established should return zero if
- * there was nothing to serve (no int) and non zero when an
- * interrupt was serviced.
- * Interrupts are numbered from 1 and up where 1 maps to HW int 0.
- */
-void *
-generic_intr_establish(icp, irq, type, level, ih_fun, ih_arg, ih_what)
-    void *icp;
-    u_long irq;    /* XXX pci_intr_handle_t compatible XXX */
-    int type;
-    int level;
-    int (*ih_fun)(void *);
-    void *ih_arg;
-    char *ih_what;
-{
-    struct intrhand **p, *q, *ih;
-    static struct intrhand fakehand = {NULL, fakeintr};
-    int edge;
-
-static int initialized = 0;
-
-    if (!initialized) {
-/*INIT CODE HERE*/
-        initialized = 1;
-    }
-
-    if (irq > 62 || irq < 1) {
-        panic("intr_establish: illegal irq %d", irq);
-    }
-    irq += 1;    /* Adjust for softint 1 and 0 */
-
-    /* no point in sleeping unless someone can free memory. */
-    ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
-    if (ih == NULL)
-        panic("intr_establish: can't malloc handler info");
-
-    if (type == IST_NONE || type == IST_PULSE)
-        panic("intr_establish: bogus type");
-
-    switch (intrtype[irq]) {
-    case IST_EDGE:
-    case IST_LEVEL:
-        if (type == intrtype[irq])
-            break;
-    }
-
-    switch (type) {
-    case IST_EDGE:
-        edge |= 1 << irq;
-        break;
-    case IST_LEVEL:
-        edge &= ~(1 << irq);
-        break;
-    }
-
-    /*
-     * Figure out where to put the handler.
-     * This is O(N^2), but we want to preserve the order, and N is
-     * generally small.
-     */
-    for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
-        ;
-
-    /*
-     * Actually install a fake handler momentarily, since we might be doing
-     * this with interrupts enabled and don't want the real routine called
-     * until masking is set up.
-     */
-    fakehand.ih_level = level;
-    *p = &fakehand;
-
-    generic_intr_makemasks();
-
-    /*
-     * Poke the real handler in now.
-     */
-    ih->ih_fun = ih_fun;
-    ih->ih_arg = ih_arg;
-    ih->ih_next = NULL;
-    ih->ih_level = level;
-    ih->ih_irq = irq;
-    ih->ih_what = ih_what;
-    evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq,
-        &evcount_intr);
-    *p = ih;
-
-    return (ih);
-}
-
-void
-generic_intr_disestablish(void *p1, void *p2)
-{
-}
-
-/*
- * Regenerate interrupt masks to reflect reality.
- */
-void
-generic_intr_makemasks()
-{
-    int irq, level;
-    struct intrhand *q;
-
-    /* First, figure out which levels each IRQ uses. */
-    for (irq = 0; irq < INTMASKSIZE; irq++) {
-        int levels = 0;
-        for (q = intrhand[irq]; q; q = q->ih_next)
-            levels |= 1 << q->ih_level;
-        intrlevel[irq] = levels;
-    }
-
-    /* Then figure out which IRQs use each level. */
-    for (level = IPL_NONE; level < NIPLS; level++) {
-        register int irqs = 0;
-        for (irq = 0; irq < INTMASKSIZE; irq++)
-            if (intrlevel[irq] & (1 << level))
-                irqs |= 1 << irq;
-        imask[level] = irqs | SINT_ALLMASK;
-    }
-
-    /*
-     * There are tty, network and disk drivers that use free() at interrupt
-     * time, so imp > (tty | net | bio).
-     *
-     * Enforce a hierarchy that gives slow devices a better chance at not
-     * dropping data.
-     */
-    imask[IPL_NET] |= imask[IPL_BIO];
-    imask[IPL_TTY] |= imask[IPL_NET];
-    imask[IPL_VM] |= imask[IPL_TTY];
-    imask[IPL_CLOCK] |= imask[IPL_VM] | SPL_CLOCKMASK;
-
-    /*
-     * These are pseudo-levels.
-     */
-    imask[IPL_NONE] = 0;
-    imask[IPL_HIGH] = -1;
-
-    /* And eventually calculate the complete masks. */
-    for (irq = 0; irq < INTMASKSIZE; irq++) {
-        register int irqs = 1 << irq;
-        for (q = intrhand[irq]; q; q = q->ih_next)
-            irqs |= imask[q->ih_level];
-        intrmask[irq] = irqs | SINT_ALLMASK;
-    }
-
-    /* Lastly, determine which IRQs are actually in use. */
-    irq = 0;
-    for (level = 0; level < INTMASKSIZE; level++) {
-        if (intrhand[level]) {
-            irq |= 1 << level;
-        }
-    }
-    intem = irq;
-}
-
-void
-generic_do_pending_int(int newcpl)
-{
-    struct intrhand *ih;
-    int vector;
-    intrmask_t hwpend;
-    struct trap_frame cf;
-    static volatile int processing;
-
-    /* Don't recurse... but change the mask. */
-    if (processing) {
-        __asm__ (" .set noreorder\n");
-        cpl = newcpl;
-        __asm__ (" sync\n .set reorder\n");
-        return;
-    }
-    processing = 1;
-
-    /* XXX Fake a trapframe for clock pendings... */
-    cf.pc = (int)&generic_do_pending_int;
-    cf.sr = 0;
-    cf.cpl = cpl;
-
-    hwpend = ipending & ~newcpl;    /* Do pendings being unmasked */
-    hwpend &= ~(SINT_ALLMASK);
-    atomic_clearbits_int(&ipending, hwpend);
-    intem |= hwpend;
-    while (hwpend) {
-        vector = ffs(hwpend) - 1;
-        hwpend &= ~(1L << vector);
-        ih = intrhand[vector];
-        while (ih) {
-            ih->frame = &cf;
-            if ((*ih->ih_fun)(ih->ih_arg)) {
-                ih->ih_count.ec_count++;
-            }
-            ih = ih->ih_next;
-        }
-    }
-    if ((ipending & SINT_CLOCKMASK) & ~newcpl) {
-        atomic_clearbits_int(&ipending, SINT_CLOCKMASK);
-        softclock();
-    }
-    if ((ipending & SINT_NETMASK) & ~newcpl) {
-        int isr = netisr;
-        netisr = 0;
-        atomic_clearbits_int(&ipending, SINT_NETMASK);
-#define DONETISR(b,f) if (isr & (1 << (b))) f();
-#include <net/netisr_dispatch.h>
-    }
-
-#ifdef NOTYET
-    if ((ipending & SINT_TTYMASK) & ~newcpl) {
-        atomic_clearbits_int(&ipending, SINT_TTYMASK);
-        compoll(NULL);
-    }
-#endif
-
-    __asm__ (" .set noreorder\n");
-    cpl = newcpl;
-    __asm__ (" sync\n .set reorder\n");
-    updateimask(newcpl);    /* Update CPU mask ins SR register */
-    processing = 0;
-}
-
-#endif
-
 void
 dummy_do_pending_int(int newcpl)
 {
@@ -553,44 +287,6 @@ splinit()
 #endif
 }
 
-#if 0
-
-/*
- * Process interrupts. The parameter pending has non-masked interrupts.
- */
-intrmask_t
-generic_iointr(intrmask_t pending, struct trap_frame *cf)
-{
-    struct intrhand *ih;
-    intrmask_t caught, vm;
-    int v;
-
-    caught = 0;
-
-    atomic_setbits_int(&ipending, (pending >> 8) & cpl);
-    pending &= ~(cpl << 8);
-    cf->sr &= ~((ipending << 8) & SR_INT_MASK);
-    cf->ic &= ~(ipending & IC_INT_MASK);
-
-    for (v = 2, vm = 0x400; pending != 0 && v < 16 ; v++, vm <<= 1) {
-        if (pending & vm) {
-            ih = intrhand[v];
-
-            while (ih) {
-                ih->frame = cf;
-                if ((*ih->ih_fun)(ih->ih_arg)) {
-                    caught |= vm;
-                    ih->ih_count.ec_count++;
-                }
-                ih = ih->ih_next;
-            }
-        }
-    }
-    return caught;
-}
-
-#endif
-
 #ifndef INLINE_SPLRAISE
 int
 splraise(int newcpl)
diff --git a/sys/arch/mips64/mips64/softintr.c b/sys/arch/mips64/mips64/softintr.c
new file mode 100644
index 00000000000..182d979466d
--- /dev/null
+++ b/sys/arch/mips64/mips64/softintr.c
@@ -0,0 +1,215 @@
+/* $OpenBSD: softintr.c,v 1.1 2009/03/20 18:41:06 miod Exp $ */
+/* $NetBSD: softintr.c,v 1.2 2003/07/15 00:24:39 lukem Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    This product includes software developed for the NetBSD Project by
+ *    Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ *    or promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+
+/* XXX Network interrupts should be converted to new softintrs. */
+#include <net/netisr.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/atomic.h>
+#include <machine/intr.h>
+
+struct soft_intrq soft_intrq[SI_NQUEUES];
+
+struct soft_intrhand *softnet_intrhand;
+
+void netintr(void);
+
+/*
+ * Initialize the software interrupt system.
+ */
+void
+softintr_init(void)
+{
+    struct soft_intrq *siq;
+    int i;
+
+    for (i = 0; i < SI_NQUEUES; i++) {
+        siq = &soft_intrq[i];
+        TAILQ_INIT(&siq->siq_list);
+        siq->siq_si = i;
+        mtx_init(&siq->siq_mtx, IPL_HIGH);
+    }
+
+    /* XXX Establish legacy software interrupt handlers. */
+    softnet_intrhand = softintr_establish(IPL_SOFTNET,
+        (void (*)(void *))netintr, NULL);
+}
+
+/*
+ * Process pending software interrupts on the specified queue.
+ *
+ * NOTE: We must already be at the correct interrupt priority level.
+ */
+void
+softintr_dispatch(int si)
+{
+    struct soft_intrq *siq = &soft_intrq[si];
+    struct soft_intrhand *sih;
+
+    for (;;) {
+        mtx_enter(&siq->siq_mtx);
+        sih = TAILQ_FIRST(&siq->siq_list);
+        if (sih == NULL) {
+            mtx_leave(&siq->siq_mtx);
+            break;
+        }
+
+        TAILQ_REMOVE(&siq->siq_list, sih, sih_list);
+        sih->sih_pending = 0;
+
+        uvmexp.softs++;
+
+        mtx_leave(&siq->siq_mtx);
+
+        (*sih->sih_func)(sih->sih_arg);
+    }
+}
+
+/*
+ * Register a software interrupt handler.
+ */
+void *
+softintr_establish(int ipl, void (*func)(void *), void *arg)
+{
+    struct soft_intrhand *sih;
+    int si;
+
+    switch (ipl) {
+    case IPL_SOFT:
+        si = SI_SOFT;
+        break;
+    case IPL_SOFTCLOCK:
+        si = SI_SOFTCLOCK;
+        break;
+    case IPL_SOFTNET:
+        si = SI_SOFTNET;
+        break;
+    case IPL_TTY:            /* XXX until MI code is fixed */
+    case IPL_SOFTTTY:
+        si = SI_SOFTTTY;
+        break;
+    default:
+        printf("softintr_establish: unknown soft IPL %d\n", ipl);
+        return NULL;
+    }
+
+    sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
+    if (__predict_true(sih != NULL)) {
+        sih->sih_func = func;
+        sih->sih_arg = arg;
+        sih->sih_siq = &soft_intrq[si];
+        sih->sih_pending = 0;
+    }
+    return (sih);
+}
+
+/*
+ * Unregister a software interrupt handler.
+ */
+void
+softintr_disestablish(void *arg)
+{
+    struct soft_intrhand *sih = arg;
+    struct soft_intrq *siq = sih->sih_siq;
+
+    mtx_enter(&siq->siq_mtx);
+    if (sih->sih_pending) {
+        TAILQ_REMOVE(&siq->siq_list, sih, sih_list);
+        sih->sih_pending = 0;
+    }
+    mtx_leave(&siq->siq_mtx);
+
+    free(sih, M_DEVBUF);
+}
+
+/*
+ * Schedule a software interrupt.
+ */
+void
+softintr_schedule(void *arg)
+{
+    struct soft_intrhand *sih = (struct soft_intrhand *)arg;
+    struct soft_intrq *siq = sih->sih_siq;
+
+    mtx_enter(&siq->siq_mtx);
+    if (sih->sih_pending == 0) {
+        TAILQ_INSERT_TAIL(&siq->siq_list, sih, sih_list);
+        sih->sih_pending = 1;
+        atomic_setbits_int(&ipending, SINTMASK(siq->siq_si));
+    }
+    mtx_leave(&siq->siq_mtx);
+}
+
+int netisr;
+
+void
+netintr(void)
+{
+    int n;
+
+    while ((n = netisr) != 0) {
+        atomic_clearbits_int(&netisr, n);
+#define DONETISR(bit, fn)                        \
+    do {                                \
+        if (n & (1 << (bit)))                    \
+            fn();                        \
+    } while (0)
+#include <net/netisr_dispatch.h>
+#undef DONETISR
+    }
+}
+
+void
+dosoftint(intrmask_t xcpl)
+{
+    int sir, q, mask;
+
+    while ((sir = (ipending & SINT_ALLMASK & ~xcpl)) != 0) {
+        atomic_clearbits_int(&ipending, sir);
+
+        for (q = SI_NQUEUES - 1; q >= 0; q--) {
+            mask = SINTMASK(q);
+            if (sir & mask)
+                softintr_dispatch(q);
+        }
+    }
+}
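One property worth noting in dosoftint() above: the queues are scanned from SI_NQUEUES - 1 down to 0, so the highest-numbered soft queue is serviced first on every pass, and ipending is re-read in the loop condition so soft interrupts posted by a running handler are picked up before returning. The sketch below spells out the queue numbering this ordering relies on; the concrete SI_* values and the SINTMASK() bit layout live in <machine/intr.h>, which is not part of this diff, so treat the numbers as an illustrative assumption only.

```c
/*
 * Assumed queue numbering from <machine/intr.h> (not shown in this diff).
 * The exact constants are an assumption; what matters to dosoftint() is
 * only that it dispatches the highest-numbered pending queue first.
 */
enum {
	SI_SOFT = 0,		/* IPL_SOFT */
	SI_SOFTCLOCK = 1,	/* IPL_SOFTCLOCK: softclock() work */
	SI_SOFTNET = 2,		/* IPL_SOFTNET: netintr() */
	SI_SOFTTTY = 3,		/* IPL_SOFTTTY (and IPL_TTY for now) */
	SI_NQUEUES = 4
};

/*
 * Under that assumption, each pass of dosoftint() services queues in the
 * order SI_SOFTTTY -> SI_SOFTNET -> SI_SOFTCLOCK -> SI_SOFT.
 */
```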