summaryrefslogtreecommitdiff
path: root/sys/arch/hppa
diff options
context:
space:
mode:
authorMiod Vallat <miod@cvs.openbsd.org>2009-02-08 18:33:30 +0000
committerMiod Vallat <miod@cvs.openbsd.org>2009-02-08 18:33:30 +0000
commit80c131c81f81221c157d47db229a0c86ed7e5ad1 (patch)
tree2554f6fa6161488ca32491b3b3cc6e53ba04dd19 /sys/arch/hppa
parentd61fc84d53fe7acdd5cbd7ff2490d07fb3a6e2b9 (diff)
Make sure that cpu_hardclock() never sets a ``next interrupt value'' which has
already been hit by the running timer; this happens very often on oosiop-based machines, due to these machines being among the slowest hppa, and oosiop being interrupt greedy. Unfortunately, when this happened, one had to wait for the timer to wrap, which would take up to 128 seconds on the 33MHz machines. Also, invoke hardclock() as many times as necessary if it turns out that we had to delay the interrupt 1/hz seconds to avoid the aforementioned wrap problem. With help from kettenis@; ok kettenis@
Diffstat (limited to 'sys/arch/hppa')
-rw-r--r--sys/arch/hppa/dev/clock.c68
-rw-r--r--sys/arch/hppa/dev/cpu.c11
-rw-r--r--sys/arch/hppa/hppa/locore.S23
3 files changed, 68 insertions, 34 deletions
diff --git a/sys/arch/hppa/dev/clock.c b/sys/arch/hppa/dev/clock.c
index 1d3aa2a3bd9..b9faf6d0d6b 100644
--- a/sys/arch/hppa/dev/clock.c
+++ b/sys/arch/hppa/dev/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.22 2007/07/22 19:24:45 kettenis Exp $ */
+/* $OpenBSD: clock.c,v 1.23 2009/02/08 18:33:28 miod Exp $ */
/*
* Copyright (c) 1998-2003 Michael Shalayeff
@@ -48,7 +48,10 @@
#include <ddb/db_extern.h>
#endif
-u_int itmr_get_timecount(struct timecounter *);
+u_long cpu_itmr, cpu_hzticks;
+
+int cpu_hardclock(void *);
+u_int itmr_get_timecount(struct timecounter *);
struct timecounter itmr_timecounter = {
itmr_get_timecount, NULL, 0xffffffff, 0, "itmr", 0, NULL
@@ -57,8 +60,6 @@ struct timecounter itmr_timecounter = {
void
cpu_initclocks()
{
- extern volatile u_long cpu_itmr;
- extern u_long cpu_hzticks;
u_long __itmr;
itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100;
@@ -70,6 +71,65 @@ cpu_initclocks()
mtctl(__itmr, CR_ITMR);
}
+int
+cpu_hardclock(void *v)
+{
+ u_long __itmr, delta, eta;
+ int wrap;
+ register_t eiem;
+
+ /*
+ * Invoke hardclock as many times as there has been cpu_hzticks
+ * ticks since the last interrupt.
+ */
+ for (;;) {
+ mfctl(CR_ITMR, __itmr);
+ delta = __itmr - cpu_itmr;
+ if (delta >= cpu_hzticks) {
+ hardclock(v);
+ cpu_itmr += cpu_hzticks;
+ } else
+ break;
+ }
+
+ /*
+ * Program the next clock interrupt, making sure it will
+ * indeed happen in the future. This is done with interrupts
+ * disabled to avoid a possible race.
+ */
+ eta = cpu_itmr + cpu_hzticks;
+ wrap = eta < cpu_itmr; /* watch out for a wraparound */
+ __asm __volatile("mfctl %%cr15, %0": "=r" (eiem));
+ __asm __volatile("mtctl %r0, %cr15");
+ mtctl(eta, CR_ITMR);
+ mfctl(CR_ITMR, __itmr);
+ /*
+ * If we were close enough to the next tick interrupt
+ * value, by the time we have programmed itmr, it might
+ * have passed the value, which would cause a complete
+ * cycle until the next interrupt occurs. On slow
+ * models, this would be a disaster (a complete cycle
+ * taking over two minutes on a 715/33).
+ *
+ * We expect that it will only be necessary to postpone
+ * the interrupt once. Thus, there are two cases:
+ * - We are expecting a wraparound: eta < cpu_itmr.
+ * itmr is in tracks if either >= cpu_itmr or < eta.
+ * - We are not wrapping: eta > cpu_itmr.
+ * itmr is in tracks if >= cpu_itmr and < eta (we need
+ * to keep the >= cpu_itmr test because itmr might wrap
+ * before eta does).
+ */
+ if ((wrap && !(eta > __itmr || __itmr >= cpu_itmr)) ||
+ (!wrap && !(eta > __itmr && __itmr >= cpu_itmr))) {
+ eta += cpu_hzticks;
+ mtctl(eta, CR_ITMR);
+ }
+ __asm __volatile("mtctl %0, %%cr15":: "r" (eiem));
+
+ return (1);
+}
+
/*
* initialize the system time from the time of day clock
*/
diff --git a/sys/arch/hppa/dev/cpu.c b/sys/arch/hppa/dev/cpu.c
index daf2c005171..943a6e5beb6 100644
--- a/sys/arch/hppa/dev/cpu.c
+++ b/sys/arch/hppa/dev/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.28 2004/12/28 05:18:25 mickey Exp $ */
+/* $OpenBSD: cpu.c,v 1.29 2009/02/08 18:33:28 miod Exp $ */
/*
* Copyright (c) 1998-2003 Michael Shalayeff
@@ -76,13 +76,6 @@ cpumatch(parent, cfdata, aux)
return 1;
}
-int
-cpu_hardclock(void *v)
-{
- hardclock(v);
- return (1);
-}
-
void
cpuattach(parent, self, aux)
struct device *parent;
@@ -95,6 +88,8 @@ cpuattach(parent, self, aux)
extern struct pdc_btlb pdc_btlb;
extern u_int cpu_ticksnum, cpu_ticksdenom;
extern u_int fpu_enable;
+ /* clock.c */
+ extern int cpu_hardclock(void *);
struct cpu_softc *sc = (struct cpu_softc *)self;
struct confargs *ca = aux;
diff --git a/sys/arch/hppa/hppa/locore.S b/sys/arch/hppa/hppa/locore.S
index f0104e28123..c4c75675c11 100644
--- a/sys/arch/hppa/hppa/locore.S
+++ b/sys/arch/hppa/hppa/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.158 2008/07/28 19:08:46 miod Exp $ */
+/* $OpenBSD: locore.S,v 1.159 2009/02/08 18:33:29 miod Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -118,15 +118,6 @@ netisr
.size netisr, .-netisr
.align 16
- .export cpu_hzticks, data
-cpu_hzticks /* itmr ticks in one hz */
- .word 0
- .size cpu_hzticks, .-cpu_hzticks
- .export cpu_itmr, data
-cpu_itmr /* itmr value at the most recent clk int */
- .word 0
- .size cpu_itmr, .-cpu_itmr
-
BSS(pdc_stack, 4) /* temp stack for PDC call */
BSS(emrg_stack, 4) /* stack for HPMC/TOC/PWRF */
BSS(fpemu_stack, 4) /* stack for FPU emulation */
@@ -2124,18 +2115,6 @@ ENTRY(TLABEL(intr),0)
INTR_PROF_PRE
- bb,>=,n r8, 0, $intr_noclock
-
- /* reload the itmr */
- ldil L%cpu_hzticks, r25 /* those both are aligned properly */
- ldw R%cpu_hzticks(r25), r16
- ldw R%cpu_itmr(r25), r9
- sh1add r16, r9, r17
- add r16, r9, r16
- mtctl r17, itmr
- stw r16, R%cpu_itmr(r25)
-
-$intr_noclock
ldil L%intr_table + 32*32, r1
ldo R%intr_table + 32*32(r1), r1
ldil L%ipending, r17