diff options
author | Mark Kettenis <kettenis@cvs.openbsd.org> | 2008-04-17 19:52:28 +0000 |
---|---|---|
committer | Mark Kettenis <kettenis@cvs.openbsd.org> | 2008-04-17 19:52:28 +0000 |
commit | 60507fc96e456912e170f2d314dd70f884b5aeb8 (patch) | |
tree | b83b6259041ee06735f8c73b757f1e8debc3b6e7 /sys/arch/sparc64 | |
parent | dc91fc42a77afeef014bc99ae83a45521bc45f46 (diff) |
Really try to schedule clock ticks at fixed intervals. Make sure hardclock()
gets called for every clock tick, even if we miss one.
Diffstat (limited to 'sys/arch/sparc64')
-rw-r--r-- | sys/arch/sparc64/include/cpu.h | 3 | ||||
-rw-r--r-- | sys/arch/sparc64/sparc64/clock.c | 37 |
2 files changed, 21 insertions, 19 deletions
diff --git a/sys/arch/sparc64/include/cpu.h b/sys/arch/sparc64/include/cpu.h index 608333983a4..59c938281aa 100644 --- a/sys/arch/sparc64/include/cpu.h +++ b/sys/arch/sparc64/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.60 2008/04/13 16:32:55 kettenis Exp $ */ +/* $OpenBSD: cpu.h,v 1.61 2008/04/17 19:52:27 kettenis Exp $ */ /* $NetBSD: cpu.h,v 1.28 2001/06/14 22:56:58 thorpej Exp $ */ /* @@ -124,6 +124,7 @@ struct cpu_info { int ci_want_resched; int ci_handled_intr_level; void *ci_intrpending[16][8]; + u_int64_t ci_tick; /* DEBUG/DIAGNOSTIC stuff */ u_long ci_spin_locks; /* # of spin locks held */ diff --git a/sys/arch/sparc64/sparc64/clock.c b/sys/arch/sparc64/sparc64/clock.c index a49332cdd02..0e317bc8892 100644 --- a/sys/arch/sparc64/sparc64/clock.c +++ b/sys/arch/sparc64/sparc64/clock.c @@ -1,4 +1,4 @@ -/* $OpenBSD: clock.c,v 1.40 2008/04/15 22:39:26 kettenis Exp $ */ +/* $OpenBSD: clock.c,v 1.41 2008/04/17 19:52:27 kettenis Exp $ */ /* $NetBSD: clock.c,v 1.41 2001/07/24 19:29:25 eeh Exp $ */ /* @@ -715,20 +715,22 @@ int tickintr(cap) void *cap; { - u_int64_t base, s; - - hardclock((struct clockframe *)cap); + struct cpu_info *ci = curcpu(); + u_int64_t s; - /* - * Reset the interrupt. We need to disable interrupts to - * block out IPIs, otherwise a value that is in the past could - * be written to the TICK_CMPR register, causing hardclock to - * stop. + /* + * No need to worry about overflow; %tick is architecturally + * defined not to do that for at least 10 years. */ + while (ci->ci_tick < tick()) { + ci->ci_tick += tick_increment; + hardclock((struct clockframe *)cap); + level0.ih_count.ec_count++; + } + + /* Reset the interrupt. */
s = intr_disable(); - base = sparc_rdpr(tick); - tickcmpr_set((base + tick_increment) & TICK_TICKS); - level0.ih_count.ec_count++; + tickcmpr_set(ci->ci_tick); intr_restore(s); return (1); @@ -878,18 +880,17 @@ resettodr() void tick_start(void) { - u_int64_t base, s; + struct cpu_info *ci = curcpu(); + u_int64_t s; /* * Try to make the tick interrupts as synchronously as possible on - * all CPUs to avoid inaccuracies for migrating processes. Leave out - * one tick to make sure that it is not missed. + * all CPUs to avoid inaccuracies for migrating processes. */ s = intr_disable(); - base = sparc_rdpr(tick) & TICK_TICKS; - base = roundup(base, tick_increment); - sparc_wr(tick_cmpr, (base + tick_increment) & TICK_TICKS, 0); + ci->ci_tick = roundup(tick(), tick_increment); + tickcmpr_set(ci->ci_tick); intr_restore(s); } |