author    Mark Kettenis <kettenis@cvs.openbsd.org>    2007-11-11 19:47:35 +0000
committer Mark Kettenis <kettenis@cvs.openbsd.org>    2007-11-11 19:47:35 +0000
commit    199d75fbc0e4ab09057c414ba16f507e238aabd4 (patch)
tree      628100497b02333a745828bb5327dc0cb867d2ce /sys/arch/sparc64
parent    5509528a6364e4de54e193b4068875bb679198bb (diff)
Replace next_tick() with simpler C code that I can actually understand.
Diffstat (limited to 'sys/arch/sparc64')
-rw-r--r--  sys/arch/sparc64/sparc64/clock.c   | 22
-rw-r--r--  sys/arch/sparc64/sparc64/locore.s  | 67
2 files changed, 15 insertions, 74 deletions
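The substance of the change is in the clock.c hunks below: instead of calling into the next_tick() assembly, tickintr() now re-arms %tick_cmpr directly from the current %tick value, and both writers mask the result with TICK_TICKS. A minimal userland sketch of that arithmetic, assuming TICK_TICKS masks the 63-bit counter (bit 63 of %tick is the non-privileged-trap bit) and using fake_* variables as stand-ins for the privileged registers:

#include <stdint.h>
#include <stdio.h>

#define TICK_TICKS	((1ULL << 63) - 1)	/* assumed: %tick is a 63-bit counter */

static uint64_t fake_tick;			/* stand-in for the %tick register */
static uint64_t fake_tick_cmpr;			/* stand-in for %tick_cmpr */

static uint64_t rd_tick(void) { return fake_tick & TICK_TICKS; }
static void wr_tick_cmpr(uint64_t v) { fake_tick_cmpr = v; }

/* Re-arm the compare register the way the rewritten tickintr() does. */
static void
rearm(uint64_t tick_increment)
{
	uint64_t base = rd_tick();

	wr_tick_cmpr((base + tick_increment) & TICK_TICKS);
}

int
main(void)
{
	/* Near the top of the counter range the sum overflows bit 63... */
	fake_tick = TICK_TICKS - 10;
	rearm(1000000);

	/* ...but the TICK_TICKS mask wraps the deadline back into range. */
	printf("tick = %#llx, tick_cmpr = %#llx\n",
	    (unsigned long long)rd_tick(),
	    (unsigned long long)fake_tick_cmpr);
	return 0;
}

In the kernel this runs with interrupts disabled; the sketch leaves that part out.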
diff --git a/sys/arch/sparc64/sparc64/clock.c b/sys/arch/sparc64/sparc64/clock.c
index eb169f97074..578de52f41d 100644
--- a/sys/arch/sparc64/sparc64/clock.c
+++ b/sys/arch/sparc64/sparc64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.36 2007/10/21 21:00:38 kettenis Exp $ */
+/* $OpenBSD: clock.c,v 1.37 2007/11/11 19:47:34 kettenis Exp $ */
/* $NetBSD: clock.c,v 1.41 2001/07/24 19:29:25 eeh Exp $ */
/*
@@ -589,7 +589,7 @@ cpu_initclocks()
printf("Using %%tick -- intr in %ld cycles...",
tick_increment);
#endif
- next_tick(tick_increment);
+ tick_start();
#ifdef DEBUG
printf("done.\n");
#endif
@@ -718,15 +718,21 @@ int
tickintr(cap)
void *cap;
{
- int s;
+ u_int64_t base, s;
hardclock((struct clockframe *)cap);
- s = splhigh();
- /* Reset the interrupt */
- next_tick(tick_increment);
+ /*
+ * Reset the interrupt. We need to disable interrupts to
+ * block out IPIs, otherwise a value that is in the past could
+ * be written to the TICK_CMPR register, causing hardclock to
+ * stop.
+ */
+ s = intr_disable();
+ base = sparc_rdpr(tick);
+ sparc_wr(tick_cmpr, (base + tick_increment) & TICK_TICKS, 0);
level0.ih_count.ec_count++;
- splx(s);
+ intr_restore(s);
return (1);
}
@@ -900,7 +906,7 @@ tick_start(void)
s = intr_disable();
base = sparc_rdpr(tick) & TICK_TICKS;
base = roundup(base, tick_increment);
- sparc_wr(tick_cmpr, base + tick_increment, 0);
+ sparc_wr(tick_cmpr, (base + tick_increment) & TICK_TICKS, 0);
intr_restore(s);
}
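Two details in this file are easy to miss. First, tickintr() trades splhigh() for intr_disable(): raising the interrupt priority level is not enough here, presumably because IPIs can still be delivered, and the new comment spells out the failure mode: if another interrupt delays the write, %tick may already have passed the value stored in %tick_cmpr, the compare never matches, and hardclock() stops. Second, tick_start() now applies the same TICK_TICKS mask as tickintr(), and rounds the first deadline up to a tick_increment boundary. A small sketch of that arming arithmetic, assuming roundup() behaves as in <sys/param.h> and with made-up values:

#include <stdint.h>
#include <stdio.h>

#define TICK_TICKS	((1ULL << 63) - 1)	/* assumed 63-bit counter mask */

/* roundup() as in <sys/param.h>: round x up to a multiple of y. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	uint64_t tick_increment = 1000000;	/* cycles between hardclock ticks */
	uint64_t base = 1234567;		/* pretend current %tick value */

	/* Align to the next increment boundary, then arm one tick out. */
	base = roundup(base, tick_increment);
	printf("first deadline at %llu\n",
	    (unsigned long long)((base + tick_increment) & TICK_TICKS));
	return 0;
}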
diff --git a/sys/arch/sparc64/sparc64/locore.s b/sys/arch/sparc64/sparc64/locore.s
index 145124854df..ad469396c45 100644
--- a/sys/arch/sparc64/sparc64/locore.s
+++ b/sys/arch/sparc64/sparc64/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.103 2007/11/10 10:46:59 kettenis Exp $ */
+/* $OpenBSD: locore.s,v 1.104 2007/11/11 19:47:34 kettenis Exp $ */
/* $NetBSD: locore.s,v 1.137 2001/08/13 06:10:10 jdolecek Exp $ */
/*
@@ -8862,71 +8862,6 @@ Lstupid_loop:
retl
nop
-/*
- * next_tick(long increment)
- *
- * Sets the %tick_cmpr register to fire off in `increment' machine
- * cycles in the future. Also handles %tick wraparound. In 32-bit
- * mode we're limited to a 32-bit increment.
- */
- .data
- .align 8
-tlimit:
- .xword 0
- .text
-ENTRY(next_tick)
- rd TICK_CMPR, %o2
- rdpr %tick, %o1
-
- mov 1, %o3 ! Mask off high bits of these registers
- sllx %o3, 63, %o3
- andn %o1, %o3, %o1
- andn %o2, %o3, %o2
- cmp %o1, %o2 ! Did we wrap? (tick < tick_cmpr)
- bgt,pt %icc, 1f
- add %o1, 1000, %o1 ! Need some slack so we don't lose intrs.
-
- /*
- * Handle the unlikely case of %tick wrapping.
- *
- * This should only happen every 10 years or more.
- *
- * We need to increment the time base by the size of %tick in
- * microseconds. This will require some divides and multiplies
- * which can take time. So we re-read %tick.
- *
- */
-
- /* XXXXX NOT IMPLEMENTED */
-
-
-
-1:
- add %o2, %o0, %o2
- andn %o2, %o3, %o4
- brlz,pn %o4, Ltick_ovflw
- cmp %o2, %o1 ! Has this tick passed?
- blt,pn %xcc, 1b ! Yes
- nop
-
- retl
- wr %o2, TICK_CMPR
-
-Ltick_ovflw:
-/*
- * When we get here tick_cmpr has wrapped, but we don't know if %tick
- * has wrapped. If bit 62 is set then we have not wrapped and we can
- * use the current value of %o4 as %tick. Otherwise we need to return
- * to our loop with %o4 as %tick_cmpr (%o2).
- */
- srlx %o3, 1, %o5
- btst %o5, %o1
- bz,pn %xcc, 1b
- mov %o4, %o2
- retl
- wr %o2, TICK_CMPR
-
-
ENTRY(setjmp)
save %sp, -CC64FSZ, %sp ! Need a frame to return to.
flushw
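For comparison with the replacement, here is a rough C rendering of the catch-up loop the removed next_tick() implemented: keep adding the increment to the old compare value until the deadline lands in the future, with 1000 cycles of slack so a near-miss deadline is not lost. The fake_* variables and rd/wr helpers are hypothetical stand-ins for the privileged register accesses, and the wraparound handling (Ltick_ovflw and the path marked XXXXX NOT IMPLEMENTED above) is deliberately elided:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tick = 5000000;	/* simulated %tick */
static uint64_t fake_cmpr = 1000000;	/* simulated %tick_cmpr, long passed */

static uint64_t rd_tick_cmpr(void)   { return fake_cmpr; }
static uint64_t rdpr_tick(void)      { return fake_tick; }
static void wr_tick_cmpr(uint64_t v) { fake_cmpr = v; }

/* Core loop of the removed next_tick(); wrap handling elided. */
static void
next_tick(uint64_t increment)
{
	const uint64_t npt = 1ULL << 63;	/* mask off the high bit */
	uint64_t cmpr = rd_tick_cmpr() & ~npt;
	uint64_t tick = (rdpr_tick() & ~npt) + 1000;	/* slack */

	do
		cmpr += increment;
	while (cmpr < tick);	/* has this deadline already passed? */

	wr_tick_cmpr(cmpr);
}

int
main(void)
{
	next_tick(1000000);	/* steps 1000000 -> 6000000, first value past 5001000 */
	printf("new tick_cmpr = %llu\n", (unsigned long long)fake_cmpr);
	return 0;
}

The replacement avoids this loop entirely by basing the new deadline on the current %tick instead of the stale %tick_cmpr.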