author     Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2022-11-19 16:23:49 +0000
committer  Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2022-11-19 16:23:49 +0000
commit     80ed729e08f13b4c92d3f73e4c0091003c978f39 (patch)
tree       dc7aaf30342c6fc021fde87bd188af6852e18406 /sys/arch/mips64
parent     328894c4a068dfbe71c693ce00c494c8cf96db65 (diff)
mips64, loongson, octeon: switch to clockintr
- Remove mips64-specific clock interrupt scheduling bits from cpu_info.
- Add missing tick_nsec initialization to cpu_initclocks().
- Disable the glxclk interrupt clock on loongson. visa@/miod@ say it
can be removed later if it isn't useful for anything else.
- Wire up cp0_intrclock (condensed in the sketch after this list).
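
The new glue in miniature, condensed verbatim from the clock.c diff below:
the clockintr layer drives the CP0 timer through a pair of callbacks
packaged in an intrclock.

const struct intrclock cp0_intrclock = {
	.ic_rearm = cp0_rearm_int5,		/* arm INT5 to fire nsecs from now */
	.ic_trigger = cp0_trigger_int5_wrapper	/* force INT5 to fire ASAP */
};

clockintr_cpu_init(&cp0_intrclock) registers the pair on each CPU, and the
INT5 handler hands the interrupt to clockintr_dispatch().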
Notes:
- The loongson apm_suspend() changes are untested, but deraadt@ claims
APM suspend/resume on loongson doesn't work anyway.
- loongson and octeon now have a randomized statclock() with stathz = hz;
  a sketch of the idea follows these notes.
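
The point of randomizing the statclock is to decorrelate statistics
sampling from periodic activity in the system. A minimal standalone sketch
of one generic scheme, drawing each interval uniformly around the mean
period; this is an illustration only, not necessarily the exact policy
behind clockintr's CL_RNDSTAT:

#include <stdint.h>
#include <stdlib.h>

/*
 * Illustrative only: draw each statclock interval uniformly from
 * [p/2, 3p/2), preserving a mean period of p = 1000000000 / stathz.
 * The kernel would use a stronger RNG; rand() keeps this standalone.
 */
uint64_t
next_statclock_interval(int stathz)
{
	uint64_t p = 1000000000ULL / stathz;

	return p / 2 + rand() % p;
}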
With input from miod@, visa@. Tested by miod@, visa@.
Link: https://marc.info/?l=openbsd-tech&m=166776379603497&w=2
ok visa@ mlarkin@
Diffstat (limited to 'sys/arch/mips64')
-rw-r--r--  sys/arch/mips64/include/_types.h          4
-rw-r--r--  sys/arch/mips64/include/cpu.h             7
-rw-r--r--  sys/arch/mips64/mips64/clock.c          156
-rw-r--r--  sys/arch/mips64/mips64/mips64_machdep.c  14

4 files changed, 109 insertions(+), 72 deletions(-)
diff --git a/sys/arch/mips64/include/_types.h b/sys/arch/mips64/include/_types.h
index cfdf2548edb..535abead1de 100644
--- a/sys/arch/mips64/include/_types.h
+++ b/sys/arch/mips64/include/_types.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: _types.h,v 1.23 2018/03/05 01:15:25 deraadt Exp $	*/
+/*	$OpenBSD: _types.h,v 1.24 2022/11/19 16:23:48 cheloha Exp $	*/

 /*-
  * Copyright (c) 1990, 1993
@@ -35,6 +35,8 @@
 #ifndef _MIPS64__TYPES_H_
 #define _MIPS64__TYPES_H_

+#define	__HAVE_CLOCKINTR
+
 /*
  * _ALIGN(p) rounds p (pointer or byte index) up to a correctly-aligned
  * value for all data types (int, long, ...).  The result is an
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
index 170661b1b09..221d4c35d9a 100644
--- a/sys/arch/mips64/include/cpu.h
+++ b/sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.139 2022/08/22 00:35:06 cheloha Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.140 2022/11/19 16:23:48 cheloha Exp $	*/

 /*-
  * Copyright (c) 1992, 1993
@@ -106,6 +106,7 @@

 #if defined(_KERNEL) && !defined(_LOCORE)

+#include <sys/clockintr.h>
 #include <sys/device.h>
 #include <machine/intr.h>
 #include <sys/sched.h>
@@ -179,8 +180,8 @@ struct cpu_info {
 	uint32_t	ci_softpending;	/* pending soft interrupts */
 	int		ci_clock_started;
 	volatile int	ci_clock_deferred;	/* clock interrupt postponed */
-	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
-	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */
+	struct clockintr_queue
+			ci_queue;

 	struct pmap	*ci_curpmap;
 	uint		ci_intrdepth;		/* interrupt depth */
diff --git a/sys/arch/mips64/mips64/clock.c b/sys/arch/mips64/mips64/clock.c
index 3a0aae58c7f..d9a9be8c9a9 100644
--- a/sys/arch/mips64/mips64/clock.c
+++ b/sys/arch/mips64/mips64/clock.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: clock.c,v 1.47 2022/10/31 13:59:10 visa Exp $	*/
+/*	$OpenBSD: clock.c,v 1.48 2022/11/19 16:23:48 cheloha Exp $	*/

 /*
  * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -38,8 +38,10 @@
 #include <sys/kernel.h>
 #include <sys/systm.h>
 #include <sys/atomic.h>
+#include <sys/clockintr.h>
 #include <sys/device.h>
 #include <sys/evcount.h>
+#include <sys/stdint.h>

 #include <machine/autoconf.h>
 #include <machine/cpu.h>
@@ -47,6 +49,8 @@

 static struct evcount cp0_clock_count;
 static int cp0_clock_irq = 5;
+uint64_t cp0_nsec_cycle_ratio;
+uint64_t cp0_nsec_max;

 int	clockmatch(struct device *, void *, void *);
 void	clockattach(struct device *, struct device *, void *);
@@ -59,9 +63,18 @@ const struct cfattach clock_ca = {
 	sizeof(struct device), clockmatch, clockattach
 };

-void	cp0_startclock(struct cpu_info *);
-void	cp0_trigger_int5(void);
+void	cp0_rearm_int5(void *, uint64_t);
+void	cp0_trigger_int5_wrapper(void *);
+
+const struct intrclock cp0_intrclock = {
+	.ic_rearm = cp0_rearm_int5,
+	.ic_trigger = cp0_trigger_int5_wrapper
+};
+
 uint32_t cp0_int5(uint32_t, struct trapframe *);
+void	cp0_startclock(struct cpu_info *);
+void	cp0_trigger_int5(void);
+void	cp0_trigger_int5_masked(void);

 int
 clockmatch(struct device *parent, void *vcf, void *aux)
@@ -74,8 +87,13 @@ clockmatch(struct device *parent, void *vcf, void *aux)
 void
 clockattach(struct device *parent, struct device *self, void *aux)
 {
+	uint64_t cp0_freq = curcpu()->ci_hw.clock / CP0_CYCLE_DIVIDER;
+
 	printf(": int 5\n");

+	cp0_nsec_cycle_ratio = cp0_freq * (1ULL << 32) / 1000000000;
+	cp0_nsec_max = UINT64_MAX / cp0_nsec_cycle_ratio;
+
 	/*
 	 * We need to register the interrupt now, for idle_mask to
 	 * be computed correctly.
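
A worked example of the 32.32 fixed-point conversion set up in
clockattach() above, using an assumed 500 MHz counter (the real frequency
comes from ci_hw.clock / CP0_CYCLE_DIVIDER at attach time):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t cp0_freq = 500000000;	/* hypothetical 500 MHz timer */
	/* cycles per nanosecond, scaled by 2^32: 0.5 -> 2147483648 */
	uint64_t ratio = cp0_freq * (1ULL << 32) / 1000000000;
	/* largest nsec argument whose product below fits in 64 bits */
	uint64_t nsec_max = UINT64_MAX / ratio;
	uint64_t nsecs = 10000;		/* ask for an event 10us out */
	/* one multiply and shift: 10000 nsec * 0.5 cycles/nsec = 5000 */
	uint32_t cycles = (nsecs * ratio) >> 32;

	printf("ratio %llu, max %llu ns, cycles %u\n",
	    (unsigned long long)ratio, (unsigned long long)nsec_max, cycles);
	return 0;
}

This is why the attach path precomputes both values: cp0_rearm_int5()
clamps its argument to cp0_nsec_max before multiplying, so the conversion
never overflows.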
@@ -100,20 +118,19 @@ clockattach(struct device *parent, struct device *self, void *aux)
 uint32_t
 cp0_int5(uint32_t mask, struct trapframe *tf)
 {
-	u_int32_t clkdiff, pendingticks = 0;
 	struct cpu_info *ci = curcpu();
 	int s;

+	atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);
+
+	cp0_set_compare(cp0_get_count() - 1);	/* clear INT5 */
+
 	/*
-	 * If we got an interrupt before we got ready to process it,
-	 * retrigger it as far as possible. cpu_initclocks() will
-	 * take care of retriggering it correctly.
+	 * Just ignore the interrupt if we're not ready to process it.
+	 * cpu_initclocks() will retrigger it later.
 	 */
-	if (ci->ci_clock_started == 0) {
-		cp0_set_compare(cp0_get_count() - 1);
-
+	if (!ci->ci_clock_started)
 		return CR_INT_5;
-	}

 	/*
 	 * If the clock interrupt is logically masked, defer all
@@ -121,36 +138,11 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
 	 */
 	if (tf->ipl >= IPL_CLOCK) {
 		ci->ci_clock_deferred = 1;
-		cp0_set_compare(cp0_get_count() - 1);
 		return CR_INT_5;
 	}
 	ci->ci_clock_deferred = 0;

 	/*
-	 * Count how many ticks have passed since the last clock interrupt...
-	 */
-	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
-	while (clkdiff >= ci->ci_cpu_counter_interval) {
-		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
-		clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
-		pendingticks++;
-	}
-	pendingticks++;
-	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
-
-	/*
-	 * Set up next tick, and check if it has just been hit; in this
-	 * case count it and schedule one tick ahead.
-	 */
-	cp0_set_compare(ci->ci_cpu_counter_last);
-	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
-	if ((int)clkdiff >= 0) {
-		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
-		pendingticks++;
-		cp0_set_compare(ci->ci_cpu_counter_last);
-	}
-
-	/*
 	 * Process clock interrupt.
 	 */
 	s = splclock();
@@ -160,22 +152,65 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
 	sr = getsr();
 	ENABLEIPI();
 #endif
-	while (pendingticks) {
-		atomic_inc_long((unsigned long *)&cp0_clock_count.ec_count);
-		hardclock(tf);
-		pendingticks--;
-	}
+	clockintr_dispatch(tf);
 #ifdef MULTIPROCESSOR
 	setsr(sr);
 #endif
 	ci->ci_ipl = s;
-
 	return CR_INT_5;	/* Clock is always on 5 */
 }

 /*
- * Trigger the clock interrupt.
- *
+ * Arm INT5 to fire after the given number of nanoseconds have elapsed.
+ * Only try once. If we miss, let cp0_trigger_int5_masked() handle it.
+ */
+void
+cp0_rearm_int5(void *unused, uint64_t nsecs)
+{
+	uint32_t cycles, t0, t1, target;
+	register_t sr;
+
+	if (nsecs > cp0_nsec_max)
+		nsecs = cp0_nsec_max;
+	cycles = (nsecs * cp0_nsec_cycle_ratio) >> 32;
+
+	/*
+	 * Set compare, then immediately reread count. If INT5 is not
+	 * pending then we need to check if we missed. If t0 + cycles
+	 * did not overflow then we need t0 <= t1 < target. Otherwise,
+	 * there are two valid constraints: either t0 <= t1 or t1 < target
+	 * show we didn't miss.
+	 */
+	sr = disableintr();
+	t0 = cp0_get_count();
+	target = t0 + cycles;
+	cp0_set_compare(target);
+	t1 = cp0_get_count();
+	if (!ISSET(cp0_get_cause(), CR_INT_5)) {
+		if (t0 <= target) {
+			if (target <= t1 || t1 < t0)
+				cp0_trigger_int5_masked();
+		} else {
+			if (t1 < t0 && target <= t1)
+				cp0_trigger_int5_masked();
+		}
+	}
+	setsr(sr);
+}
+
+void
+cp0_trigger_int5(void)
+{
+	register_t sr;
+
+	sr = disableintr();
+	cp0_trigger_int5_masked();
+	setsr(sr);
+}
+
+/*
+ * Arm INT5 to fire as soon as possible.
+ *
  * We need to spin until either (a) INT5 is pending or (b) the compare
  * register leads the count register, i.e. we know INT5 will be pending
  * very soon.
@@ -187,33 +222,38 @@ cp0_int5(uint32_t mask, struct trapframe *tf)
  * to arm the timer on most Octeon hardware.
  */
 void
-cp0_trigger_int5(void)
+cp0_trigger_int5_masked(void)
 {
 	uint32_t compare, offset = 16;
 	int leading = 0;
-	register_t sr;

-	sr = disableintr();
-	while (!leading && !ISSET(cp0_get_cause(), CR_INT_5)) {
+	while (!ISSET(cp0_get_cause(), CR_INT_5) && !leading) {
 		compare = cp0_get_count() + offset;
 		cp0_set_compare(compare);
 		leading = (int32_t)(compare - cp0_get_count()) > 0;
 		offset *= 2;
 	}
-	setsr(sr);
+}
+
+void
+cp0_trigger_int5_wrapper(void *unused)
+{
+	cp0_trigger_int5();
 }

 /*
- * Start the real-time and statistics clocks. Leave stathz 0 since there
- * are no other timers available.
+ * Start the clock interrupt dispatch cycle.
  */
 void
 cp0_startclock(struct cpu_info *ci)
 {
 	int s;

-#ifdef MULTIPROCESSOR
-	if (!CPU_IS_PRIMARY(ci)) {
+	if (CPU_IS_PRIMARY(ci)) {
+		stathz = hz;
+		profhz = stathz * 10;
+		clockintr_init(CL_RNDSTAT);
+	} else {
 		s = splhigh();
 		nanouptime(&ci->ci_schedstate.spc_runtime);
 		splx(s);
@@ -223,14 +263,12 @@ cp0_startclock(struct cpu_info *ci)
 		cp0_calibrate(ci);
 	}
-#endif
+
+	clockintr_cpu_init(&cp0_intrclock);

 	/* Start the clock. */
 	s = splclock();
-	ci->ci_cpu_counter_interval =
-	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
-	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
-	cp0_set_compare(ci->ci_cpu_counter_last);
-	ci->ci_clock_started++;
+	ci->ci_clock_started = 1;
+	clockintr_trigger();
 	splx(s);
 }
diff --git a/sys/arch/mips64/mips64/mips64_machdep.c b/sys/arch/mips64/mips64/mips64_machdep.c
index fb5a0fdc233..634aaae7d57 100644
--- a/sys/arch/mips64/mips64/mips64_machdep.c
+++ b/sys/arch/mips64/mips64/mips64_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mips64_machdep.c,v 1.39 2022/10/30 17:43:39 guenther Exp $	*/
+/*	$OpenBSD: mips64_machdep.c,v 1.40 2022/11/19 16:23:48 cheloha Exp $	*/

 /*
  * Copyright (c) 2009, 2010, 2012 Miodrag Vallat.
@@ -44,6 +44,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/clockintr.h>
 #include <sys/proc.h>
 #include <sys/exec.h>
 #include <sys/sysctl.h>
@@ -302,16 +303,15 @@ cp0_calibrate(struct cpu_info *ci)
 }

 /*
- * Start the real-time and statistics clocks.
+ * Prepare to start the clock interrupt dispatch cycle.
  */
 void
 cpu_initclocks(void)
 {
 	struct cpu_info *ci = curcpu();

-	profhz = hz;
-
 	tick = 1000000 / hz;	/* number of micro-seconds between interrupts */
+	tick_nsec = 1000000000 / hz;

 	cp0_calibrate(ci);

@@ -331,14 +331,10 @@ cpu_initclocks(void)
 	(*md_startclock)(ci);
 }

-/*
- * We assume newhz is either stathz or profhz, and that neither will
- * change after being set up above. Could recalculate intervals here
- * but that would be a drag.
- */
 void
 setstatclockrate(int newhz)
 {
+	clockintr_setstatclockrate(newhz);
 }

 /*
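
The subtle part of cp0_rearm_int5() above is deciding, in wrapping 32-bit
arithmetic, whether the compare write landed too late to take effect. The
same check restated as a standalone predicate (rearm_missed is a
hypothetical name; the comparisons are the diff's):

#include <stdbool.h>
#include <stdint.h>

/*
 * t0 = count before writing compare, target = t0 + cycles (may wrap),
 * t1 = count after the write. Returns true when we cannot prove the
 * timer will still fire, mirroring cp0_rearm_int5() above.
 */
static bool
rearm_missed(uint32_t t0, uint32_t target, uint32_t t1)
{
	if (t0 <= target) {
		/* No wrap: safe only if t0 <= t1 < target still holds. */
		return target <= t1 || t1 < t0;
	}
	/*
	 * target wrapped past zero: safe if t1 has not wrapped yet
	 * (t0 <= t1) or has wrapped but is still short of target
	 * (t1 < target).
	 */
	return t1 < t0 && target <= t1;
}

On a miss the driver falls back to cp0_trigger_int5_masked(), which spins
with a doubling offset (16, 32, 64, ... cycles) until either INT5 is
pending or the compare register provably leads the count register.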