diff options
author | cheloha <cheloha@cvs.openbsd.org> | 2019-11-02 16:56:19 +0000 |
---|---|---|
committer | cheloha <cheloha@cvs.openbsd.org> | 2019-11-02 16:56:19 +0000 |
commit | 4045664d37137d32cbe67bf01205c34f59347314 (patch) | |
tree | 0192aa51f1fc4428a4b47ff563ed5937d406db69 /sys/kern | |
parent | 7ef1197174f175a30ea5c09bb643989e517cd70c (diff) |
softclock: move softintr registration/scheduling into timeout module
softclock() is scheduled from hardclock(9) because long ago callouts were
processed from hardclock(9) directly. The introduction of timeout(9) circa
2000 moved all callout processing into a dedicated module, but the softclock
scheduling stayed behind in hardclock(9).
We can move all the softclock() "stuff" into the timeout module to make
kern_clock.c a bit cleaner. Neither initclocks() nor hardclock(9) needs
to "know" about softclock(). The initial softclock() softintr registration
can be done from timeout_proc_init() and softclock() can be scheduled
from timeout_hardclock_update().
ok visa@
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_clock.c | 15 | ||||
-rw-r--r-- | sys/kern/kern_timeout.c | 29 |
2 files changed, 25 insertions, 19 deletions
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c index edaf8cdec61..0a3679b0510 100644 --- a/sys/kern/kern_clock.c +++ b/sys/kern/kern_clock.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_clock.c,v 1.99 2019/08/02 02:17:35 cheloha Exp $ */ +/* $OpenBSD: kern_clock.c,v 1.100 2019/11/02 16:56:17 cheloha Exp $ */ /* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */ /*- @@ -87,8 +87,6 @@ int ticks; static int psdiv, pscnt; /* prof => stat divider */ int psratio; /* ratio: prof / stat */ -void *softclock_si; - volatile unsigned long jiffies; /* XXX Linux API for drm(4) */ /* @@ -99,10 +97,6 @@ initclocks(void) { int i; - softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL); - if (softclock_si == NULL) - panic("initclocks: unable to register softclock intr"); - ticks = INT_MAX - (15 * 60 * hz); jiffies = ULONG_MAX - (10 * 60 * hz); @@ -186,12 +180,9 @@ hardclock(struct clockframe *frame) jiffies++; /* - * Update real-time timeout queue. - * Process callouts at a very low cpu priority, so we don't keep the - * relatively high clock interrupt priority any longer than necessary. + * Update the timeout wheel. */ - if (timeout_hardclock_update()) - softintr_schedule(softclock_si); + timeout_hardclock_update(); } /* diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index 84e1cb8c9c8..e6968c8eac5 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_timeout.c,v 1.59 2019/09/20 16:44:32 cheloha Exp $ */ +/* $OpenBSD: kern_timeout.c,v 1.60 2019/11/02 16:56:17 cheloha Exp $ */ /* * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org> * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org> @@ -95,6 +95,8 @@ struct mutex timeout_mutex = MUTEX_INITIALIZER(IPL_HIGH); struct timeoutstat tostat; +void *softclock_si; + /* * Circular queue definitions. 
*/ @@ -134,6 +136,7 @@ struct timeoutstat tostat; #define CIRCQ_EMPTY(elem) (CIRCQ_FIRST(elem) == (elem)) +void softclock(void *); void softclock_thread(void *); void softclock_create_thread(void *); @@ -205,6 +208,10 @@ timeout_startup(void) void timeout_proc_init(void) { + softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL); + if (softclock_si == NULL) + panic("%s: unable to register softclock interrupt", __func__); + WITNESS_INIT(&timeout_sleeplock_obj, &timeout_sleeplock_type); WITNESS_INIT(&timeout_spinlock_obj, &timeout_spinlock_type); @@ -427,13 +434,13 @@ timeout_proc_barrier(void *arg) } /* - * This is called from hardclock() once every tick. - * We return !0 if we need to schedule a softclock. + * This is called from hardclock() on the primary CPU at the start of + * every tick. */ -int +void timeout_hardclock_update(void) { - int ret; + int need_softclock; mtx_enter(&timeout_mutex); @@ -446,10 +453,12 @@ timeout_hardclock_update(void) MOVEBUCKET(3, ticks); } } - ret = !CIRCQ_EMPTY(&timeout_todo); + need_softclock = !CIRCQ_EMPTY(&timeout_todo); + mtx_leave(&timeout_mutex); - return (ret); + if (need_softclock) + softintr_schedule(softclock_si); } void @@ -475,6 +484,12 @@ timeout_run(struct timeout *to) mtx_enter(&timeout_mutex); } +/* + * Timeouts are processed here instead of timeout_hardclock_update() + * to avoid doing any more work at IPL_CLOCK than absolutely necessary. + * Down here at IPL_SOFTCLOCK other interrupts can be serviced promptly + * so the system remains responsive even if there is a surge of timeouts. + */ void softclock(void *arg) { |