author		Scott Soule Cheloha <cheloha@cvs.openbsd.org>	2023-07-02 00:55:19 +0000
committer	Scott Soule Cheloha <cheloha@cvs.openbsd.org>	2023-07-02 00:55:19 +0000
commit		f28875d9e45436c91dae5c94da4237a3127cf3b3 (patch)
tree		4aa8210f6b9a4ae960d5974d101d31c2639949ab /sys
parent		cd42cfcb0b8b04d555e0061110132305c310c513 (diff)
clockintr_cpu_init: stagger clock interrupts by MAXCPUS
During clockintr_cpu_init(), we can't stagger by ncpus because not every platform has fully incremented it yet. Instead, stagger by MAXCPUS. The resulting intervals are smaller, but are probably still sufficiently large to avoid aggravating lock contention, even on platforms where MAXCPUS is large.

While here, don't bother staggering the statclock if it is randomized.

With input from claudio@.
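For context, here is a minimal sketch of the staggering arithmetic the diff below switches to. The call sites pass (clockintr, period, multiplier, MAXCPUS); the body here is an assumption inferred from the inline arithmetic being removed (period / ncpus * multiplier), not the committed clockintr_stagger(), and it omits any locking or assertions the real function may perform:

	/*
	 * Sketch only: spread each CPU's first expiration evenly across
	 * one period.  Dividing by a fixed count (MAXCPUS) instead of
	 * ncpus keeps the offsets stable even while ncpus is still being
	 * incremented during autoconf.
	 */
	void
	clockintr_stagger(struct clockintr *cl, uint64_t period, uint64_t n,
	    uint64_t count)
	{
		/* CPU n of count gets an offset in [0, period). */
		cl->cl_expiration = period / count * n;
	}

With multiplier assigned per CPU (0, 1, 2, ...), CPUs end up period/MAXCPUS apart instead of period/ncpus apart.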
Diffstat (limited to 'sys')
-rw-r--r--	sys/kern/kern_clockintr.c	23
1 file changed, 15 insertions, 8 deletions
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 21fad46338b..187898caf07 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.25 2023/06/22 16:23:50 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.26 2023/07/02 00:55:18 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -107,7 +107,7 @@ clockintr_init(u_int flags)
 void
 clockintr_cpu_init(const struct intrclock *ic)
 {
-	uint64_t multiplier = 0, offset;
+	uint64_t multiplier = 0;
 	struct cpu_info *ci = curcpu();
 	struct clockintr_queue *cq = &ci->ci_queue;
 	int reset_cq_intrclock = 0;
@@ -170,21 +170,28 @@ clockintr_cpu_init(const struct intrclock *ic)
 		clockintr_advance(cq->cq_hardclock, hardclock_period);
 	} else {
 		if (cq->cq_hardclock->cl_expiration == 0) {
-			offset = hardclock_period / ncpus * multiplier;
-			cq->cq_hardclock->cl_expiration = offset;
+			clockintr_stagger(cq->cq_hardclock, hardclock_period,
+			    multiplier, MAXCPUS);
 		}
 		clockintr_advance(cq->cq_hardclock, hardclock_period);
 	}
 
 	/*
 	 * We can always advance the statclock and schedclock.
+	 * There is no reason to stagger a randomized statclock.
 	 */
-	offset = statclock_avg / ncpus * multiplier;
-	clockintr_schedule(cq->cq_statclock, offset);
+	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
+		if (cq->cq_statclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_statclock, statclock_avg,
+			    multiplier, MAXCPUS);
+		}
+	}
 	clockintr_advance(cq->cq_statclock, statclock_avg);
 	if (schedhz != 0) {
-		offset = schedclock_period / ncpus * multiplier;
-		clockintr_schedule(cq->cq_schedclock, offset);
+		if (cq->cq_schedclock->cl_expiration == 0) {
+			clockintr_stagger(cq->cq_schedclock, schedclock_period,
+			    multiplier, MAXCPUS);
+		}
 		clockintr_advance(cq->cq_schedclock, schedclock_period);
 	}
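To illustrate the "smaller but probably still sufficient" intervals mentioned in the commit message, the following standalone sketch compares the two divisors using assumed figures (hz=100 so a 10 ms hardclock period, an 8-CPU machine, and a MAXCPUS of 64; none of these numbers come from the commit):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: stagger offsets when dividing by ncpus vs. MAXCPUS. */
	int
	main(void)
	{
		const uint64_t hardclock_period = 10000000;	/* 10 ms in ns, hz=100 */
		const uint64_t ncpus = 8;			/* example machine */
		const uint64_t maxcpus = 64;			/* assumed MAXCPUS */
		uint64_t cpu;

		for (cpu = 0; cpu < ncpus; cpu++) {
			printf("cpu%llu: by ncpus %7llu ns, by MAXCPUS %7llu ns\n",
			    (unsigned long long)cpu,
			    (unsigned long long)(hardclock_period / ncpus * cpu),
			    (unsigned long long)(hardclock_period / maxcpus * cpu));
		}
		return 0;
	}

Under these assumptions the per-CPU spacing shrinks from 1.25 ms to about 156 us, which is still far longer than a typical lock hold time.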