author | Scott Soule Cheloha <cheloha@cvs.openbsd.org> | 2023-06-18 23:09:36 +0000
---|---|---
committer | Scott Soule Cheloha <cheloha@cvs.openbsd.org> | 2023-06-18 23:09:36 +0000
commit | 73468cdd9120d772b007da5961c7161f7705235e (patch) |
tree | 7a4d3b2d3d44d2220a817179e4c1314e56bb02b2 /sys/kern |
parent | f36f5e12022bdde0cbb606faf77b2f6bc50e8130 (diff) |
clockintr: add clockintr_stagger()
clockintr_stagger() formalizes the "staggered start" pattern for
scheduling a common periodic clock interrupt across multiple CPUs.
Right now we implement a staggered start by hand for hardclock(),
statclock(), and schedclock(). I expect we will do it for
profclock(), too.
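The arithmetic behind the helper is simple: interrupt n of count first expires at n/count of one period, so the CPUs are spread evenly across the period instead of firing in lockstep. A minimal standalone sketch of the same computation (illustrative only; nothing here is kernel API):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Print the staggered first-expiration offsets for "count" CPUs
 * sharing one periodic clock interrupt.  Mirrors the kernel
 * expression cl_expiration = period / count * n: the division
 * happens first, so every offset is an exact multiple of the
 * per-CPU slot width.
 */
int
main(void)
{
	uint64_t period = 10000000;	/* e.g. a 100hz hardclock: 10 ms in ns */
	unsigned int count = 4;		/* CPUs sharing the clock interrupt */

	for (unsigned int n = 0; n < count; n++)
		printf("cpu%u: first expiration at %llu ns\n",
		    n, (unsigned long long)(period / count * n));
	return 0;
}
```

With a 10 ms period and four CPUs, the offsets come out to 0, 2.5, 5, and 7.5 ms; after that each CPU advances by whole periods, so the interrupts never land on the same instant.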
Diffstat (limited to 'sys/kern')
-rw-r--r-- | sys/kern/kern_clockintr.c | 17 |
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index bcd583c193c..1f1f11a25cb 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.22 2023/06/15 22:18:06 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.23 2023/06/18 23:09:35 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -64,6 +64,7 @@ uint64_t clockintr_nsecuptime(const struct clockintr *);
 void clockintr_schedclock(struct clockintr *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
+void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
 void clockintr_statclock(struct clockintr *, void *);
 void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -459,6 +460,20 @@ clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
 	SET(cl->cl_flags, CLST_PENDING);
 }
 
+void
+clockintr_stagger(struct clockintr *cl, uint64_t period, u_int n, u_int count)
+{
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	KASSERT(n < count);
+
+	mtx_enter(&cq->cq_mtx);
+	if (ISSET(cl->cl_flags, CLST_PENDING))
+		panic("%s: clock interrupt pending", __func__);
+	cl->cl_expiration = period / count * n;
+	mtx_leave(&cq->cq_mtx);
+}
+
 /*
  * Compute the period (avg) for the given frequency and a range around
  * that period. The range is [min + 1, min + mask]. The range is used
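The KASSERT and the panic encode the contract: n must be a valid index below count, and the handle must not already be pending, so staggering has to happen after clockintr_establish() but before the first clockintr_schedule(). A hedged sketch of what a per-CPU caller might look like; cq_hardclock, hardclock_period, and the CPU-index arguments are assumptions for illustration, not taken from this diff:

```c
/*
 * Illustrative fragment only.  Stagger this CPU's hardclock handle
 * before it is ever scheduled; clockintr_stagger() panics if the
 * handle already has CLST_PENDING set.
 */
void
example_cpu_init(struct clockintr_queue *cq, u_int cpu_index, u_int ncpus)
{
	clockintr_stagger(cq->cq_hardclock, hardclock_period,
	    cpu_index, ncpus);
}
```

After the initial offset is set, the normal rearm path keeps adding whole periods to cl_expiration, so the stagger persists for the life of the interrupt.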