-rw-r--r--   sys/kern/kern_clockintr.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index bcd583c193c..1f1f11a25cb 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.22 2023/06/15 22:18:06 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.23 2023/06/18 23:09:35 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -64,6 +64,7 @@ uint64_t clockintr_nsecuptime(const struct clockintr *);
 void clockintr_schedclock(struct clockintr *, void *);
 void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
+void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
 void clockintr_statclock(struct clockintr *, void *);
 void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -459,6 +460,20 @@ clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
 	SET(cl->cl_flags, CLST_PENDING);
 }
 
+void
+clockintr_stagger(struct clockintr *cl, uint64_t period, u_int n, u_int count)
+{
+	struct clockintr_queue *cq = cl->cl_queue;
+
+	KASSERT(n < count);
+
+	mtx_enter(&cq->cq_mtx);
+	if (ISSET(cl->cl_flags, CLST_PENDING))
+		panic("%s: clock interrupt pending", __func__);
+	cl->cl_expiration = period / count * n;
+	mtx_leave(&cq->cq_mtx);
+}
+
 /*
  * Compute the period (avg) for the given frequency and a range around
  * that period. The range is [min + 1, min + mask]. The range is used
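For context, a stand-alone sketch of the arithmetic the new clockintr_stagger() performs: client n of count gets an initial expiration offset of period / count * n, so periodic clock interrupts belonging to different clients (e.g. different CPUs) are spread evenly across one period instead of all expiring at the same instant. Everything below (the helper name, the 100 Hz period, the CPU count) is an illustrative assumption, not code from the kernel; only the offset expression itself is taken from the diff.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical user-space model of the stagger computation:
 * slot n of count gets expiration period / count * n.
 */
static uint64_t
stagger_offset(uint64_t period, unsigned int n, unsigned int count)
{
	return period / count * n;	/* same expression as in the diff */
}

int
main(void)
{
	const uint64_t hardclock_period = 10000000;	/* assumed 100 Hz: 10 ms in ns */
	const unsigned int ncpus = 4;			/* assumed CPU count */

	for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
		printf("cpu%u: first expiration at %llu ns\n", cpu,
		    (unsigned long long)stagger_offset(hardclock_period, cpu, ncpus));
	}
	return 0;
}

With these assumed values the offsets come out to 0, 2.5, 5, and 7.5 ms, one quarter period apart. Note the panic in the kernel function: an interrupt that is already pending (CLST_PENDING set) must not have its expiration rewritten, so staggering is only valid before the clock interrupt has been scheduled.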