author    Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2023-09-05 22:25:42 +0000
committer Scott Soule Cheloha <cheloha@cvs.openbsd.org>    2023-09-05 22:25:42 +0000
commit    f8dda77c3002a4826473c3cbfa9f6cdfa3406917 (patch)
tree      43e12e639cf00aa6949e7f4a19f7ada1d8585268 /sys
parent    45a3d03fe65a501976848245d0fd20dbe62ffbd3 (diff)
clockintr: add clockintr_advance_random()
Add clockintr_advance_random(). Its sole purpose is to simplify the implementation of statclock's pseudorandom period. Ideally, nothing else will use it and we will get rid of it and the pseudorandom statclock period at some point in the near future.

Suggested by mpi@.

Thread: https://marc.info/?l=openbsd-tech&m=169392340028978&w=2

ok mpi@ mlarkin@
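The helper's loop is small enough to model outside the kernel. Below is a hypothetical userspace sketch of the same pseudorandom-period idea, not the committed kernel code: arc4random(3) stands in for the kernel's random(9), and the min, mask, and uptime values in main() are invented for illustration.

/*
 * Sketch of the pseudorandom-period technique: advance an expiration
 * point by the minimum period plus a nonzero masked random offset
 * until it lies in the future, counting how many periods elapsed.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
advance_random(uint64_t *expiration, uint64_t uptime, uint64_t min,
    uint32_t mask)
{
	uint64_t count = 0;
	uint32_t off;

	while (*expiration <= uptime) {
		/* Retry until the masked offset is nonzero. */
		while ((off = (arc4random() & mask)) == 0)
			continue;
		*expiration += min + off;
		count++;
	}
	return count;
}

int
main(void)
{
	uint64_t expiration = 0;
	uint64_t uptime = 100000000;		/* pretend 100 ms of uptime (ns) */
	uint64_t min = 7500000;			/* example minimum period (ns) */
	uint32_t mask = (1U << 22) - 1;		/* offsets in [1, ~4.2 ms] */
	uint64_t count;

	count = advance_random(&expiration, uptime, min, mask);
	printf("%llu periods elapsed, next expiration at %llu ns\n",
	    (unsigned long long)count, (unsigned long long)expiration);
	return 0;
}

Because a zero offset is rejected, every period is strictly longer than min; with a uniform offset the long-run average period lands near min + mask/2.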
Diffstat (limited to 'sys')
-rw-r--r--   sys/kern/kern_clockintr.c   37
1 file changed, 24 insertions, 13 deletions
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 1f234f15b97..c32643615c3 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.33 2023/08/26 22:21:00 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.34 2023/09/05 22:25:41 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -42,6 +42,7 @@ uint32_t statclock_avg; /* [I] average statclock period (ns) */
uint32_t statclock_min; /* [I] minimum statclock period (ns) */
uint32_t statclock_mask; /* [I] set of allowed offsets */
+uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
void clockintr_cancel_locked(struct clockintr *);
uint64_t clockintr_expiration(const struct clockintr *);
void clockintr_hardclock(struct clockintr *, void *);
@@ -345,6 +346,25 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
	return count;
}
+uint64_t
+clockintr_advance_random(struct clockintr *cl, uint64_t min, uint32_t mask)
+{
+	uint64_t count = 0;
+	struct clockintr_queue *cq = cl->cl_queue;
+	uint32_t off;
+
+	KASSERT(cl == &cq->cq_shadow);
+
+	while (cl->cl_expiration <= cq->cq_uptime) {
+		while ((off = (random() & mask)) == 0)
+			continue;
+		cl->cl_expiration += min + off;
+		count++;
+	}
+	SET(cl->cl_flags, CLST_SHADOW_PENDING);
+	return count;
+}
+
void
clockintr_cancel(struct clockintr *cl)
{
@@ -498,20 +518,11 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
void
clockintr_statclock(struct clockintr *cl, void *frame)
{
-	uint64_t count, expiration, i, uptime;
-	uint32_t off;
+	uint64_t count, i;

	if (ISSET(clockintr_flags, CL_RNDSTAT)) {
-		count = 0;
-		expiration = clockintr_expiration(cl);
-		uptime = clockintr_nsecuptime(cl);
-		while (expiration <= uptime) {
-			while ((off = (random() & statclock_mask)) == 0)
-				continue;
-			expiration += statclock_min + off;
-			count++;
-		}
-		clockintr_schedule(cl, expiration);
+		count = clockintr_advance_random(cl, statclock_min,
+		    statclock_mask);
	} else {
		count = clockintr_advance(cl, statclock_avg);
	}