summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorScott Soule Cheloha <cheloha@cvs.openbsd.org>2023-08-21 17:22:05 +0000
committerScott Soule Cheloha <cheloha@cvs.openbsd.org>2023-08-21 17:22:05 +0000
commit4c688a761bb6018b68636a0c3d12508cd0bbfd52 (patch)
tree78b17ab9147b783e8b2e7d15fb80ff1f2e80a4fa
parent916cebc434d0cbb69b2b9769b7d4f1b0640b3f76 (diff)
clockintr: remove support for independent schedclock()
Remove the scaffolding for an independent schedclock(). With the removal of the independent schedclock() from alpha, schedhz is zero on all platforms and this schedclock-specific code is now unused. It is possible that schedclock() will be repurposed for use in the future. Even if this happens, the schedclock handle will not live in struct clockintr_queue.
-rw-r--r--sys/kern/kern_clockintr.c38
-rw-r--r--sys/sys/clockintr.h3
2 files changed, 4 insertions, 37 deletions
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index 6351a0bb255..48fb0def3f7 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.31 2023/08/11 22:02:50 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.32 2023/08/21 17:22:04 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -38,7 +38,6 @@
*/
u_int clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
-uint32_t schedclock_period; /* [I] schedclock period (ns) */
uint32_t statclock_avg; /* [I] average statclock period (ns) */
uint32_t statclock_min; /* [I] minimum statclock period (ns) */
uint32_t statclock_mask; /* [I] set of allowed offsets */
@@ -47,7 +46,6 @@ void clockintr_cancel_locked(struct clockintr *);
uint64_t clockintr_expiration(const struct clockintr *);
void clockintr_hardclock(struct clockintr *, void *);
uint64_t clockintr_nsecuptime(const struct clockintr *);
-void clockintr_schedclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
@@ -89,10 +87,6 @@ clockintr_init(u_int flags)
statclock_min = statclock_avg - (var / 2);
statclock_mask = var - 1;
- KASSERT(schedhz >= 0 && schedhz <= 1000000000);
- if (schedhz != 0)
- schedclock_period = 1000000000 / schedhz;
-
SET(clockintr_flags, flags | CL_INIT);
}
@@ -128,12 +122,6 @@ clockintr_cpu_init(const struct intrclock *ic)
if (cq->cq_statclock == NULL)
panic("%s: failed to establish statclock", __func__);
}
- if (schedhz != 0 && cq->cq_schedclock == NULL) {
- cq->cq_schedclock = clockintr_establish(cq,
- clockintr_schedclock);
- if (cq->cq_schedclock == NULL)
- panic("%s: failed to establish schedclock", __func__);
- }
/*
* Mask CQ_INTRCLOCK while we're advancing the internal clock
@@ -175,8 +163,8 @@ clockintr_cpu_init(const struct intrclock *ic)
}
/*
- * We can always advance the statclock and schedclock.
- * There is no reason to stagger a randomized statclock.
+ * We can always advance the statclock. There is no reason to
+ * stagger a randomized statclock.
*/
if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
if (cq->cq_statclock->cl_expiration == 0) {
@@ -185,13 +173,6 @@ clockintr_cpu_init(const struct intrclock *ic)
}
}
clockintr_advance(cq->cq_statclock, statclock_avg);
- if (schedhz != 0) {
- if (cq->cq_schedclock->cl_expiration == 0) {
- clockintr_stagger(cq->cq_schedclock, schedclock_period,
- multiplier, MAXCPUS);
- }
- clockintr_advance(cq->cq_schedclock, schedclock_period);
- }
/*
* XXX Need to find a better place to do this. We can't do it in
@@ -515,19 +496,6 @@ clockintr_hardclock(struct clockintr *cl, void *frame)
}
void
-clockintr_schedclock(struct clockintr *cl, void *unused)
-{
- uint64_t count, i;
- struct proc *p = curproc;
-
- count = clockintr_advance(cl, schedclock_period);
- if (p != NULL) {
- for (i = 0; i < count; i++)
- schedclock(p);
- }
-}
-
-void
clockintr_statclock(struct clockintr *cl, void *frame)
{
uint64_t count, expiration, i, uptime;
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index a46a3114462..71e40931d8a 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: clockintr.h,v 1.9 2023/07/25 18:16:19 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.10 2023/08/21 17:22:04 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
@@ -97,7 +97,6 @@ struct clockintr_queue {
TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
struct clockintr *cq_running; /* [m] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
- struct clockintr *cq_schedclock;/* [o] schedclock handle, if any */
struct clockintr *cq_statclock; /* [o] statclock handle */
struct intrclock cq_intrclock; /* [I] local interrupt clock */
struct clockintr_stat cq_stat; /* [o] dispatch statistics */