author     Scott Soule Cheloha <cheloha@cvs.openbsd.org>  2023-04-03 00:20:25 +0000
committer  Scott Soule Cheloha <cheloha@cvs.openbsd.org>  2023-04-03 00:20:25 +0000
commit     fc467bb2a45211c246b87f4db72f00162856eda7 (patch)
tree       728c08fc602839aac93c9d6c1c1fff009fe40c79
parent     b3f06f4544b7f3d66bfb14e9b6076d2e3eaf68a3 (diff)
clockintr: protect struct clockintr_queue with a mutex
Add a mutex (cq_mtx) to struct clockintr_queue so that arbitrary CPUs
can manipulate clock interrupts established on arbitrary CPU queues.

Refactor the bulk of clockintr_schedule() into clockintr_schedule_locked()
so we can reuse it from within the mutex.

Tested by mlarkin@.  Neat bug found by mlarkin@.  With tweaks from
kettenis@.

ok kettenis@
-rw-r--r--  sys/kern/kern_clockintr.c  41
-rw-r--r--  sys/sys/clockintr.h        22
2 files changed, 50 insertions(+), 13 deletions(-)
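
[Editor's note] The core of the change is the locked-wrapper split:
clockintr_schedule() now only takes cq_mtx and calls
clockintr_schedule_locked(), so callers that already hold the mutex (such
as clockintr_advance()) can reuse the scheduling logic without recursing
on the lock.  Below is a minimal userland sketch of that pattern; the
pthread mutex and the simplified struct layouts are stand-in assumptions,
not the kernel's mtx(9) interface or the real clockintr definitions.

/*
 * Sketch of the locked-wrapper pattern.  Assumed stand-ins: a pthread
 * mutex instead of mtx(9), and simplified struct layouts.
 */
#include <pthread.h>
#include <stdint.h>

struct clockintr_queue {
	pthread_mutex_t cq_mtx;		/* protects the members below */
};

struct clockintr {
	uint64_t cl_expiration;
	struct clockintr_queue *cl_queue;
};

/* Internal variant: the caller must already hold cq_mtx. */
static void
clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
{
	cl->cl_expiration = expiration;
	/* ... reinsert cl into the pending list, sorted by expiration ... */
}

/* Public variant: acquires and releases the queue mutex itself. */
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
	struct clockintr_queue *cq = cl->cl_queue;

	pthread_mutex_lock(&cq->cq_mtx);
	clockintr_schedule_locked(cl, expiration);
	pthread_mutex_unlock(&cq->cq_mtx);
}

The same shape is what lets clockintr_advance() read cl_expiration,
compute the next deadline, and reschedule under a single acquisition of
cq_mtx, as seen in the hunk below.
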
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index c3de89a0b82..c937d63c4e7 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.5 2023/03/14 00:11:58 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.6 2023/04/03 00:20:24 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -61,6 +61,7 @@ void clockintr_hardclock(struct clockintr *, void *);
uint64_t clockintr_nsecuptime(const struct clockintr *);
void clockintr_schedclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
+void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *);
void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -111,6 +112,7 @@ clockintr_cpu_init(const struct intrclock *ic)
KASSERT(ISSET(clockintr_flags, CL_INIT));
if (!ISSET(cq->cq_flags, CL_CPU_INIT)) {
+ mtx_init(&cq->cq_mtx, IPL_CLOCK);
TAILQ_INIT(&cq->cq_est);
TAILQ_INIT(&cq->cq_pend);
cq->cq_hardclock = clockintr_establish(cq, clockintr_hardclock);
@@ -205,6 +207,8 @@ clockintr_dispatch(void *frame)
splassert(IPL_CLOCK);
KASSERT(ISSET(cq->cq_flags, CL_CPU_INIT));
+ mtx_enter(&cq->cq_mtx);
+
/*
* If nothing is scheduled or we arrived too early, we have
* nothing to do.
@@ -233,9 +237,11 @@ clockintr_dispatch(void *frame)
TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
CLR(cl->cl_flags, CLST_PENDING);
cq->cq_running = cl;
+ mtx_leave(&cq->cq_mtx);
cl->cl_func(cl, frame);
+ mtx_enter(&cq->cq_mtx);
cq->cq_running = NULL;
run++;
}
@@ -269,6 +275,8 @@ stats:
membar_producer();
cq->cq_gen = MAX(1, ogen + 1);
+ mtx_leave(&cq->cq_mtx);
+
if (cq->cq_dispatch != 1)
panic("%s: unexpected value: %u", __func__, cq->cq_dispatch);
cq->cq_dispatch = 0;
@@ -280,10 +288,13 @@ uint64_t
clockintr_advance(struct clockintr *cl, uint64_t period)
{
uint64_t count, expiration;
+ struct clockintr_queue *cq = cl->cl_queue;
+ mtx_enter(&cq->cq_mtx);
expiration = cl->cl_expiration;
- count = nsec_advance(&expiration, period, cl->cl_queue->cq_uptime);
- clockintr_schedule(cl, expiration);
+ count = nsec_advance(&expiration, period, cq->cq_uptime);
+ clockintr_schedule_locked(cl, expiration);
+ mtx_leave(&cq->cq_mtx);
return count;
}
@@ -298,22 +309,43 @@ clockintr_establish(struct clockintr_queue *cq,
return NULL;
cl->cl_func = func;
cl->cl_queue = cq;
+
+ mtx_enter(&cq->cq_mtx);
TAILQ_INSERT_TAIL(&cq->cq_est, cl, cl_elink);
+ mtx_leave(&cq->cq_mtx);
return cl;
}
uint64_t
clockintr_expiration(const struct clockintr *cl)
{
- return cl->cl_expiration;
+ uint64_t expiration;
+ struct clockintr_queue *cq = cl->cl_queue;
+
+ mtx_enter(&cq->cq_mtx);
+ expiration = cl->cl_expiration;
+ mtx_leave(&cq->cq_mtx);
+ return expiration;
}
void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
+ struct clockintr_queue *cq = cl->cl_queue;
+
+ mtx_enter(&cq->cq_mtx);
+ clockintr_schedule_locked(cl, expiration);
+ mtx_leave(&cq->cq_mtx);
+}
+
+void
+clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
+{
struct clockintr *elm;
struct clockintr_queue *cq = cl->cl_queue;
+ MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
+
if (ISSET(cl->cl_flags, CLST_PENDING)) {
TAILQ_REMOVE(&cq->cq_pend, cl, cl_plink);
CLR(cl->cl_flags, CLST_PENDING);
@@ -459,6 +491,7 @@ clockintr_statclock(struct clockintr *cl, void *frame)
uint64_t
clockqueue_next(const struct clockintr_queue *cq)
{
+ MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
}
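
[Editor's note] Note how the dispatch hunk above drops cq_mtx around the
cl_func() callback: the callback may itself call clockintr_advance() or
clockintr_schedule(), both of which take cq_mtx, so holding the mutex
across the call would self-deadlock.  A rough userland sketch of that
dance follows; dispatch_one() is a hypothetical condensation of the loop
body in clockintr_dispatch(), and the pthread mutex and stand-in types
are again assumptions rather than the kernel API.

#include <pthread.h>
#include <stdint.h>

struct clockintr_queue;

struct clockintr {
	uint64_t cl_expiration;
	struct clockintr_queue *cl_queue;
	void (*cl_func)(struct clockintr *, void *);
};

struct clockintr_queue {
	pthread_mutex_t cq_mtx;
	struct clockintr *cq_running;
};

void
dispatch_one(struct clockintr_queue *cq, struct clockintr *cl, void *frame)
{
	pthread_mutex_lock(&cq->cq_mtx);
	/* ... remove cl from the pending list, clear its PENDING flag ... */
	cq->cq_running = cl;
	pthread_mutex_unlock(&cq->cq_mtx);	/* drop across the callback */

	cl->cl_func(cl, frame);			/* may take cq_mtx itself */

	pthread_mutex_lock(&cq->cq_mtx);
	cq->cq_running = NULL;
	pthread_mutex_unlock(&cq->cq_mtx);
}

cq_running marks the in-flight clockintr while the mutex is dropped, which
matches its move to [m] protection in the header change below.
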
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index b9c7f656e9c..df81cab5ae3 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: clockintr.h,v 1.3 2023/03/09 03:50:38 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.4 2023/04/03 00:20:24 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
@@ -32,6 +32,7 @@ struct clockintr_stat {
#ifdef _KERNEL
+#include <sys/mutex.h>
#include <sys/queue.h>
/*
@@ -62,16 +63,16 @@ intrclock_trigger(struct intrclock *ic)
* Struct member protections:
*
* I Immutable after initialization.
- * o Owned by a single CPU.
+ * m Parent queue mutex (cl_queue->cq_mtx).
*/
struct clockintr_queue;
struct clockintr {
- uint64_t cl_expiration; /* [o] dispatch time */
- TAILQ_ENTRY(clockintr) cl_elink; /* [o] cq_est glue */
- TAILQ_ENTRY(clockintr) cl_plink; /* [o] cq_pend glue */
+ uint64_t cl_expiration; /* [m] dispatch time */
+ TAILQ_ENTRY(clockintr) cl_elink; /* [m] cq_est glue */
+ TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
void (*cl_func)(struct clockintr *, void *); /* [I] callback */
struct clockintr_queue *cl_queue; /* [I] parent queue */
- u_int cl_flags; /* [o] CLST_* flags */
+ u_int cl_flags; /* [m] CLST_* flags */
};
#define CLST_PENDING 0x00000001 /* scheduled to run */
@@ -81,14 +82,17 @@ struct clockintr {
*
* Struct member protections:
*
+ * a Modified atomically.
* I Immutable after initialization.
+ * m Per-queue mutex (cq_mtx).
* o Owned by a single CPU.
*/
struct clockintr_queue {
+ struct mutex cq_mtx; /* [a] per-queue mutex */
uint64_t cq_uptime; /* [o] cached uptime */
- TAILQ_HEAD(, clockintr) cq_est; /* [o] established clockintr list */
- TAILQ_HEAD(, clockintr) cq_pend;/* [o] pending clockintr list */
- struct clockintr *cq_running; /* [o] running clockintr */
+ TAILQ_HEAD(, clockintr) cq_est; /* [m] established clockintr list */
+ TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
+ struct clockintr *cq_running; /* [m] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
struct clockintr *cq_schedclock;/* [o] schedclock handle, if any */
struct clockintr *cq_statclock; /* [o] statclock handle */