diff options
author | Scott Soule Cheloha <cheloha@cvs.openbsd.org> | 2024-01-15 01:15:38 +0000 |
---|---|---|
committer | Scott Soule Cheloha <cheloha@cvs.openbsd.org> | 2024-01-15 01:15:38 +0000 |
commit | 55ac248e98a7a68a8749956113880954542b51c7 (patch) | |
tree | 28dabc05f50a5f95f6ede2af321bd7290ae2618d | |
parent | 0927ab4a35a5edc16f0d86bcd49eae05e4c8d9ef (diff) |
clockintr: move CLST_IGNORE_REQUEST from cl_flags to cq_flags
In the near future, we will add support for destroying clockintr
objects. When this happens, it will no longer be safe to dereference
the pointer to the expired clockintr during the dispatch loop in
clockintr_dispatch() after reentering cq_mtx. This means we will not
be able to safely check for the CLST_IGNORE_REQUEST flag.
So replace the CLST_IGNORE_REQUEST flag in cl_flags with the
CQ_IGNORE_REQUEST flag in cq_flags. The semantics are the same.
Both cl_flags and cq_flags are protected by cq_mtx.
Note that we cannot move the CLST_IGNORE_REQUEST flag to cr_flags in
struct clockrequest: that member is owned by the dispatching CPU and
is not mutated with atomic operations.
-rw-r--r-- | sys/kern/kern_clockintr.c | 10 | ||||
-rw-r--r-- | sys/sys/clockintr.h | 8 |
2 files changed, 9 insertions, 9 deletions
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c index c9f5fd2cd49..6e771a043ba 100644 --- a/sys/kern/kern_clockintr.c +++ b/sys/kern/kern_clockintr.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_clockintr.c,v 1.62 2023/10/17 00:04:02 cheloha Exp $ */ +/* $OpenBSD: kern_clockintr.c,v 1.63 2024/01/15 01:15:37 cheloha Exp $ */ /* * Copyright (c) 2003 Dale Rahn <drahn@openbsd.org> * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org> @@ -220,8 +220,8 @@ clockintr_dispatch(void *frame) mtx_enter(&cq->cq_mtx); cq->cq_running = NULL; - if (ISSET(cl->cl_flags, CLST_IGNORE_REQUEST)) { - CLR(cl->cl_flags, CLST_IGNORE_REQUEST); + if (ISSET(cq->cq_flags, CQ_IGNORE_REQUEST)) { + CLR(cq->cq_flags, CQ_IGNORE_REQUEST); CLR(request->cr_flags, CR_RESCHEDULE); } if (ISSET(request->cr_flags, CR_RESCHEDULE)) { @@ -333,7 +333,7 @@ clockintr_cancel(struct clockintr *cl) } } if (cl == cq->cq_running) - SET(cl->cl_flags, CLST_IGNORE_REQUEST); + SET(cq->cq_flags, CQ_IGNORE_REQUEST); mtx_leave(&cq->cq_mtx); } @@ -384,7 +384,7 @@ clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration) } } if (cl == cq->cq_running) - SET(cl->cl_flags, CLST_IGNORE_REQUEST); + SET(cq->cq_flags, CQ_IGNORE_REQUEST); } void diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h index 967e0f9f95c..350694edcf3 100644 --- a/sys/sys/clockintr.h +++ b/sys/sys/clockintr.h @@ -1,4 +1,4 @@ -/* $OpenBSD: clockintr.h,v 1.23 2023/10/17 00:04:02 cheloha Exp $ */ +/* $OpenBSD: clockintr.h,v 1.24 2024/01/15 01:15:37 cheloha Exp $ */ /* * Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org> * @@ -68,7 +68,6 @@ struct clockintr { }; #define CLST_PENDING 0x00000001 /* scheduled to run */ -#define CLST_IGNORE_REQUEST 0x00000002 /* ignore callback requests */ /* * Interface for callback rescheduling requests. 
@@ -108,12 +107,13 @@ struct clockintr_queue { struct clockintr_stat cq_stat; /* [o] dispatch statistics */ volatile uint32_t cq_gen; /* [o] cq_stat update generation */ volatile uint32_t cq_dispatch; /* [o] dispatch is running */ - uint32_t cq_flags; /* [I] CQ_* flags; see below */ + uint32_t cq_flags; /* [m] CQ_* flags; see below */ }; #define CQ_INIT 0x00000001 /* clockintr_cpu_init() done */ #define CQ_INTRCLOCK 0x00000002 /* intrclock installed */ -#define CQ_STATE_MASK 0x00000003 +#define CQ_IGNORE_REQUEST 0x00000004 /* ignore callback requests */ +#define CQ_STATE_MASK 0x00000007 void clockintr_cpu_init(const struct intrclock *); int clockintr_dispatch(void *); |