author     David Gwynne <dlg@cvs.openbsd.org>  2017-11-24 02:36:54 +0000
committer  David Gwynne <dlg@cvs.openbsd.org>  2017-11-24 02:36:54 +0000
commit     8c8ca2984185a4971d1e2ba781ed9b1c266168b1
tree       f6529c40d72bac8cd0232dabce88ac7ac2c8ae73 /sys/kern
parent     8f9817eee2dd2a01973e2a9a1b27d58c9900a4db
add timeout_barrier, which is like intr_barrier and taskq_barrier.
if you're trying to free something that a timeout is using, you
have to wait for that timeout to finish running before doing the
free. timeout_del can stop a timeout from running in the future, but
it can't tell you whether the timeout has already come off the queue
and its handler is running right now.
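for example, a detach path would pair the two calls before the free;
the softc and sc_tmo names here are made up for illustration:

        /* stop the timeout from being rescheduled */
        timeout_del(&sc->sc_tmo);
        /* wait for a handler that is already running to return */
        timeout_barrier(&sc->sc_tmo);
        /* the timeout can no longer touch sc */
        free(sc, M_DEVBUF, sizeof(*sc));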
previously you could know that no timeout was running by simply
masking softclock interrupts on the cpu running the kernel. however,
code now runs outside the kernel lock, and timeouts can run in a
thread instead of in the softclock interrupt.
timeout_barrier solves the first problem by taking the kernel lock
and then masking softclock interrupts. a timeout running in softclock
context holds both of those resources, so once timeout_barrier has
acquired them in turn, any handler that was running has finished, and
any further timeout processing is still waiting for them.
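in the diff below that case is only three lines; with comments added:

        KERNEL_LOCK();          /* a softclock timeout runs under this */
        splx(splsoftclock());   /* briefly raise to IPL_SOFTCLOCK, then drop */
        KERNEL_UNLOCK();

acquiring both means any handler that held them has returned.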
the second problem is solved by having timeout_barrier insert its own
work into the timeout thread's queue. the thread runs its work in
order, so when the barrier's work runs, all work queued in the thread
before it has completed.
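the same sentinel trick can be sketched in userland with pthreads.
everything below (struct work, the workq_* functions, the locking
details) is invented for illustration and is not the kernel api, but
the ordering argument is the one timeout_barrier relies on:

#include <pthread.h>
#include <stddef.h>

struct work {
        struct work     *w_next;
        void            (*w_fn)(void *);
        void            *w_arg;
};

static pthread_mutex_t   q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t    q_cv = PTHREAD_COND_INITIALIZER;
static struct work      *q_head;
static struct work     **q_tail = &q_head;

/* append a work item and kick the worker; the queue preserves order */
static void
workq_add(struct work *w)
{
        pthread_mutex_lock(&q_mtx);
        w->w_next = NULL;
        *q_tail = w;
        q_tail = &w->w_next;
        pthread_cond_signal(&q_cv);
        pthread_mutex_unlock(&q_mtx);
}

/* the worker dequeues and runs items strictly in queue order */
static void *
workq_thread(void *arg)
{
        struct work *w;

        (void)arg;
        for (;;) {
                pthread_mutex_lock(&q_mtx);
                while ((w = q_head) == NULL)
                        pthread_cond_wait(&q_cv, &q_mtx);
                if ((q_head = w->w_next) == NULL)
                        q_tail = &q_head;
                pthread_mutex_unlock(&q_mtx);

                /* run outside the queue lock, like the timeout thread */
                (*w->w_fn)(w->w_arg);
        }
        return (NULL);
}

struct barrier {
        pthread_mutex_t  b_mtx;
        pthread_cond_t   b_cv;
        int              b_wait;
};

/* the sentinel's handler: flip the flag and wake the waiter */
static void
workq_barrier_fn(void *arg)
{
        struct barrier *b = arg;

        pthread_mutex_lock(&b->b_mtx);
        b->b_wait = 0;
        pthread_cond_signal(&b->b_cv);
        pthread_mutex_unlock(&b->b_mtx);
}

/* returns only after every item queued before this call has run */
static void
workq_barrier(void)
{
        struct barrier b;
        struct work w = { NULL, workq_barrier_fn, &b };

        pthread_mutex_init(&b.b_mtx, NULL);
        pthread_cond_init(&b.b_cv, NULL);
        b.b_wait = 1;

        workq_add(&w);

        pthread_mutex_lock(&b.b_mtx);
        while (b.b_wait)
                pthread_cond_wait(&b.b_cv, &b.b_mtx);
        pthread_mutex_unlock(&b.b_mtx);

        pthread_mutex_destroy(&b.b_mtx);
        pthread_cond_destroy(&b.b_cv);
}

the barrier's work item lives on the waiter's stack, which is safe
because the worker never touches it again once the handler has run;
the kernel version puts its struct timeout barrier on the stack for
the same reason.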
fixes and ok visa@, who thinks this will be useful for his work
too.
Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/kern_timeout.c | 42
1 file changed, 41 insertions, 1 deletion
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index f3e2fe29c3d..00565e8a909 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_timeout.c,v 1.50 2016/10/03 11:54:29 dlg Exp $ */
+/* $OpenBSD: kern_timeout.c,v 1.51 2017/11/24 02:36:53 dlg Exp $ */
 /*
  * Copyright (c) 2001 Thomas Nordin <nordin@openbsd.org>
  * Copyright (c) 2000-2001 Artur Grabowski <art@openbsd.org>
@@ -324,6 +324,46 @@ timeout_del(struct timeout *to)
         return (ret);
 }
 
+void    timeout_proc_barrier(void *);
+
+void
+timeout_barrier(struct timeout *to)
+{
+        if (!ISSET(to->to_flags, TIMEOUT_NEEDPROCCTX)) {
+                KERNEL_LOCK();
+                splx(splsoftclock());
+                KERNEL_UNLOCK();
+        } else {
+                int wait = 1;
+                struct timeout barrier;
+                struct sleep_state sls;
+
+                timeout_set_proc(&barrier, timeout_proc_barrier, &wait);
+
+                mtx_enter(&timeout_mutex);
+                barrier.to_flags |= TIMEOUT_ONQUEUE;
+                CIRCQ_INSERT(&barrier.to_list, &timeout_proc);
+                mtx_leave(&timeout_mutex);
+
+                wakeup_one(&timeout_proc);
+
+                while (wait) {
+                        sleep_setup(&sls, &wait, PSWP, "tmobar");
+                        sleep_finish(&sls, wait);
+                }
+        }
+}
+
+void
+timeout_proc_barrier(void *arg)
+{
+        int *wait = arg;
+
+        *wait = 0;
+
+        wakeup_one(wait);
+}
+
 /*
  * This is called from hardclock() once every tick.
  * We return !0 if we need to schedule a softclock.