author		David Gwynne <dlg@cvs.openbsd.org>	2022-01-16 06:27:15 +0000
committer	David Gwynne <dlg@cvs.openbsd.org>	2022-01-16 06:27:15 +0000
commit		4b756a35865ae5d275cb9cdaf9dc8140ad407994
tree		a8fa1f55dea777c74975386077d1eb5f5181e90b
parent		17fb9272bb2e6e8b6aca5d0212eb91d2cbf111dd
activate/notify waiting kq kevents from bpf_wakeup directly.
this builds on the mpsafe kq/kevent work visa has been doing.
normally kevents are notified by calling selwakeup, but selwakeup
needs the KERNEL_LOCK. because bpf runs from all sorts of contexts
that may or may not have the kernel lock, the call to selwakeup is
deferred to the systq which already has the kernel lock. while this
avoids spinning in bpf for the kernel lock, it still adds latency
between when the buffer is ready for a program and when that program
gets notified about it. now that bpf kevents are mpsafe and bpf_wakeup
is already holding the necessary locks, we can avoid that latency.
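
roughly, the deferred path handed to the systq looks like the sketch below. the function name bpf_wakeup_task_sketch and the bd_sigio field are illustrative stand-ins rather than code copied from sys/net/bpf.c; only pgsigio(), selwakeup() and the bpf_get()/bpf_put() reference counting are taken from this commit and its diff.

/*
 * sketch of the deferred wakeup task described above. systq tasks run
 * with the KERNEL_LOCK() held, which is what pgsigio() and selwakeup()
 * still require. names marked "illustrative" are not from bpf.c.
 */
static void
bpf_wakeup_task_sketch(void *xd)		/* illustrative name */
{
	struct bpf_d *d = xd;

	/* deliver SIGIO if the descriptor is in async mode */
	if (d->bd_async && d->bd_sig)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);	/* bd_sigio: illustrative */

	/* wake poll/select waiters (and, before this change, kevent waiters) */
	selwakeup(&d->bd_sel);

	/* drop the reference bpf_wakeup() took before task_add() */
	bpf_put(d);
}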
bpf_wakeup now checks if there are waiting kevents and notifies
them immediately. if there are no other things to wake up, bpf_wakeup
avoids the task_add (and associated reference counting) to defer
the selwakeup call.
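
with the change applied, the hot path reads roughly as below; this is just the hunk from the diff further down, reassembled into the whole function for readability (the trailing braces fall outside the hunk's context and are filled in here).

void
bpf_wakeup(struct bpf_d *d)
{
	struct klist *klist;

	MUTEX_ASSERT_LOCKED(&d->bd_mtx);

	/* wake up blocking bpfread() callers */
	if (d->bd_nreaders)
		wakeup(d);

	/* notify waiting kevents directly, without taking the kernel lock */
	klist = &d->bd_sel.si_note;
	if (!klist_empty(klist))
		knote(klist, 0);

	/*
	 * As long as pgsigio() and selwakeup() need to be protected
	 * by the KERNEL_LOCK() we have to delay the wakeup to
	 * another context to keep the hot path KERNEL_LOCK()-free.
	 */
	if ((d->bd_async && d->bd_sig) || d->bd_sel.si_seltid != 0) {
		bpf_get(d);
		if (!task_add(systq, &d->bd_wake_task))
			bpf_put(d);
	}
}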
selwakeup can still try to notify waiting kevents, so this uses the
hint passed to knote() to tell the bpf_wakeup and selwakeup
notifications apart, and the filter returns early for the latter.
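
in code, the filter side of that is the last hunk of the diff; the comments below spell out where each hint value comes from (the NOTE_SUBMIT-from-selwakeup detail is inferred from this commit, not shown in it).

int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;

	/*
	 * bpf_wakeup() now activates the klist with knote(klist, 0),
	 * so hint is 0 on the direct path. the deferred selwakeup()
	 * call activates the same klist with NOTE_SUBMIT (inferred:
	 * that hint value comes from the select/poll code, not this
	 * diff), and that duplicate activation is ignored here.
	 */
	if (hint == NOTE_SUBMIT)
		return (0);

	return (filt_bpfread_common(kn, d));
}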
ok visa@
-rw-r--r--	sys/net/bpf.c	14
1 file changed, 11 insertions, 3 deletions
diff --git a/sys/net/bpf.c b/sys/net/bpf.c
index 31892af3415..ee2708249af 100644
--- a/sys/net/bpf.c
+++ b/sys/net/bpf.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: bpf.c,v 1.209 2022/01/13 14:15:27 visa Exp $	*/
+/*	$OpenBSD: bpf.c,v 1.210 2022/01/16 06:27:14 dlg Exp $	*/
 /*	$NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $	*/
 
 /*
@@ -566,18 +566,23 @@ out:
 void
 bpf_wakeup(struct bpf_d *d)
 {
+	struct klist *klist;
+
 	MUTEX_ASSERT_LOCKED(&d->bd_mtx);
 
 	if (d->bd_nreaders)
 		wakeup(d);
 
+	klist = &d->bd_sel.si_note;
+	if (!klist_empty(klist))
+		knote(klist, 0);
+
 	/*
 	 * As long as pgsigio() and selwakeup() need to be protected
 	 * by the KERNEL_LOCK() we have to delay the wakeup to
 	 * another context to keep the hot path KERNEL_LOCK()-free.
	 */
-	if ((d->bd_async && d->bd_sig) ||
-	    (!klist_empty(&d->bd_sel.si_note) || d->bd_sel.si_seltid != 0)) {
+	if ((d->bd_async && d->bd_sig) || d->bd_sel.si_seltid != 0) {
 		bpf_get(d);
 		if (!task_add(systq, &d->bd_wake_task))
 			bpf_put(d);
@@ -1229,6 +1234,9 @@ filt_bpfread(struct knote *kn, long hint)
 {
 	struct bpf_d *d = kn->kn_hook;
 
+	if (hint == NOTE_SUBMIT)	/* ignore activation from selwakeup */
+		return (0);
+
 	return (filt_bpfread_common(kn, d));
 }
 
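
for context, a minimal userland sketch of the consumer that benefits from the lower latency: a bpf descriptor registered with kqueue and woken by the EVFILT_READ filter above. the interface name "em0" is a placeholder, and the usual BIOCIMMEDIATE/buffer-length setup is omitted for brevity.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/time.h>

#include <net/if.h>
#include <net/bpf.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	struct kevent kev;
	int bpf_fd, kq;

	/* open a bpf descriptor and bind it to an interface */
	bpf_fd = open("/dev/bpf", O_RDONLY);
	if (bpf_fd == -1)
		err(1, "open /dev/bpf");

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));	/* placeholder */
	if (ioctl(bpf_fd, BIOCSETIF, &ifr) == -1)
		err(1, "BIOCSETIF");

	/* register interest in readability with kqueue */
	kq = kqueue();
	if (kq == -1)
		err(1, "kqueue");
	EV_SET(&kev, bpf_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register");

	/* block until bpf_wakeup() activates the knote for this fd */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent: wait");

	/* kev.data reports how much buffered capture is ready to read(2) */
	close(kq);
	close(bpf_fd);
	return (0);
}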