author     David Gwynne <dlg@cvs.openbsd.org>    2023-03-09 05:56:59 +0000
committer  David Gwynne <dlg@cvs.openbsd.org>    2023-03-09 05:56:59 +0000
commit     c7067d12974d5b1bc55027de824c43c7026dc983 (patch)
tree       e9143729a0370d3abd5e570c9ad4a1ac8b04b61b
parent     1844fbe0da7e4da94504c0d83ea763e02dfa22b4 (diff)
add a timeout between capturing a packet and making the buffer readable.
before this, there were three reasons that a bpf read would finish. the first is the obvious one: the bpf packet buffer in the kernel fills up. by default this is about 32k, so if you're only capturing a small packet every few seconds, it can take a long time for the buffer to fill up before you can read them.

the second is if bpf has been configured to enable immediate mode with ioctl(BIOCIMMEDIATE). this means that when any packet is written into the bpf buffer, the buffer is immediately readable. this is fine if the packet rate is low, but if the packet rate is high you don't get the benefit of buffering many packets that bpf is supposed to provide.

the third mechanism is if bpf has been configured with the BIOCSRTIMEOUT ioctl, which sets a maximum wait time on a bpf read. BIOCSRTIMEOUT means that a clock starts ticking down when a program (eg pflogd) reads from bpf. when the clock reaches zero, the read returns with whatever is in the bpf packet buffer. however, there could be nothing in the buffer, and the read will still complete.

deraadt@ noticed this behaviour with pflogd. it wants packets logged by pf to end up on disk in a timely fashion, but it's fine with tolerating a bit of delay so it can take advantage of buffering to amortise the cost of the reads per packet. it currently does this with BIOCSRTIMEOUT set to half a second, which means it's always waking up every half second even if there's nothing to log.

this diff adds BIOCSWTIMEOUT, which specifies a timeout between when bpf first puts a packet in the capture buffer and when the buffer becomes readable. by default this wait timeout is infinite, meaning the buffer has to be filled before it becomes readable. BIOCSWTIMEOUT can be set to enable the new functionality. BIOCIMMEDIATE is turned into a variation of BIOCSWTIMEOUT with the wait time set to 0, ie, wait 0 seconds between when a packet is written to the buffer and when the buffer becomes readable. combining BIOCSWTIMEOUT and BIOCIMMEDIATE simplifies the code a lot.

for pflogd, this means if there are no packets to capture, pflogd won't wake up every half second to do nothing. however, when a packet is logged by pf, bpf will wait another half second to see if any more packets arrive (or the buffer fills up) before the read fires.

discussed a lot with deraadt@ and sashan@

ok sashan@
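as an illustration only (not part of this commit): a minimal sketch of how a reader such as pflogd might opt into the new wait timeout, assuming fd is an already-open and configured /dev/bpf descriptor and set_wait_timeout is a hypothetical helper. per the bpf.h hunk below, BIOCSWTIMEOUT takes a struct timeval.

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <sys/time.h>
	#include <net/bpf.h>
	#include <err.h>

	/*
	 * sketch: make the buffer readable at most half a second after the
	 * first captured packet lands in it, instead of waking the reader
	 * every half second regardless (the old BIOCSRTIMEOUT behaviour).
	 * "fd" is assumed to be an open /dev/bpf descriptor.
	 */
	void
	set_wait_timeout(int fd)
	{
		struct timeval wtimeout = { .tv_sec = 0, .tv_usec = 500000 };

		if (ioctl(fd, BIOCSWTIMEOUT, &wtimeout) == -1)
			err(1, "BIOCSWTIMEOUT");
	}

per the bpfioctl() hunk below, BIOCDWTIMEOUT would put the descriptor back to the default infinite wait, and BIOCIMMEDIATE now behaves as a wait timeout of zero.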
-rw-r--r--    sys/net/bpf.c        119
-rw-r--r--    sys/net/bpf.h          5
-rw-r--r--    sys/net/bpfdesc.h      9

3 files changed, 123 insertions, 10 deletions
diff --git a/sys/net/bpf.c b/sys/net/bpf.c
index 3ac175e459a..b05c5177f6e 100644
--- a/sys/net/bpf.c
+++ b/sys/net/bpf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bpf.c,v 1.220 2023/02/10 14:34:17 visa Exp $ */
+/* $OpenBSD: bpf.c,v 1.221 2023/03/09 05:56:58 dlg Exp $ */
/* $NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $ */
/*
@@ -77,6 +77,10 @@
#define BPF_BUFSIZE 32768
+#define BPF_S_IDLE 0
+#define BPF_S_WAIT 1
+#define BPF_S_DONE 2
+
#define PRINET 26 /* interruptible */
/*
@@ -101,6 +105,7 @@ int bpf_setif(struct bpf_d *, struct ifreq *);
int bpfkqfilter(dev_t, struct knote *);
void bpf_wakeup(struct bpf_d *);
void bpf_wakeup_cb(void *);
+void bpf_wait_cb(void *);
int _bpf_mtap(caddr_t, const struct mbuf *, const struct mbuf *, u_int);
void bpf_catchpacket(struct bpf_d *, u_char *, size_t, size_t,
const struct bpf_hdr *);
@@ -392,11 +397,13 @@ bpfopen(dev_t dev, int flag, int mode, struct proc *p)
bd->bd_sig = SIGIO;
mtx_init(&bd->bd_mtx, IPL_NET);
task_set(&bd->bd_wake_task, bpf_wakeup_cb, bd);
+ timeout_set(&bd->bd_wait_tmo, bpf_wait_cb, bd);
smr_init(&bd->bd_smr);
sigio_init(&bd->bd_sigio);
klist_init_mutex(&bd->bd_klist, &bd->bd_mtx);
bd->bd_rtout = 0; /* no timeout by default */
+ bd->bd_wtout = INFSLP; /* wait for the buffer to fill by default */
refcnt_init(&bd->bd_refcnt);
LIST_INSERT_HEAD(&bpf_d_list, bd, bd_list);
@@ -435,6 +442,7 @@ bpfclose(dev_t dev, int flag, int mode, struct proc *p)
(d)->bd_hbuf = (d)->bd_sbuf; \
(d)->bd_hlen = (d)->bd_slen; \
(d)->bd_sbuf = (d)->bd_fbuf; \
+ (d)->bd_state = BPF_S_IDLE; \
(d)->bd_slen = 0; \
(d)->bd_fbuf = NULL;
@@ -492,7 +500,7 @@ bpfread(dev_t dev, struct uio *uio, int ioflag)
ROTATE_BUFFERS(d);
break;
}
- if (d->bd_immediate && d->bd_slen != 0) {
+ if (d->bd_state == BPF_S_DONE) {
/*
* A packet(s) either arrived since the previous
* read or arrived while we were asleep.
@@ -611,6 +619,21 @@ bpf_wakeup_cb(void *xd)
bpf_put(d);
}
+void
+bpf_wait_cb(void *xd)
+{
+ struct bpf_d *d = xd;
+
+ mtx_enter(&d->bd_mtx);
+ if (d->bd_state == BPF_S_WAIT) {
+ d->bd_state = BPF_S_DONE;
+ bpf_wakeup(d);
+ }
+ mtx_leave(&d->bd_mtx);
+
+ bpf_put(d);
+}
+
int
bpfwrite(dev_t dev, struct uio *uio, int ioflag)
{
@@ -674,17 +697,64 @@ bpf_resetd(struct bpf_d *d)
MUTEX_ASSERT_LOCKED(&d->bd_mtx);
KASSERT(d->bd_in_uiomove == 0);
+ if (timeout_del(&d->bd_wait_tmo))
+ bpf_put(d);
+
if (d->bd_hbuf != NULL) {
/* Free the hold buffer. */
d->bd_fbuf = d->bd_hbuf;
d->bd_hbuf = NULL;
}
+ d->bd_state = BPF_S_IDLE;
d->bd_slen = 0;
d->bd_hlen = 0;
d->bd_rcount = 0;
d->bd_dcount = 0;
}
+static int
+bpf_set_wtout(struct bpf_d *d, uint64_t wtout)
+{
+ mtx_enter(&d->bd_mtx);
+ d->bd_wtout = wtout;
+ mtx_leave(&d->bd_mtx);
+
+ return (0);
+}
+
+static int
+bpf_set_wtimeout(struct bpf_d *d, const struct timeval *tv)
+{
+ uint64_t nsec;
+
+ if (tv->tv_sec < 0 || !timerisvalid(tv))
+ return (EINVAL);
+
+ nsec = TIMEVAL_TO_NSEC(tv);
+ if (nsec > MAXTSLP)
+ return (EOVERFLOW);
+
+ return (bpf_set_wtout(d, nsec));
+}
+
+static int
+bpf_get_wtimeout(struct bpf_d *d, struct timeval *tv)
+{
+ uint64_t nsec;
+
+ mtx_enter(&d->bd_mtx);
+ nsec = d->bd_wtout;
+ mtx_leave(&d->bd_mtx);
+
+ if (nsec == INFSLP)
+ return (ENXIO);
+
+ memset(tv, 0, sizeof(*tv));
+ NSEC_TO_TIMEVAL(nsec, tv);
+
+ return (0);
+}
+
/*
* FIONREAD Check for read packet available.
* BIOCGBLEN Get buffer len [for read()].
@@ -698,6 +768,9 @@ bpf_resetd(struct bpf_d *d)
* BIOCSETIF Set interface.
* BIOCSRTIMEOUT Set read timeout.
* BIOCGRTIMEOUT Get read timeout.
+ * BIOCSWTIMEOUT Set wait timeout.
+ * BIOCGWTIMEOUT Get wait timeout.
+ * BIOCDWTIMEOUT Del wait timeout.
* BIOCGSTATS Get packet stats.
* BIOCIMMEDIATE Set immediate mode.
* BIOCVERSION Get filter language version.
@@ -720,6 +793,7 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
case BIOCGDLTLIST:
case BIOCGETIF:
case BIOCGRTIMEOUT:
+ case BIOCGWTIMEOUT:
case BIOCGSTATS:
case BIOCVERSION:
case BIOCGRSIG:
@@ -727,6 +801,8 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
case FIONREAD:
case BIOCLOCK:
case BIOCSRTIMEOUT:
+ case BIOCSWTIMEOUT:
+ case BIOCDWTIMEOUT:
case BIOCIMMEDIATE:
case TIOCGPGRP:
case BIOCGDIRFILT:
@@ -933,7 +1009,20 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
* Set immediate mode.
*/
case BIOCIMMEDIATE:
- d->bd_immediate = *(u_int *)addr;
+ error = bpf_set_wtout(d, *(int *)addr ? 0 : INFSLP);
+ break;
+
+ /*
+ * Wait timeout.
+ */
+ case BIOCSWTIMEOUT:
+ error = bpf_set_wtimeout(d, (const struct timeval *)addr);
+ break;
+ case BIOCGWTIMEOUT:
+ error = bpf_get_wtimeout(d, (struct timeval *)addr);
+ break;
+ case BIOCDWTIMEOUT:
+ error = bpf_set_wtout(d, INFSLP);
break;
case BIOCVERSION:
@@ -1193,7 +1282,7 @@ filt_bpfread(struct knote *kn, long hint)
MUTEX_ASSERT_LOCKED(&d->bd_mtx);
kn->kn_data = d->bd_hlen;
- if (d->bd_immediate)
+ if (d->bd_wtout == 0)
kn->kn_data += d->bd_slen;
return (kn->kn_data > 0);
@@ -1510,6 +1599,11 @@ bpf_catchpacket(struct bpf_d *d, u_char *pkt, size_t pktlen, size_t snaplen,
++d->bd_dcount;
return;
}
+
+ /* cancel pending wtime */
+ if (timeout_del(&d->bd_wait_tmo))
+ bpf_put(d);
+
ROTATE_BUFFERS(d);
do_wakeup = 1;
curlen = 0;
@@ -1530,12 +1624,27 @@ bpf_catchpacket(struct bpf_d *d, u_char *pkt, size_t pktlen, size_t snaplen,
bpf_mcopy(pkt, (u_char *)bh + hdrlen, bh->bh_caplen);
d->bd_slen = curlen + totlen;
- if (d->bd_immediate) {
+ switch (d->bd_wtout) {
+ case 0:
/*
* Immediate mode is set. A packet arrived so any
* reads should be woken up.
*/
+ if (d->bd_state == BPF_S_IDLE)
+ d->bd_state = BPF_S_DONE;
do_wakeup = 1;
+ break;
+ case INFSLP:
+ break;
+ default:
+ if (d->bd_state == BPF_S_IDLE) {
+ d->bd_state = BPF_S_WAIT;
+
+ bpf_get(d);
+ if (!timeout_add_nsec(&d->bd_wait_tmo, d->bd_wtout))
+ bpf_put(d);
+ }
+ break;
}
if (do_wakeup)
diff --git a/sys/net/bpf.h b/sys/net/bpf.h
index f5e0fa91e69..9ce4dcd63a4 100644
--- a/sys/net/bpf.h
+++ b/sys/net/bpf.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bpf.h,v 1.70 2020/08/03 03:21:24 dlg Exp $ */
+/* $OpenBSD: bpf.h,v 1.71 2023/03/09 05:56:58 dlg Exp $ */
/* $NetBSD: bpf.h,v 1.15 1996/12/13 07:57:33 mikel Exp $ */
/*
@@ -119,6 +119,9 @@ struct bpf_version {
#define BIOCGDLTLIST _IOWR('B',123, struct bpf_dltlist)
#define BIOCGDIRFILT _IOR('B',124, u_int)
#define BIOCSDIRFILT _IOW('B',125, u_int)
+#define BIOCSWTIMEOUT _IOW('B',126, struct timeval)
+#define BIOCGWTIMEOUT _IOR('B',126, struct timeval)
+#define BIOCDWTIMEOUT _IO('B',126)
/*
* Direction filters for BIOCSDIRFILT/BIOCGDIRFILT
diff --git a/sys/net/bpfdesc.h b/sys/net/bpfdesc.h
index 20a7fd0f0f0..76b75ff5a20 100644
--- a/sys/net/bpfdesc.h
+++ b/sys/net/bpfdesc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bpfdesc.h,v 1.47 2022/07/09 12:48:21 visa Exp $ */
+/* $OpenBSD: bpfdesc.h,v 1.48 2023/03/09 05:56:58 dlg Exp $ */
/* $NetBSD: bpfdesc.h,v 1.11 1995/09/27 18:30:42 thorpej Exp $ */
/*
@@ -79,6 +79,7 @@ struct bpf_d {
struct bpf_if *bd_bif; /* interface descriptor */
uint64_t bd_rtout; /* [m] Read timeout in nanoseconds */
+ uint64_t bd_wtout; /* [m] Wait time in nanoseconds */
u_long bd_nreaders; /* [m] # threads asleep in bpfread() */
struct bpf_program_smr
*bd_rfilter; /* read filter code */
@@ -88,8 +89,7 @@ struct bpf_d {
u_long bd_dcount; /* number of packets dropped */
u_char bd_promisc; /* true if listening promiscuously */
- u_char bd_state; /* idle, waiting, or timed out */
- u_char bd_immediate; /* true to return on packet arrival */
+ u_char bd_state; /* [m] idle, waiting, or timed out */
u_char bd_locked; /* true if descriptor is locked */
u_char bd_fildrop; /* true if filtered packets will be dropped */
u_char bd_dirfilt; /* direction filter */
@@ -103,7 +103,8 @@ struct bpf_d {
int bd_unit; /* logical unit number */
LIST_ENTRY(bpf_d) bd_list; /* descriptor list */
- struct task bd_wake_task; /* delay pgsigio() and selwakeup() */
+ struct task bd_wake_task; /* defer pgsigio() and selwakeup() */
+ struct timeout bd_wait_tmo; /* delay wakeup after catching pkt */
struct smr_entry
bd_smr;