summaryrefslogtreecommitdiff
path: root/sys/net/bpf_filter.c
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2020-08-03 03:21:25 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2020-08-03 03:21:25 +0000
commit65d5e51cda1c03a5d1cac9d93f2894ccd1fbc805 (patch)
tree90e45fca700fd393e48e4ef7447921d899f40db0 /sys/net/bpf_filter.c
parent7405517931404a9d5d6fbbe1a7c4f1eb0adfe591 (diff)
add a BPF_RND load location that provides a random value.
This will be used so a bpf filter can make a decision based on a random number, which in turn will be used so a filter can perform random sampling of packets rather than capturing all packets. Random sampling means that we don't have to figure out how to make bpf coordinate multiple concurrent calls to do counter-based sampling. BPF_RND is currently backed with arc4random. Discussed with many, including jmatthew@, alex wilson, claudio@, sthen@, deraadt@, and tb@. ok kn@ tb@ jmatthew@. I call this extended bpf... xBPF.
Diffstat (limited to 'sys/net/bpf_filter.c')
-rw-r--r--sys/net/bpf_filter.c7
1 file changed, 6 insertions, 1 deletion
diff --git a/sys/net/bpf_filter.c b/sys/net/bpf_filter.c
index e46bf0799ba..d6561ec5923 100644
--- a/sys/net/bpf_filter.c
+++ b/sys/net/bpf_filter.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bpf_filter.c,v 1.33 2017/09/08 05:36:53 deraadt Exp $ */
+/* $OpenBSD: bpf_filter.c,v 1.34 2020/08/03 03:21:24 dlg Exp $ */
/* $NetBSD: bpf_filter.c,v 1.12 1996/02/13 22:00:00 christos Exp $ */
/*
@@ -199,6 +199,10 @@ _bpf_filter(const struct bpf_insn *pc, const struct bpf_ops *ops,
X = wirelen;
continue;
+ case BPF_LD|BPF_W|BPF_RND:
+ A = arc4random();
+ continue;
+
case BPF_LD|BPF_W|BPF_IND:
k = X + pc->k;
A = ops->ldw(pkt, k, &err);
@@ -414,6 +418,7 @@ bpf_validate(struct bpf_insn *f, int len)
return 0;
break;
case BPF_LEN:
+ case BPF_RND:
break;
default:
return 0;