author     Cedric Berger <cedric@cvs.openbsd.org>    2002-12-29 20:07:35 +0000
committer  Cedric Berger <cedric@cvs.openbsd.org>    2002-12-29 20:07:35 +0000
commit     b57ae452b4f7a90dee3b5e0e2fe313ad76a0b91f (patch)
tree       8576540b508e1bbb4c59c9916cf050f93b863291
parent     311b67e4b4a4804e1693c21aae59a1bea7eff88b (diff)
Add support for radix tables for source and destination of PF rules.
ok dhartmei@, mcbride@, henning@
-rw-r--r--  share/man/man4/pf.4 |  107
-rw-r--r--  sys/conf/files      |    3
-rw-r--r--  sys/net/pf.c        |   29
-rw-r--r--  sys/net/pf_ioctl.c  |  155
-rw-r--r--  sys/net/pf_table.c  | 1347
-rw-r--r--  sys/net/pfvar.h     |  153
6 files changed, 1781 insertions, 13 deletions
diff --git a/share/man/man4/pf.4 b/share/man/man4/pf.4
index b635f49e9ec..e62ddc10572 100644
--- a/share/man/man4/pf.4
+++ b/share/man/man4/pf.4
@@ -1,4 +1,4 @@
-.\" $OpenBSD: pf.4,v 1.24 2002/12/22 20:02:54 mcbride Exp $
+.\" $OpenBSD: pf.4,v 1.25 2002/12/29 20:07:34 cedric Exp $
.\"
.\" Copyright (C) 2001, Kjell Wooding. All rights reserved.
.\"
@@ -361,6 +361,111 @@ struct pfioc_limit {
};
.Ed
.It Dv DIOCGETLIMIT Fa "struct pfioc_limit"
+.It Dv DIOCRCLRTABLES Fa "struct pfioc_table"
+Clear all tables. All the IOCTLs that manipulate radix tables
+use the same structure described below.
+For
+.Dv DIOCRCLRTABLES, pfrio_ndel contains on exit the number
+of tables deleted.
+.Bd -literal
+struct pfioc_table {
+ struct pfr_table pfrio_table;
+ void *pfrio_buffer;
+ int pfrio_size;
+ int pfrio_size2;
+ int pfrio_nadd;
+ int pfrio_ndel;
+ int pfrio_nchange;
+ int pfrio_flags;
+};
+#define pfrio_exists pfrio_nadd
+#define pfrio_nzero pfrio_nadd
+.Ed
+.It Dv DIOCRADDTABLES Fa "struct pfioc_table"
+Creates one or more tables.
+On entry, pfrio_buffer[pfrio_size] contains an array of pfr_table structures.
+On exit, pfrio_nadd contains the number of tables effectively created.
+.It Dv DIOCRDELTABLES Fa "struct pfioc_table"
+Deletes one or more tables.
+On entry, pfrio_buffer[pfrio_size] contains an array of pfr_table structures.
+On exit, pfrio_ndel contains the number of tables effectively deleted.
+.It Dv DIOCRGETTABLES Fa "struct pfioc_table"
+Get the list of all tables.
+On entry, pfrio_buffer[pfrio_size] contains a valid writeable buffer for
+pfr_table structures.
+On exit, pfrio_size contains the number of tables written into the buffer.
+If the buffer is too small, the kernel does not store anything but just
+returns the required buffer size, without error.
+.It Dv DIOCRGETTSTATS Fa "struct pfioc_table"
+Like
+.Dv DIOCRGETTABLES, but returns an array of pfr_tstats structures.
+.It Dv DIOCRCLRTSTATS Fa "struct pfioc_table"
+Clears the statistics of one or more tables.
+On entry, pfrio_buffer[pfrio_size] contains an array of pfr_table structures.
+On exit, pfrio_nzero contains the number of tables effectively cleared.
+.It Dv DIOCRCLRADDRS Fa "struct pfioc_table"
+Clear all addresses in a table.
+On entry, pfrio_table contains the table to clear.
+On exit, pfrio_ndel contains the number of addresses removed.
+.It Dv DIOCRADDADDRS Fa "struct pfioc_table"
+Add one or more addresses to a table.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains the list of pfr_addr structures to add.
+On exit, pfrio_nadd contains the number of addresses effectively added.
+.It Dv DIOCRDELADDRS Fa "struct pfioc_table"
+Delete one or more addresses from a table.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains the list of pfr_addr structures to delete.
+On exit, pfrio_ndel contains the number of addresses effectively deleted.
+.It Dv DIOCRSETADDRS Fa "struct pfioc_table"
+Replace the content of a table with a new address list.
+This is the most complicated command, which uses all the structure members.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains the new list of pfr_addr structures.
+In addition, if pfrio_size2 is nonzero, pfrio_buffer[pfrio_size..pfrio_size2]
+must be a writeable buffer, into which the kernel can copy the addresses that
+have been deleted during the replace operation.
+On exit, pfrio_ndel, pfrio_nadd and pfrio_nchange contain the number of
+addresses deleted, added and changed by the kernel.  If pfrio_size2 was set
+on entry, pfrio_size2 will contain the size of the buffer used, exactly like
+.Dv DIOCRGETADDRS.
+.It Dv DIOCRGETADDRS Fa "struct pfioc_table"
+Get all the addresses of a table.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains a valid writeable buffer for pfr_addr structures.
+On exit, pfrio_size contains the number of addresses written into the buffer.
+If the buffer is too small, the kernel does not store anything but just
+returns the required buffer size, without returning an error.
+.It Dv DIOCRGETASTATS Fa "struct pfioc_table"
+Like
+.Dv DIOCRGETADDRS, but returns an array of pfr_astats structures.
+.It Dv DIOCRCLRASTATS Fa "struct pfioc_table"
+Clears the statistics of one or more addresses.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains a list of pfr_addr structures to clear.
+On exit, pfrio_nzero contains the number of addresses effectively cleared.
+.It Dv DIOCRTSTADDRS Fa "struct pfioc_table"
+Test if the given addresses match a table.
+On entry, pfrio_table contains the table ID and pfrio_buffer[pfrio_size]
+contains a list of pfr_addr structures to test.
+On exit, the kernel updates the pfr_addr list by setting the pfra_fback
+member appropriately.
+.It Dv DIOCRWRAPTABLE Fa "struct pfioc_table"
+Compute the SHA1 hash of a table and pack it into a pf_addr_wrap structure,
+along with a magic mask in the first word of the mask.
+On entry, pfrio_table contains the table ID, and pfrio_buffer[pfrio_size]
+should point to a buffer large enough to hold one pf_addr_wrap structure.
+If the kernel should check whether the table exists, pfrio_exists must be
+set to a nonzero value.
+On exit, the kernel fills the pf_addr_wrap structure and sets pfrio_exists
+if that flag was requested.
+.It Dv DIOCRUNWRTABLE Fa "struct pfioc_table"
+Do the opposite of
+.Dv DIOCRWRAPTABLE, and look up a table from its hash value.
+On entry, pfrio_buffer[pfrio_size] should point to a pf_addr_wrap structure
+(a one-entry table).
+On exit, the kernel fills pfrio_table or returns ENOENT if it cannot find
+the matching table.
.El
.Sh EXAMPLES
The following example demonstrates how to use the DIOCNATLOOK command
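As an illustration of the interface documented above (not part of this patch), a
minimal userland sketch that creates a table and adds one network to it through
the documented ioctls; the device path /dev/pf, the table name "badhosts" and
the example address are assumptions made for the example only.

/* Minimal sketch, assuming /dev/pf and an example table named "badhosts". */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
    struct pfioc_table io;
    struct pfr_table tbl;
    struct pfr_addr ad;
    int dev;

    if ((dev = open("/dev/pf", O_RDWR)) == -1)
        err(1, "open(/dev/pf)");

    /* create the table (DIOCRADDTABLES takes an array of pfr_table) */
    memset(&io, 0, sizeof(io));
    memset(&tbl, 0, sizeof(tbl));
    strlcpy(tbl.pfrt_name, "badhosts", sizeof(tbl.pfrt_name));
    io.pfrio_buffer = &tbl;
    io.pfrio_size = 1;
    if (ioctl(dev, DIOCRADDTABLES, &io) == -1)
        err(1, "DIOCRADDTABLES");

    /* add 192.0.2.0/24 (DIOCRADDADDRS takes an array of pfr_addr) */
    memset(&io, 0, sizeof(io));
    memset(&ad, 0, sizeof(ad));
    ad.pfra_af = AF_INET;
    ad.pfra_net = 24;
    inet_pton(AF_INET, "192.0.2.0", &ad.pfra_ip4addr);
    io.pfrio_table = tbl;
    io.pfrio_buffer = &ad;
    io.pfrio_size = 1;
    if (ioctl(dev, DIOCRADDADDRS, &io) == -1)
        err(1, "DIOCRADDADDRS");

    close(dev);
    return (0);
}

Note that pfrio_size counts pfr_table or pfr_addr structures, not bytes, which
is why both calls simply pass 1.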
diff --git a/sys/conf/files b/sys/conf/files
index b5dfa3d416e..d5bdaca371f 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-# $OpenBSD: files,v 1.261 2002/12/18 00:15:21 henning Exp $
+# $OpenBSD: files,v 1.262 2002/12/29 20:07:34 cedric Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -354,6 +354,7 @@ pseudo-device pf: ifnet
file net/pf.c pf needs-flag
file net/pf_norm.c pf
file net/pf_ioctl.c pf
+file net/pf_table.c pf
pseudo-device pflog: ifnet
file net/if_pflog.c pflog needs-flag
pseudo-device pfsync: ifnet
diff --git a/sys/net/pf.c b/sys/net/pf.c
index 7fd4179502e..e0c0a874187 100644
--- a/sys/net/pf.c
+++ b/sys/net/pf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf.c,v 1.287 2002/12/27 21:43:58 mcbride Exp $ */
+/* $OpenBSD: pf.c,v 1.288 2002/12/29 20:07:34 cedric Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -141,6 +141,7 @@ int *pftm_timeouts[PFTM_MAX] = { &pftm_tcp_first_packet,
struct pool pf_tree_pl, pf_rule_pl, pf_addr_pl;
struct pool pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+struct pool pfr_ktable_pl, pfr_kentry_pl;
void pf_addrcpy(struct pf_addr *, struct pf_addr *,
sa_family_t);
@@ -738,10 +739,8 @@ pf_calc_skip_steps(struct pf_rulequeue *rules)
if (cur->src.addr.addr_dyn != NULL ||
prev->src.addr.addr_dyn != NULL ||
cur->src.not != prev->src.not ||
- !PF_AEQ(&cur->src.addr.addr, &prev->src.addr.addr,
- cur->af) ||
- !PF_AEQ(&cur->src.addr.mask, &prev->src.addr.mask,
- cur->af))
+ !PF_AEQ(&cur->src.addr.addr, &prev->src.addr.addr, 0) ||
+ !PF_AEQ(&cur->src.addr.mask, &prev->src.addr.mask, 0))
PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
if (cur->src.port[0] != prev->src.port[0] ||
cur->src.port[1] != prev->src.port[1] ||
@@ -750,10 +749,8 @@ pf_calc_skip_steps(struct pf_rulequeue *rules)
if (cur->dst.addr.addr_dyn != NULL ||
prev->dst.addr.addr_dyn != NULL ||
cur->dst.not != prev->dst.not ||
- !PF_AEQ(&cur->dst.addr.addr, &prev->dst.addr.addr,
- cur->af) ||
- !PF_AEQ(&cur->dst.addr.mask, &prev->dst.addr.mask,
- cur->af))
+ !PF_AEQ(&cur->dst.addr.addr, &prev->dst.addr.addr, 0) ||
+ !PF_AEQ(&cur->dst.addr.mask, &prev->dst.addr.mask, 0))
PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
if (cur->dst.port[0] != prev->dst.port[0] ||
cur->dst.port[1] != prev->dst.port[1] ||
@@ -1156,6 +1153,9 @@ pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
{
int match = 0;
+ if (m->addr32[0] == PF_TABLE_MASK)
+ return (pfr_match_addr(a, m, b, af) != n);
+
switch (af) {
#ifdef INET
case AF_INET:
@@ -4191,6 +4191,17 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0)
}
done:
+ if (r != NULL && r->src.addr.mask.addr32[0] == PF_TABLE_MASK)
+ pfr_update_stats(&r->src.addr.addr, &r->src.addr.mask,
+ (r->direction == dir) ? pd.src : pd.dst,
+ pd.af, pd.tot_len, dir == PF_OUT,
+ r->action == PF_PASS, r->src.not);
+ if (r != NULL && r->dst.addr.mask.addr32[0] == PF_TABLE_MASK)
+ pfr_update_stats(&r->dst.addr.addr, &r->dst.addr.mask,
+ (r->direction == dir) ? pd.dst : pd.src,
+ pd.af, pd.tot_len, dir == PF_OUT,
+ r->action == PF_PASS, r->dst.not);
+
if (action != PF_DROP && h->ip_hl > 5 &&
!((s && s->allow_opts) || (r && r->allow_opts))) {
action = PF_DROP;
diff --git a/sys/net/pf_ioctl.c b/sys/net/pf_ioctl.c
index b9a236121c7..99ea25fd041 100644
--- a/sys/net/pf_ioctl.c
+++ b/sys/net/pf_ioctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_ioctl.c,v 1.33 2002/12/27 21:45:14 mcbride Exp $ */
+/* $OpenBSD: pf_ioctl.c,v 1.34 2002/12/29 20:07:34 cedric Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -104,6 +104,10 @@ pfattach(int num)
NULL);
pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
"pfpooladdrpl", NULL);
+ pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0,
+ "pfr_ktable", NULL);
+ pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0,
+ "pfr_kentry", NULL);
TAILQ_INIT(&pf_anchors);
pf_init_ruleset(&pf_main_ruleset);
@@ -430,6 +434,19 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
case DIOCGETANCHOR:
case DIOCGETRULESETS:
case DIOCGETRULESET:
+ case DIOCRGETTABLES:
+ case DIOCRGETTSTATS:
+ case DIOCRCLRTSTATS:
+ case DIOCRCLRADDRS:
+ case DIOCRADDADDRS:
+ case DIOCRDELADDRS:
+ case DIOCRSETADDRS:
+ case DIOCRGETADDRS:
+ case DIOCRGETASTATS:
+ case DIOCRCLRASTATS:
+ case DIOCRTSTADDRS:
+ case DIOCRWRAPTABLE:
+ case DIOCRUNWRTABLE:
break;
default:
return (EPERM);
@@ -453,6 +470,13 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
case DIOCGETANCHOR:
case DIOCGETRULESETS:
case DIOCGETRULESET:
+ case DIOCRGETTABLES:
+ case DIOCRGETTSTATS:
+ case DIOCRGETADDRS:
+ case DIOCRGETASTATS:
+ case DIOCRTSTADDRS:
+ case DIOCRWRAPTABLE:
+ case DIOCRUNWRTABLE:
break;
default:
return (EACCES);
@@ -1672,6 +1696,135 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
break;
}
+ case DIOCRCLRTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_clr_tables(&io->pfrio_ndel, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRADDTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_nadd, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRDELTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_ndel, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRGETTABLES: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_get_tables(io->pfrio_buffer, &io->pfrio_size,
+ io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRGETTSTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_get_tstats(io->pfrio_buffer, &io->pfrio_size,
+ io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRCLRTSTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
+ &io->pfrio_nzero, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRCLRADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
+ io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRADDADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRDELADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRSETADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
+ &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRGETADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRGETASTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
+ &io->pfrio_size, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRCLRASTATS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRTSTADDRS: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_size, io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRWRAPTABLE: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_wrap_table(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_exists ? &io->pfrio_exists : NULL,
+ io->pfrio_flags);
+ break;
+ }
+
+ case DIOCRUNWRTABLE: {
+ struct pfioc_table *io = (struct pfioc_table *)addr;
+
+ error = pfr_unwrap_table(&io->pfrio_table, io->pfrio_buffer,
+ io->pfrio_flags);
+ break;
+ }
+
default:
error = ENODEV;
break;
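The DIOCRGET* cases above pass &io->pfrio_size into the pfr_get_* functions,
which is how the sizing convention from pf.4 is implemented: when the buffer is
too small, only the required size comes back.  A hedged sketch of the resulting
two-call pattern for DIOCRGETADDRS follows; the function name and the caller's
handling are illustrative only.

/* Sketch only: dump a table's addresses using the two-call sizing pattern. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
print_table_addrs(int dev, const char *name)
{
    struct pfioc_table io;
    struct pfr_addr *buf;
    int i;

    memset(&io, 0, sizeof(io));
    strlcpy(io.pfrio_table.pfrt_name, name,
        sizeof(io.pfrio_table.pfrt_name));
    io.pfrio_size = 0;                  /* too small: kernel fills in size */
    if (ioctl(dev, DIOCRGETADDRS, &io) == -1)
        return (-1);
    if (io.pfrio_size == 0)
        return (0);                     /* empty table */

    if ((buf = calloc(io.pfrio_size, sizeof(*buf))) == NULL)
        return (-1);
    io.pfrio_buffer = buf;              /* second call copies the addresses */
    if (ioctl(dev, DIOCRGETADDRS, &io) == -1) {
        free(buf);
        return (-1);
    }
    /* a careful caller would retry if the table grew between the two calls */
    for (i = 0; i < io.pfrio_size; i++)
        printf("af=%u net=%u not=%u\n",
            buf[i].pfra_af, buf[i].pfra_net, buf[i].pfra_not);
    free(buf);
    return (0);
}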
diff --git a/sys/net/pf_table.c b/sys/net/pf_table.c
new file mode 100644
index 00000000000..ab0d1fea21c
--- /dev/null
+++ b/sys/net/pf_table.c
@@ -0,0 +1,1347 @@
+/* $OpenBSD: pf_table.c,v 1.1 2002/12/29 20:07:34 cedric Exp $ */
+
+/*
+ * Copyright (c) 2002 Cedric Berger
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/socket.h>
+#include <sys/mbuf.h>
+#include <sys/kernel.h>
+
+#include <net/if.h>
+#include <net/route.h>
+#include <netinet/in.h>
+#include <netinet/ip_ipsp.h>
+#include <net/pfvar.h>
+
+#include <crypto/sha1.h>
+
+#define ACCEPT_FLAGS(oklist) \
+ do { \
+ if ((flags & ~(oklist)) & \
+ PFR_FLAG_ALLMASK) \
+ return (EINVAL); \
+ } while(0)
+
+#define FILLIN_SIN(sin, addr) \
+ do { \
+ (sin).sin_len = sizeof(sin); \
+ (sin).sin_family = AF_INET; \
+ (sin).sin_addr = (addr); \
+ } while (0)
+
+#define FILLIN_SIN6(sin6, addr) \
+ do { \
+ (sin6).sin6_len = sizeof(sin6); \
+ (sin6).sin6_family = AF_INET6; \
+ (sin6).sin6_addr = (addr); \
+ } while (0)
+
+#define AF_BITS(af) (((af)==AF_INET)?32:128)
+#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
+#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
+
+struct pfr_walktree {
+ enum pfrw_op {
+ PFRW_MARK,
+ PFRW_SWEEP,
+ PFRW_ENQUEUE,
+ PFRW_GET_ADDRS,
+ PFRW_GET_ASTATS
+ } pfrw_op;
+ union {
+ struct pfr_addr *pfrw1_addr;
+ struct pfr_astats *pfrw1_astats;
+ struct pfr_kentryworkq *pfrw1_workq;
+ } pfrw_1;
+ int pfrw_free;
+};
+#define pfrw_addr pfrw_1.pfrw1_addr
+#define pfrw_astats pfrw_1.pfrw1_astats
+#define pfrw_workq pfrw_1.pfrw1_workq
+#define pfrw_cnt pfrw_free
+
+#define PFR_HASH_BUCKETS 1024
+#define PFR_HASH_BUCKET(hash) ((hash).pfrh_int32[1] & (PFR_HASH_BUCKETS-1))
+
+#define senderr(e) do { rv = (e); goto _bad; } while(0)
+
+struct pool pfr_ktable_pl;
+struct pool pfr_kentry_pl;
+struct sockaddr_in pfr_sin = { sizeof(pfr_sin), AF_INET };
+struct sockaddr_in6 pfr_sin6 = { sizeof(pfr_sin6), AF_INET6 };
+
+int pfr_validate_addr(struct pfr_addr *);
+int pfr_enqueue_addrs(struct pfr_ktable *,
+ struct pfr_kentryworkq *, int *);
+struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
+ struct pfr_addr *, int);
+struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, long);
+void pfr_destroy_kentry(struct pfr_kentry *);
+void pfr_destroy_kentries(struct pfr_kentryworkq *);
+int pfr_insert_kentries(struct pfr_ktable *,
+ struct pfr_kentryworkq *);
+void pfr_remove_kentries(struct pfr_ktable *,
+ struct pfr_kentryworkq *);
+void pfr_clstats_kentries(struct pfr_kentryworkq *, long);
+void pfr_reset_feedback(struct pfr_addr *, int);
+void pfr_prepare_network(union sockaddr_union *, int, int);
+int pfr_route_kentry(struct pfr_ktable *,
+ struct pfr_kentry *);
+int pfr_unroute_kentry(struct pfr_ktable *,
+ struct pfr_kentry *);
+void pfr_copyout_addr(struct pfr_addr *,
+ struct pfr_kentry *);
+int pfr_walktree(struct radix_node *, void *);
+void pfr_insert_ktables(struct pfr_ktableworkq *);
+void pfr_remove_ktables(struct pfr_ktableworkq *);
+void pfr_clstats_ktables(struct pfr_ktableworkq *, long,
+ int);
+struct pfr_ktable *pfr_create_ktable(struct pfr_table *, long);
+void pfr_destroy_ktable(struct pfr_ktable *);
+void pfr_destroy_ktables(struct pfr_ktableworkq *);
+int pfr_ktable_compare(struct pfr_ktable *,
+ struct pfr_ktable *);
+struct pfr_ktable *pfr_lookup_hash(union pfr_hash *);
+struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
+int pfr_match_addr(struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, sa_family_t);
+void pfr_update_stats(struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, sa_family_t, u_int64_t, int, int,
+ int);
+
+RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
+RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
+
+struct pfr_ktablehashq pfr_ktablehash[PFR_HASH_BUCKETS];
+struct pfr_ktablehead pfr_ktables;
+int pfr_ktable_cnt;
+
+
+int
+pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ int s, rv;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ rv = pfr_enqueue_addrs(kt, &workq, ndel);
+	if (rv)
+		return (rv);
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_kentries(kt, &workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (kt->pfrkt_cnt) {
+ printf("pfr_clr_addrs: corruption detected.");
+ kt->pfrkt_cnt = 0;
+ }
+ }
+ return (0);
+}
+
+int
+pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nadd, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s, xadd = 0;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EFAULT);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ ad.pfra_fback = (p == NULL) ?
+ PFR_FB_ADDED : PFR_FB_NONE;
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ if (p == NULL) {
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ p = pfr_create_kentry(&ad, tzero);
+ if (p == NULL)
+ senderr(ENOMEM);
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ }
+ xadd++;
+ } else if (p->pfrke_not != ad.pfra_not)
+ senderr(EEXIST);
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+		if (pfr_insert_kentries(kt, &workq)) {
+			if (flags & PFR_FLAG_ATOMIC)
+				splx(s);
+			senderr(ENOMEM);
+		}
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nadd != NULL)
+ *nadd = xadd;
+ return (0);
+_bad:
+ pfr_destroy_kentries(&workq);
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size);
+ return (rv);
+}
+
+int
+pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *ndel, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ ad.pfra_fback = (p != NULL) ?
+ PFR_FB_DELETED : PFR_FB_NONE;
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ xdel++;
+ }
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_kentries(kt, &workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+_bad:
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size);
+ return (rv);
+}
+
+int
+pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *size2, int *nadd, int *ndel, int *nchange, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq addq, delq, changeq;
+ struct pfr_walktree w;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s, xadd = 0, xdel = 0, xchange = 0;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_MARK;
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+ if (!rv)
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+ if (rv)
+ return (rv);
+
+ SLIST_INIT(&addq);
+ SLIST_INIT(&delq);
+ SLIST_INIT(&changeq);
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (p != NULL) {
+ p->pfrke_mark = 1;
+ if (p->pfrke_not != ad.pfra_not) {
+ SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
+ ad.pfra_fback = PFR_FB_CHANGED;
+ xchange++;
+ } else
+ ad.pfra_fback = PFR_FB_NONE;
+ } else {
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ p = pfr_create_kentry(&ad, tzero);
+ if (p == NULL)
+ senderr(ENOMEM);
+ SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
+ }
+ ad.pfra_fback = PFR_FB_ADDED;
+ xadd++;
+ }
+ if (flags & PFR_FLAG_FEEDBACK)
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ w.pfrw_op = PFRW_SWEEP;
+ w.pfrw_workq = &delq;
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+ if (!rv)
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+ if (rv)
+ senderr(rv);
+ xdel = w.pfrw_cnt;
+ if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
+ if (*size2 < size+xdel) {
+ *size2 = size+xdel;
+ senderr(0);
+ }
+ i = 0;
+ SLIST_FOREACH(p, &delq, pfrke_workq) {
+ pfr_copyout_addr(&ad, p);
+ ad.pfra_fback = PFR_FB_DELETED;
+ if (copyout(&ad, addr+size+i, sizeof(ad)))
+ senderr(EFAULT);
+ i++;
+ }
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ if (pfr_insert_kentries(kt, &addq)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ senderr(ENOMEM);
+ }
+ pfr_remove_kentries(kt, &delq);
+ SLIST_FOREACH(p, &changeq, pfrke_workq)
+ p->pfrke_not ^= 1;
+ pfr_clstats_kentries(&changeq, time.tv_sec);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nadd != NULL)
+ *nadd = xadd;
+ if (ndel != NULL)
+ *ndel = xdel;
+ if (nchange != NULL)
+ *nchange = xchange;
+ if ((flags & PFR_FLAG_FEEDBACK) && *size2)
+ *size2 = size+xdel;
+ return (0);
+_bad:
+ pfr_destroy_kentries(&addq);
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size);
+ return (rv);
+}
+
+int
+pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i;
+
+ ACCEPT_FLAGS(0);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ return (EFAULT);
+ if (pfr_validate_addr(&ad))
+ return (EINVAL);
+ if (ADDR_NETWORK(&ad))
+ return (EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 0);
+ ad.pfra_fback = (p != NULL && !p->pfrke_not) ?
+ PFR_FB_MATCH : PFR_FB_NONE;
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ return (EFAULT);
+ }
+ return (0);
+}
+
+int
+pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
+ int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_walktree w;
+ int rv;
+
+ ACCEPT_FLAGS(0);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ if (kt->pfrkt_cnt > *size) {
+ *size = kt->pfrkt_cnt;
+ return (0);
+ }
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_GET_ADDRS;
+ w.pfrw_addr = addr;
+ w.pfrw_free = kt->pfrkt_cnt;
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+ if (!rv)
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+ if (rv)
+ return (rv);
+
+ if (w.pfrw_free) {
+ printf("pfr_get_addrs: corruption detected.");
+ return (ENOTTY);
+ }
+ *size = kt->pfrkt_cnt;
+ return (0);
+}
+
+int
+pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
+ int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_walktree w;
+ struct pfr_kentryworkq workq;
+ int rv, s;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_CLSTATS);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ if (kt->pfrkt_cnt > *size) {
+ *size = kt->pfrkt_cnt;
+ return (0);
+ }
+
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_GET_ASTATS;
+ w.pfrw_astats = addr;
+ w.pfrw_free = kt->pfrkt_cnt;
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+ if (!rv)
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+ if (!rv && (flags & PFR_FLAG_CLSTATS)) {
+		rv = pfr_enqueue_addrs(kt, &workq, NULL);
+		if (!rv)
+			pfr_clstats_kentries(&workq, tzero);
+ }
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (rv)
+ return (rv);
+
+ if (w.pfrw_free) {
+ printf("pfr_get_astats: corruption detected.");
+ return (ENOTTY);
+ }
+ *size = kt->pfrkt_cnt;
+ return (0);
+}
+
+int
+pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
+ int *nzero, int flags)
+{
+ struct pfr_ktable *kt;
+ struct pfr_kentryworkq workq;
+ struct pfr_kentry *p;
+ struct pfr_addr ad;
+ int i, rv, s, xzero = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY+PFR_FLAG_FEEDBACK);
+ kt = pfr_lookup_table(tbl);
+ if (kt == NULL)
+ return (ESRCH);
+ SLIST_INIT(&workq);
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ senderr(EFAULT);
+ if (pfr_validate_addr(&ad))
+ senderr(EINVAL);
+ p = pfr_lookup_addr(kt, &ad, 1);
+ if (flags & PFR_FLAG_FEEDBACK) {
+ ad.pfra_fback = (p != NULL) ?
+ PFR_FB_CLEARED : PFR_FB_NONE;
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ senderr(EFAULT);
+ }
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
+ xzero++;
+ }
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_clstats_kentries(&workq, 0);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nzero != NULL)
+ *nzero = xzero;
+ return (0);
+_bad:
+ if (flags & PFR_FLAG_FEEDBACK)
+ pfr_reset_feedback(addr, size);
+ return (rv);
+}
+
+
+int
+pfr_validate_addr(struct pfr_addr *ad)
+{
+ switch (ad->pfra_af) {
+ case AF_INET:
+		if (ad->pfra_net > 32)
+ return (-1);
+ return (0);
+ case AF_INET6:
+		if (ad->pfra_net > 128)
+ return (-1);
+ return (0);
+ default:
+ return (-1);
+ }
+}
+
+int
+pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
+ int *naddr)
+{
+ struct pfr_walktree w;
+ int rv;
+
+ SLIST_INIT(workq);
+ bzero(&w, sizeof(w));
+ w.pfrw_op = PFRW_ENQUEUE;
+ w.pfrw_workq = workq;
+ rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
+ if (rv)
+ return (rv);
+ rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
+ if (rv)
+ return (rv);
+ if (naddr != NULL)
+ *naddr = w.pfrw_cnt;
+ return (0);
+}
+
+struct pfr_kentry *
+pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
+{
+ union sockaddr_union sa, mask;
+ struct radix_node_head *head;
+ struct pfr_kentry *ke;
+
+ bzero(&sa, sizeof(sa));
+ if (ad->pfra_af == AF_INET) {
+ FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
+ head = kt->pfrkt_ip4;
+ } else {
+ FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
+ head = kt->pfrkt_ip6;
+ }
+ if (ADDR_NETWORK(ad)) {
+ pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
+ ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
+ } else {
+ ke = (struct pfr_kentry *)rn_match(&sa, head);
+ if (exact && ke && KENTRY_NETWORK(ke))
+ ke = NULL;
+ }
+ return (ke);
+}
+
+struct pfr_kentry *
+pfr_create_kentry(struct pfr_addr *ad, long tzero)
+{
+ struct pfr_kentry *ke;
+
+ ke = pool_get(&pfr_kentry_pl, PR_NOWAIT);
+ if (ke == NULL)
+ return (NULL);
+ bzero(ke, sizeof(*ke));
+
+ if (ad->pfra_af == AF_INET)
+ FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
+ else
+ FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
+ ke->pfrke_af = ad->pfra_af;
+ ke->pfrke_net = ad->pfra_net;
+ ke->pfrke_not = ad->pfra_not;
+ ke->pfrke_tzero = tzero;
+ return (ke);
+}
+
+void
+pfr_destroy_kentry(struct pfr_kentry *ke)
+{
+ if (ke == NULL)
+ return;
+ pool_put(&pfr_kentry_pl, ke);
+}
+
+void
+pfr_destroy_kentries(struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p, *q;
+ for (p = SLIST_FIRST(workq); p != NULL; p = q) {
+ q = SLIST_NEXT(p, pfrke_workq);
+ pfr_destroy_kentry(p);
+ }
+}
+
+int
+pfr_insert_kentries(struct pfr_ktable *kt,
+ struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p, *q;
+ int n = 0;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ if (pfr_route_kentry(kt, p)) {
+ /* bad luck - no memory for netmask */
+ SLIST_FOREACH(q, workq, pfrke_workq) {
+ if (q == p)
+ break;
+ pfr_unroute_kentry(kt, q);
+ }
+ return (-1);
+ }
+ n++;
+ }
+ kt->pfrkt_cnt += n;
+ return (0);
+}
+
+void
+pfr_remove_kentries(struct pfr_ktable *kt,
+ struct pfr_kentryworkq *workq)
+{
+ struct pfr_kentry *p;
+ int n = 0;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ pfr_unroute_kentry(kt, p);
+ n++;
+ }
+ kt->pfrkt_cnt -= n;
+ pfr_destroy_kentries(workq);
+}
+
+void
+pfr_clstats_kentries(struct pfr_kentryworkq *workq, long tzero)
+{
+ struct pfr_kentry *p;
+ int s, n = 0;
+
+ SLIST_FOREACH(p, workq, pfrke_workq) {
+ s = splsoftnet();
+ bzero(p->pfrke_packets, sizeof(p->pfrke_packets));
+ bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes));
+ splx(s);
+ p->pfrke_tzero = tzero;
+ n++;
+ }
+}
+
+void
+pfr_reset_feedback(struct pfr_addr *addr, int size)
+{
+ struct pfr_addr ad;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (copyin(addr+i, &ad, sizeof(ad)))
+ break;
+ ad.pfra_fback = PFR_FB_NONE;
+ if (copyout(&ad, addr+i, sizeof(ad)))
+ break;
+ }
+}
+
+void
+pfr_prepare_network(union sockaddr_union *sa, int af, int net)
+{
+ int i;
+
+ bzero(sa, sizeof(*sa));
+ if (af == AF_INET) {
+ sa->sin.sin_len = sizeof(sa->sin);
+ sa->sin.sin_family = AF_INET;
+ sa->sin.sin_addr.s_addr = htonl(-1 << (32-net));
+ } else {
+ sa->sin6.sin6_len = sizeof(sa->sin6);
+		sa->sin6.sin6_family = AF_INET6;
+ for (i = 0; i < 4; i++) {
+ if (net <= 32) {
+ sa->sin6.sin6_addr.s6_addr32[i] =
+ htonl(-1 << (32-net));
+ break;
+ }
+ sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
+ net -= 32;
+ }
+ }
+}
+
+int
+pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
+{
+ union sockaddr_union mask;
+ struct radix_node *rn;
+ struct radix_node_head *head;
+ int s;
+
+ if (ke->pfrke_af == AF_INET)
+ head = kt->pfrkt_ip4;
+ else
+ head = kt->pfrkt_ip6;
+
+ s = splsoftnet();
+ if (KENTRY_NETWORK(ke)) {
+ pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
+ rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node);
+ } else
+ rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node);
+ splx(s);
+
+ if (rn == NULL) {
+ printf("pfr_route_kentry: no memory for mask\n");
+ return (-1);
+ }
+ return (0);
+}
+
+int
+pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
+{
+ union sockaddr_union mask;
+ struct radix_node *rn;
+ struct radix_node_head *head;
+ int s;
+
+ if (ke->pfrke_af == AF_INET)
+ head = kt->pfrkt_ip4;
+ else
+ head = kt->pfrkt_ip6;
+
+ s = splsoftnet();
+ if (KENTRY_NETWORK(ke)) {
+ pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
+ rn = rn_delete(&ke->pfrke_sa, &mask, head);
+ } else
+ rn = rn_delete(&ke->pfrke_sa, NULL, head);
+ splx(s);
+
+ if (rn == NULL) {
+ printf("pfr_unroute_kentry: delete failed\n");
+ return (-1);
+ }
+ return (0);
+}
+
+void
+pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
+{
+ bzero(ad, sizeof(*ad));
+ ad->pfra_af = ke->pfrke_af;
+ ad->pfra_net = ke->pfrke_net;
+ ad->pfra_not = ke->pfrke_not;
+ if (ad->pfra_af == AF_INET)
+ ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
+ else
+ ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
+}
+
+int
+pfr_walktree(struct radix_node *rn, void *arg)
+{
+ struct pfr_kentry *ke = (struct pfr_kentry *)rn;
+ struct pfr_walktree *w = arg;
+ int s;
+
+ switch (w->pfrw_op) {
+ case PFRW_MARK:
+ ke->pfrke_mark = 0;
+ break;
+ case PFRW_SWEEP:
+ if (ke->pfrke_mark)
+ break;
+		/* fall through */
+ case PFRW_ENQUEUE:
+ SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
+ w->pfrw_cnt++;
+ break;
+ case PFRW_GET_ADDRS:
+ if (w->pfrw_free-- > 0) {
+ struct pfr_addr ad;
+
+ pfr_copyout_addr(&ad, ke);
+ if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
+ return (EFAULT);
+ w->pfrw_addr++;
+ }
+ break;
+ case PFRW_GET_ASTATS:
+ if (w->pfrw_free-- > 0) {
+ struct pfr_astats as;
+
+ pfr_copyout_addr(&as.pfras_a, ke);
+
+ s = splsoftnet();
+ bcopy(ke->pfrke_packets, as.pfras_packets,
+ sizeof(as.pfras_packets));
+ bcopy(ke->pfrke_bytes, as.pfras_bytes,
+ sizeof(as.pfras_bytes));
+ splx(s);
+
+ if (copyout(&as, w->pfrw_astats, sizeof(as)))
+ return (EFAULT);
+ w->pfrw_astats++;
+ }
+ break;
+ }
+ return (0);
+}
+
+
+int
+pfr_clr_tables(int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p;
+ int s, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ SLIST_INIT(&workq);
+	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+		xdel++;
+	}
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, key;
+ int i, s, xadd = 0;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ SLIST_INIT(&workq);
+ for(i = 0; i < size; i++) {
+ if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t))) {
+ pfr_destroy_ktables(&workq);
+ return (EFAULT);
+ }
+		if (key.pfrkt_name[PF_TABLE_NAME_SIZE-1]) {
+			pfr_destroy_ktables(&workq);
+			return (EINVAL);
+		}
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p == NULL) {
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ p = pfr_create_ktable(&key.pfrkt_t, tzero);
+ if (p == NULL) {
+ pfr_destroy_ktables(&workq);
+ return (ENOMEM);
+ }
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ /* TODO: move the following out of the if */
+ if (pfr_lookup_hash(&p->pfrkt_hash)) {
+ printf(
+ "pfr_add_tables: sha collision\n");
+ pfr_destroy_ktables(&workq);
+ return (EEXIST);
+ }
+ }
+ xadd++;
+ }
+ }
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_insert_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nadd != NULL)
+ *nadd = xadd;
+ return (0);
+}
+
+int
+pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, key;
+ int i, s, xdel = 0;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_DUMMY);
+ SLIST_INIT(&workq);
+ for(i = 0; i < size; i++) {
+ if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ return (EFAULT);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xdel++;
+ }
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_remove_ktables(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (ndel != NULL)
+ *ndel = xdel;
+ return (0);
+}
+
+int
+pfr_get_tables(struct pfr_table *tbl, int *size, int flags)
+{
+ struct pfr_ktable *p;
+ int n = pfr_ktable_cnt;
+
+ ACCEPT_FLAGS(0);
+ if (n > *size) {
+ *size = n;
+ return (0);
+ }
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (n-- <= 0)
+ continue;
+ if (copyout(&p->pfrkt_t, tbl++, sizeof(*tbl)))
+ return (EFAULT);
+ }
+ if (n) {
+ printf("pfr_get_tables: corruption detected.");
+ return (ENOTTY);
+ }
+ *size = pfr_ktable_cnt;
+ return (0);
+}
+
+int
+pfr_get_tstats(struct pfr_tstats *tbl, int *size, int flags)
+{
+ struct pfr_ktable *p;
+ struct pfr_ktableworkq workq;
+ int s, n = pfr_ktable_cnt;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_CLSTATS+PFR_FLAG_RECURSE);
+ if (n > *size) {
+ *size = n;
+ return (0);
+ }
+ SLIST_INIT(&workq);
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
+ if (n-- <= 0)
+ continue;
+ if (!(flags & PFR_FLAG_ATOMIC))
+ s = splsoftnet();
+ if (copyout(&p->pfrkt_ts, tbl++, sizeof(*tbl))) {
+ splx(s);
+ return (EFAULT);
+ }
+ if (!(flags & PFR_FLAG_ATOMIC))
+ splx(s);
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ }
+ if (flags & PFR_FLAG_CLSTATS)
+ pfr_clstats_ktables(&workq, tzero,
+ flags & PFR_FLAG_RECURSE);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ if (n) {
+ printf("pfr_get_tstats: corruption detected.");
+ return (ENOTTY);
+ }
+ *size = pfr_ktable_cnt;
+ return (0);
+}
+
+int
+pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
+{
+ struct pfr_ktableworkq workq;
+ struct pfr_ktable *p, key;
+ int i, s, xzero = 0;
+ long tzero = time.tv_sec;
+
+ ACCEPT_FLAGS(PFR_FLAG_ATOMIC+PFR_FLAG_CLSTATS+PFR_FLAG_RECURSE);
+ SLIST_INIT(&workq);
+ for(i = 0; i < size; i++) {
+ if (copyin(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t)))
+ return (EFAULT);
+ p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
+ if (p != NULL) {
+ SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
+ xzero++;
+ }
+ }
+
+ if (!(flags & PFR_FLAG_DUMMY)) {
+ if (flags & PFR_FLAG_ATOMIC)
+ s = splsoftnet();
+ pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_RECURSE);
+ if (flags & PFR_FLAG_ATOMIC)
+ splx(s);
+ }
+ if (nzero != NULL)
+ *nzero = xzero;
+ return (0);
+}
+
+int
+pfr_wrap_table(struct pfr_table *tbl, struct pf_addr_wrap *wrap,
+ int *exists, int flags)
+{
+ union pfr_hash hash;
+ struct pf_addr_wrap w;
+ SHA1_CTX sha1;
+
+ ACCEPT_FLAGS(0);
+ if (!*tbl->pfrt_name || tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
+ return (EINVAL);
+ SHA1Init(&sha1);
+ SHA1Update(&sha1, tbl->pfrt_name, strlen(tbl->pfrt_name));
+ SHA1Final(hash.pfrh_sha1, &sha1);
+
+ bzero(&w, sizeof(w));
+ bcopy(&hash, &w.addr, sizeof(w.addr));
+ w.mask.addr32[0] = PF_TABLE_MASK;
+ w.mask.addr32[1] = hash.pfrh_int32[4];
+ if (copyout(&w, wrap, sizeof(*wrap)))
+ return (EFAULT);
+
+ if (exists != NULL)
+ *exists = pfr_lookup_table(tbl) != NULL;
+ return (0);
+}
+
+int
+pfr_unwrap_table(struct pfr_table *tbl, struct pf_addr_wrap *wrap, int flags)
+{
+ union pfr_hash hash;
+ struct pf_addr_wrap w;
+ struct pfr_ktable *kt;
+
+ ACCEPT_FLAGS(0);
+ if (copyin(wrap, &w, sizeof(w)))
+ return (EFAULT);
+
+ if (w.mask.addr32[0] != PF_TABLE_MASK || w.mask.addr32[2] ||
+ w.mask.addr32[3])
+ return (EINVAL);
+
+ bcopy(&w.addr, &hash, 16);
+ hash.pfrh_int32[4] = w.mask.addr32[1];
+ kt = pfr_lookup_hash(&hash);
+ if (kt == NULL)
+ return (ENOENT);
+ *tbl = kt->pfrkt_t;
+ return (0);
+}
+
+void
+pfr_insert_ktables(struct pfr_ktableworkq *workq)
+{
+ struct pfr_ktable *p;
+ int s, n = 0;
+
+ /* insert into tree */
+ SLIST_FOREACH(p, workq, pfrkt_workq) {
+ RB_INSERT(pfr_ktablehead, &pfr_ktables, p);
+ n++;
+ }
+ pfr_ktable_cnt += n;
+
+ SLIST_FOREACH(p, workq, pfrkt_workq) {
+ s = splsoftnet();
+ SLIST_INSERT_HEAD(pfr_ktablehash +
+ PFR_HASH_BUCKET(p->pfrkt_hash),
+ p, pfrkt_hashq);
+ splx(s);
+ }
+}
+
+void
+pfr_remove_ktables(struct pfr_ktableworkq *workq)
+{
+ struct pfr_kentryworkq addrq;
+ struct pfr_ktable *p;
+ int s, n = 0;
+
+ SLIST_FOREACH(p, workq, pfrkt_workq) {
+ s = splsoftnet();
+ SLIST_REMOVE(pfr_ktablehash + PFR_HASH_BUCKET(p->pfrkt_hash),
+ p, pfr_ktable, pfrkt_hashq);
+ splx(s);
+ }
+
+ SLIST_FOREACH(p, workq, pfrkt_workq) {
+ RB_REMOVE(pfr_ktablehead, &pfr_ktables, p);
+ if (pfr_enqueue_addrs(p, &addrq, NULL))
+ printf("pfr_remove_ktables: enqueue failed");
+ pfr_destroy_kentries(&addrq);
+ n++;
+ }
+ pfr_ktable_cnt -= n;
+ pfr_destroy_ktables(workq);
+}
+
+void
+pfr_clstats_ktables(struct pfr_ktableworkq *workq, long tzero, int recurse)
+{
+ struct pfr_kentryworkq addrq;
+ struct pfr_ktable *p;
+ int s;
+
+ SLIST_FOREACH(p, workq, pfrkt_workq) {
+ if (recurse) {
+ if (pfr_enqueue_addrs(p, &addrq, NULL))
+ printf("pfr_clr_tstats: enqueue failed");
+ pfr_clstats_kentries(&addrq, tzero);
+ }
+ s = splsoftnet();
+ bzero(p->pfrkt_packets, sizeof(p->pfrkt_packets));
+ bzero(p->pfrkt_bytes, sizeof(p->pfrkt_bytes));
+ p->pfrkt_match = p->pfrkt_nomatch = 0;
+ splx(s);
+ p->pfrkt_tzero = tzero;
+ }
+}
+
+struct pfr_ktable *
+pfr_create_ktable(struct pfr_table *tbl, long tzero)
+{
+ struct pfr_ktable *kt;
+ SHA1_CTX sha1;
+
+ kt = pool_get(&pfr_ktable_pl, PR_NOWAIT);
+ if (kt == NULL)
+ return (NULL);
+ bzero(kt, sizeof(*kt));
+ kt->pfrkt_t = *tbl;
+
+ /* compute secure hash */
+ SHA1Init(&sha1);
+ SHA1Update(&sha1, kt->pfrkt_name, strlen(kt->pfrkt_name));
+ SHA1Final(kt->pfrkt_hash.pfrh_sha1, &sha1);
+
+ if (!rn_inithead((void **)&kt->pfrkt_ip4,
+ 8 * offsetof(struct sockaddr_in, sin_addr)) ||
+ !rn_inithead((void **)&kt->pfrkt_ip6,
+ 8 * offsetof(struct sockaddr_in6, sin6_addr))) {
+ pfr_destroy_ktable(kt);
+ return (NULL);
+ }
+ kt->pfrkt_tzero = tzero;
+
+ return (kt);
+}
+
+void
+pfr_destroy_ktable(struct pfr_ktable *kt)
+{
+ if (kt == NULL)
+ return;
+ if (kt->pfrkt_ip4 != NULL)
+ free((caddr_t)kt->pfrkt_ip4, M_RTABLE);
+ if (kt->pfrkt_ip6 != NULL)
+ free((caddr_t)kt->pfrkt_ip6, M_RTABLE);
+ pool_put(&pfr_ktable_pl, kt);
+}
+
+void
+pfr_destroy_ktables(struct pfr_ktableworkq *workq)
+{
+ struct pfr_ktable *p, *q;
+ for(p = SLIST_FIRST(workq); p; p = q) {
+ q = SLIST_NEXT(p, pfrkt_workq);
+ pfr_destroy_ktable(p);
+ }
+}
+
+int
+pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
+{
+ return (strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE));
+}
+
+struct pfr_ktable *
+pfr_lookup_hash(union pfr_hash *hash)
+{
+ struct pfr_ktable *p;
+
+ SLIST_FOREACH(p, pfr_ktablehash+PFR_HASH_BUCKET(*hash), pfrkt_hashq)
+ if (!memcmp(p->pfrkt_hash.pfrh_sha1, hash->pfrh_sha1, 20))
+ return (p);
+ return (NULL);
+}
+
+struct pfr_ktable *
+pfr_lookup_table(struct pfr_table *tbl)
+{
+ /* struct pfr_ktable start like a struct pfr_table */
+ return RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
+}
+
+
+/*
+ * Return 1 if address b is found in the table identified by the wrapped
+ * pair (a, m), otherwise return 0.  The caller compares the result against
+ * the rule's "not" flag n: the rule matches when the result differs from n.
+ */
+int
+pfr_match_addr(struct pf_addr *a, struct pf_addr *m,
+ struct pf_addr *b, sa_family_t af)
+{
+ union pfr_hash hash;
+ struct pfr_ktable *kt;
+ struct pfr_kentry *ke = NULL;
+ int match;
+
+ bcopy(a, &hash, 16);
+ hash.pfrh_int32[4] = m->addr32[1];
+ kt = pfr_lookup_hash(&hash);
+ if (kt == NULL)
+ return (0);
+ switch (af) {
+ case AF_INET:
+ pfr_sin.sin_addr.s_addr = b->addr32[0];
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
+ break;
+ case AF_INET6:
+		bcopy(b, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
+ break;
+ }
+ match = (ke && !ke->pfrke_not);
+ if (match)
+ kt->pfrkt_match++;
+ else
+ kt->pfrkt_nomatch++;
+ return (match);
+}
+
+void
+pfr_update_stats(struct pf_addr *a, struct pf_addr *m,
+ struct pf_addr *b, sa_family_t af, u_int64_t len,
+ int dir_out, int op_pass, int notrule)
+{
+ union pfr_hash hash;
+ struct pfr_ktable *kt;
+ struct pfr_kentry *ke = NULL;
+
+ bcopy(a, &hash, 16);
+ hash.pfrh_int32[4] = m->addr32[1];
+ kt = pfr_lookup_hash(&hash);
+ if (kt == NULL)
+ return;
+
+ switch (af) {
+ case AF_INET:
+ pfr_sin.sin_addr.s_addr = b->addr32[0];
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4);
+ break;
+ case AF_INET6:
+		bcopy(b, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr));
+ ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6);
+ break;
+ }
+ if (ke == NULL || ke->pfrke_not != notrule) {
+ if (op_pass != PFR_OP_PASS)
+ printf("pfr_update_stats: assertion failed.");
+ op_pass = PFR_OP_XPASS;
+ }
+ kt->pfrkt_packets[dir_out][op_pass]++;
+ kt->pfrkt_bytes[dir_out][op_pass] += len;
+ if (op_pass != PFR_OP_XPASS) {
+ ke->pfrke_packets[dir_out][op_pass]++;
+ ke->pfrke_bytes[dir_out][op_pass] += len;
+ }
+}
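For reference, pfr_wrap_table() above packs the 20-byte SHA1 of the table name
into a pf_addr_wrap: the first 16 hash bytes go into the address, the first
mask word holds the PF_TABLE_MASK sentinel and the second mask word carries
the remaining 4 hash bytes.  A small illustrative helper (not part of this
file) that reverses the packing, mirroring pfr_unwrap_table() and
pfr_match_addr():

/*
 * Illustrative helper: recover the 20-byte table hash from a wrapped
 * address, exactly as pfr_unwrap_table() does above.
 */
static int
pfr_hash_of_wrap(struct pf_addr_wrap *w, union pfr_hash *hash)
{
    if (w->mask.addr32[0] != PF_TABLE_MASK)
        return (-1);                            /* not a wrapped table */
    bcopy(&w->addr, hash, 16);                  /* hash bytes 0-15 */
    hash->pfrh_int32[4] = w->mask.addr32[1];    /* hash bytes 16-19 */
    return (0);
}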
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 98718d0acb3..5973ad20ff9 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfvar.h,v 1.118 2002/12/23 13:15:18 mcbride Exp $ */
+/* $OpenBSD: pfvar.h,v 1.119 2002/12/29 20:07:34 cedric Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -37,6 +37,9 @@
#include <sys/queue.h>
#include <sys/tree.h>
+#include <net/radix.h>
+#include <netinet/ip_ipsp.h>
+
enum { PF_IN=1, PF_OUT=2 };
enum { PF_PASS=0, PF_DROP=1, PF_SCRUB=2, PF_NAT=3, PF_NONAT=4,
PF_BINAT=5, PF_NOBINAT=6, PF_RDR=7, PF_NORDR=8 };
@@ -436,6 +439,91 @@ struct pf_anchor {
TAILQ_HEAD(pf_anchorqueue, pf_anchor);
+#define PF_TABLE_MASK 0xCAFEBABE
+#define PF_TABLE_NAME_SIZE 128
+
+struct pfr_table {
+ char pfrt_name[PF_TABLE_NAME_SIZE];
+};
+
+enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED,
+ PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_MAX };
+
+struct pfr_addr {
+ union {
+ struct in_addr _pfra_ip4addr;
+ struct in6_addr _pfra_ip6addr;
+ } pfra_u;
+ u_int8_t pfra_af;
+ u_int8_t pfra_net;
+ u_int8_t pfra_not;
+ u_int8_t pfra_fback;
+};
+#define pfra_ip4addr pfra_u._pfra_ip4addr
+#define pfra_ip6addr pfra_u._pfra_ip6addr
+
+enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX };
+enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX };
+#define PFR_OP_XPASS PFR_OP_ADDR_MAX
+
+struct pfr_astats {
+ struct pfr_addr pfras_a;
+ u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ long pfras_tzero;
+};
+
+struct pfr_tstats {
+ struct pfr_table pfrts_t;
+ u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+ u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX];
+ u_int64_t pfrts_match;
+ u_int64_t pfrts_nomatch;
+ long pfrts_tzero;
+ int pfrts_cnt;
+};
+#define pfrts_name pfrts_t.pfrt_name
+
+union pfr_hash {
+ char pfrh_sha1[20];
+ u_int32_t pfrh_int32[5];
+};
+
+SLIST_HEAD(pfr_kentryworkq, pfr_kentry);
+struct pfr_kentry {
+ struct radix_node pfrke_node[2];
+ union sockaddr_union pfrke_sa;
+ u_int64_t pfrke_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ u_int64_t pfrke_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX];
+ SLIST_ENTRY(pfr_kentry) pfrke_workq;
+ long pfrke_tzero;
+ u_int8_t pfrke_af;
+ u_int8_t pfrke_net;
+ u_int8_t pfrke_not;
+ u_int8_t pfrke_mark;
+};
+
+SLIST_HEAD(pfr_ktablehashq, pfr_ktable);
+SLIST_HEAD(pfr_ktableworkq, pfr_ktable);
+RB_HEAD(pfr_ktablehead, pfr_ktable);
+struct pfr_ktable {
+ struct pfr_tstats pfrkt_ts;
+ union pfr_hash pfrkt_hash;
+ RB_ENTRY(pfr_ktable) pfrkt_tree;
+ SLIST_ENTRY(pfr_ktable) pfrkt_hashq;
+ SLIST_ENTRY(pfr_ktable) pfrkt_workq;
+ struct radix_node_head *pfrkt_ip4;
+ struct radix_node_head *pfrkt_ip6;
+};
+#define pfrkt_t pfrkt_ts.pfrts_t
+#define pfrkt_name pfrkt_t.pfrt_name
+#define pfrkt_cnt pfrkt_ts.pfrts_cnt
+#define pfrkt_packets pfrkt_ts.pfrts_packets
+#define pfrkt_bytes pfrkt_ts.pfrts_bytes
+#define pfrkt_match pfrkt_ts.pfrts_match
+#define pfrkt_nomatch pfrkt_ts.pfrts_nomatch
+#define pfrkt_tzero pfrkt_ts.pfrts_tzero
+
struct pf_pdesc {
u_int64_t tot_len; /* Make Mickey money */
union {
@@ -710,6 +798,27 @@ struct pfioc_ruleset {
char name[PF_RULESET_NAME_SIZE];
};
+#define PFR_FLAG_ATOMIC 0x00000001
+#define PFR_FLAG_DUMMY 0x00000002
+#define PFR_FLAG_FEEDBACK 0x00000004
+#define PFR_FLAG_CLSTATS 0x00000008
+#define PFR_FLAG_RECURSE 0x00000010
+#define PFR_FLAG_ALLMASK 0x0000001F
+
+struct pfioc_table {
+ struct pfr_table pfrio_table;
+ void *pfrio_buffer;
+ int pfrio_size;
+ int pfrio_size2;
+ int pfrio_nadd;
+ int pfrio_ndel;
+ int pfrio_nchange;
+ int pfrio_flags;
+};
+#define pfrio_exists pfrio_nadd
+#define pfrio_nzero pfrio_nadd
+#define pfrio_name pfrio_table.pfrt_name
+
/*
* ioctl operations
@@ -758,6 +867,22 @@ struct pfioc_ruleset {
#define DIOCGETANCHOR _IOWR('D', 57, struct pfioc_anchor)
#define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset)
#define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset)
+#define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table)
+#define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table)
+#define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table)
+#define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table)
+#define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table)
+#define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table)
+#define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table)
+#define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table)
+#define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table)
+#define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table)
+#define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table)
+#define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table)
+#define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table)
+#define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table)
+#define DIOCRWRAPTABLE _IOWR('D', 74, struct pfioc_table)
+#define DIOCRUNWRTABLE _IOWR('D', 75, struct pfioc_table)
#ifdef _KERNEL
@@ -789,6 +914,7 @@ extern void pf_update_anchor_rules(void);
extern void pf_dynaddr_copyout(struct pf_addr_wrap *);
extern struct pool pf_tree_pl, pf_rule_pl, pf_addr_pl;
extern struct pool pf_state_pl, pf_altq_pl, pf_pooladdr_pl;
+extern struct pool pfr_ktable_pl, pfr_kentry_pl;
extern void pf_purge_timeout(void *);
extern int pftm_interval;
extern void pf_purge_expired_states(void);
@@ -822,6 +948,31 @@ void pf_normalize_init(void);
int pf_normalize_ip(struct mbuf **, int, struct ifnet *, u_short *);
void pf_purge_expired_fragments(void);
int pf_routable(struct pf_addr *addr, sa_family_t af);
+int pfr_match_addr(struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, sa_family_t);
+void pfr_update_stats(struct pf_addr *, struct pf_addr *,
+ struct pf_addr *, sa_family_t, u_int64_t, int, int, int);
+int pfr_clr_tables(int *, int);
+int pfr_add_tables(struct pfr_table *, int, int *, int);
+int pfr_del_tables(struct pfr_table *, int, int *, int);
+int pfr_get_tables(struct pfr_table *, int *, int);
+int pfr_get_tstats(struct pfr_tstats *, int *, int);
+int pfr_clr_tstats(struct pfr_table *, int, int *, int);
+int pfr_clr_addrs(struct pfr_table *, int *, int);
+int pfr_add_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_del_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_set_addrs(struct pfr_table *, struct pfr_addr *, int, int *,
+ int *, int *, int *, int);
+int pfr_get_addrs(struct pfr_table *, struct pfr_addr *, int *, int);
+int pfr_get_astats(struct pfr_table *, struct pfr_astats *, int *, int);
+int pfr_clr_astats(struct pfr_table *, struct pfr_addr *, int, int *,
+ int);
+int pfr_tst_addrs(struct pfr_table *, struct pfr_addr *, int, int);
+int pfr_wrap_table(struct pfr_table *, struct pf_addr_wrap *, int *,
+ int);
+int pfr_unwrap_table(struct pfr_table *, struct pf_addr_wrap *, int);
extern struct pf_status pf_status;
extern struct pool pf_frent_pl, pf_frag_pl;
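
Finally, a hypothetical userland sketch of DIOCRWRAPTABLE, which turns a table
name into the pf_addr_wrap form that pf.c matches against.  The helper name and
error handling are illustrative; pfrio_exists (an alias for pfrio_nadd, see
above) is set on entry to request the existence check documented in pf.4.

/* Sketch: wrap an (assumed) table name into a rule address. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <string.h>
#include <err.h>

int
wrap_table(int dev, const char *name, struct pf_addr_wrap *wrap)
{
    struct pfioc_table io;

    memset(&io, 0, sizeof(io));
    memset(wrap, 0, sizeof(*wrap));
    strlcpy(io.pfrio_name, name, sizeof(io.pfrio_name));
    io.pfrio_buffer = wrap;             /* room for one pf_addr_wrap */
    io.pfrio_size = 1;
    io.pfrio_exists = 1;                /* ask whether the table exists */
    if (ioctl(dev, DIOCRWRAPTABLE, &io) == -1)
        return (-1);
    if (!io.pfrio_exists)
        warnx("table %s does not exist (yet)", name);
    /* wrap->addr now holds the hash, wrap->mask the PF_TABLE_MASK tag */
    return (0);
}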