summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorHenning Brauer <henning@cvs.openbsd.org>2009-04-06 12:05:56 +0000
committerHenning Brauer <henning@cvs.openbsd.org>2009-04-06 12:05:56 +0000
commit63f618ffc13737b6d73b157c8b7921c7b0e4be29 (patch)
tree0a1338ce99c5274fd3ecdcef7b8e017b3df9e461 /sys
parent4b3aad969b68381a5f8dc7beb977b479929205ec (diff)
1) scrub rules are completely gone.
2) packet reassembly: only one method remains, full reassembly. crop and drop-ovl are gone. . set reassemble yes|no [no-df] if no-df is given, fragments (and only fragments!) with the df bit set have it cleared before entering the fragment cache, and thus the reassembled packet doesn't have df set either. it does NOT touch non-fragmented packets. 3) regular rules can have scrub options. . pass scrub(no-df, min-ttl 64, max-mss 1400, set-tos lowdelay) . match scrub(reassemble tcp, random-id) of course all options are optional. the individual options still do what they used to do on scrub rules, but everything is stateful now. 4) match rules "match" is a new action, just like pass and block are, and can be used like they do. as opposed to pass or block, they do NOT change the pass/block state of a packet. i.e. . pass . match passes the packet, and . block . match blocks it. Every time (!) a match rule matches, i.e. not only when it is the last matching rule, the following actions are set: -queue assignment. can be overwritten later, the last rule that set a queue wins. note how this is different from the last matching rule wins: if the last matching rule has no queue assignments and the second last matching rule was a match rule with queue assignments, these assignments are taken. -rtable assignments. works the same as queue assignments. -set-tos, min-ttl, max-mss, no-df, random-id, reassemble tcp, all work like the above -logging. every matching rule causes the packet to be logged. this means a single packet can get logged more than once (think multiple log interfaces with different receivers, like pflogd and spamlogd) . almost entirely hacked at n2k9 in basel, could not be committed close to release. this really should have been multiple diffs, but splitting them now is not feasible any more. input from mcbride and dlg, and frantzen about the fragment handling. speedup around 7% for the common case, more so the more scrub rules were in use. 
manpage not up to date, being worked on.
Diffstat (limited to 'sys')
-rw-r--r--sys/net/pf.c279
-rw-r--r--sys/net/pf_ioctl.c17
-rw-r--r--sys/net/pf_norm.c573
-rw-r--r--sys/net/pf_ruleset.c7
-rw-r--r--sys/net/pfvar.h77
5 files changed, 310 insertions, 643 deletions
diff --git a/sys/net/pf.c b/sys/net/pf.c
index 67e0445ef95..81c3ca3eab1 100644
--- a/sys/net/pf.c
+++ b/sys/net/pf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf.c,v 1.639 2009/03/15 19:40:41 miod Exp $ */
+/* $OpenBSD: pf.c,v 1.640 2009/04/06 12:05:55 henning Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -127,7 +127,7 @@ struct pf_anchor_stackframe {
struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
-struct pool pf_altq_pl;
+struct pool pf_altq_pl, pf_rule_item_pl;
void pf_init_threshold(struct pf_threshold *, u_int32_t,
u_int32_t);
@@ -153,12 +153,15 @@ void pf_send_tcp(const struct pf_rule *, sa_family_t,
const struct pf_addr *, const struct pf_addr *,
u_int16_t, u_int16_t, u_int32_t, u_int32_t,
u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
- u_int16_t, struct ether_header *, struct ifnet *);
+ u_int16_t, struct ether_header *, struct ifnet *,
+ u_int);
void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
- sa_family_t, struct pf_rule *);
+ sa_family_t, struct pf_rule *, u_int);
void pf_detach_state(struct pf_state *);
void pf_state_key_detach(struct pf_state *, int);
u_int32_t pf_tcp_iss(struct pf_pdesc *);
+void pf_rule_to_actions(struct pf_rule *,
+ struct pf_rule_actions *);
int pf_test_rule(struct pf_rule **, struct pf_state **,
int, struct pfi_kif *, struct mbuf *, int,
void *, struct pf_pdesc *, struct pf_rule **,
@@ -170,7 +173,8 @@ static __inline int pf_create_state(struct pf_rule *, struct pf_rule *,
struct pf_state_key *, struct mbuf *, int,
u_int16_t, u_int16_t, int *, struct pfi_kif *,
struct pf_state **, int, u_int16_t, u_int16_t,
- int);
+ int, struct pf_rule_slist *,
+ struct pf_rule_actions *);
int pf_test_fragment(struct pf_rule **, int,
struct pfi_kif *, struct mbuf *, void *,
struct pf_pdesc *, struct pf_rule **,
@@ -258,27 +262,35 @@ enum { PF_ICMP_MULTI_NONE, PF_ICMP_MULTI_SOLICITED, PF_ICMP_MULTI_LINK };
#define BOUND_IFACE(r, k) \
((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
-#define STATE_INC_COUNTERS(s) \
- do { \
- s->rule.ptr->states_cur++; \
- s->rule.ptr->states_tot++; \
- if (s->anchor.ptr != NULL) { \
- s->anchor.ptr->states_cur++; \
- s->anchor.ptr->states_tot++; \
- } \
- if (s->nat_rule.ptr != NULL) { \
- s->nat_rule.ptr->states_cur++; \
- s->nat_rule.ptr->states_tot++; \
- } \
+#define STATE_INC_COUNTERS(s) \
+ do { \
+ struct pf_rule_item *mrm; \
+ s->rule.ptr->states_cur++; \
+ s->rule.ptr->states_tot++; \
+ if (s->anchor.ptr != NULL) { \
+ s->anchor.ptr->states_cur++; \
+ s->anchor.ptr->states_tot++; \
+ } \
+ if (s->nat_rule.ptr != NULL) { \
+ s->nat_rule.ptr->states_cur++; \
+ s->nat_rule.ptr->states_tot++; \
+ } \
+ SLIST_FOREACH(mrm, &s->match_rules, entry) { \
+ mrm->r->states_cur++; \
+ mrm->r->states_tot++; \
+ } \
} while (0)
-#define STATE_DEC_COUNTERS(s) \
- do { \
- if (s->nat_rule.ptr != NULL) \
- s->nat_rule.ptr->states_cur--; \
- if (s->anchor.ptr != NULL) \
- s->anchor.ptr->states_cur--; \
- s->rule.ptr->states_cur--; \
+#define STATE_DEC_COUNTERS(s) \
+ do { \
+ struct pf_rule_item *mrm; \
+ if (s->nat_rule.ptr != NULL) \
+ s->nat_rule.ptr->states_cur--; \
+ if (s->anchor.ptr != NULL) \
+ s->anchor.ptr->states_cur--; \
+ s->rule.ptr->states_cur--; \
+ SLIST_FOREACH(mrm, &s->match_rules, entry) \
+ mrm->r->states_cur--; \
} while (0)
static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
@@ -1091,7 +1103,8 @@ pf_unlink_state(struct pf_state *cur)
cur->key[PF_SK_WIRE]->port[1],
cur->key[PF_SK_WIRE]->port[0],
cur->src.seqhi, cur->src.seqlo + 1,
- TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
+ TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL,
+ cur->rtableid);
}
RB_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFLOW > 0
@@ -1111,6 +1124,8 @@ pf_unlink_state(struct pf_state *cur)
void
pf_free_state(struct pf_state *cur)
{
+ struct pf_rule_item *ri;
+
splsoftassert(IPL_SOFTNET);
#if NPFSYNC > 0
@@ -1128,6 +1143,13 @@ pf_free_state(struct pf_state *cur)
if (cur->anchor.ptr != NULL)
if (--cur->anchor.ptr->states_cur <= 0)
pf_rm_rule(NULL, cur->anchor.ptr);
+ while ((ri = SLIST_FIRST(&cur->match_rules))) {
+ SLIST_REMOVE_HEAD(&cur->match_rules, entry);
+ if (--ri->r->states_cur <= 0 &&
+ ri->r->src_nodes <= 0)
+ pf_rm_rule(NULL, ri->r);
+ pool_put(&pf_rule_item_pl, ri);
+ }
pf_normalize_tcp_cleanup(cur);
pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
TAILQ_REMOVE(&state_list, cur, entry_list);
@@ -1856,7 +1878,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af,
const struct pf_addr *saddr, const struct pf_addr *daddr,
u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
- u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
+ u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp, u_int rtableid)
{
struct mbuf *m;
int len, tlen;
@@ -1895,8 +1917,8 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af,
m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
m->m_pkthdr.pf.tag = rtag;
- if (r != NULL && r->rtableid >= 0)
- m->m_pkthdr.pf.rtableid = r->rtableid;
+ if (rtableid >= 0)
+ m->m_pkthdr.pf.rtableid = rtableid;
#ifdef ALTQ
if (r != NULL && r->qid) {
@@ -2010,7 +2032,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af,
void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
- struct pf_rule *r)
+ struct pf_rule *r, u_int rtableid)
{
struct mbuf *m0;
@@ -2019,8 +2041,8 @@ pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
- if (r->rtableid >= 0)
- m0->m_pkthdr.pf.rtableid = r->rtableid;
+ if (rtableid >= 0)
+ m0->m_pkthdr.pf.rtableid = rtableid;
#ifdef ALTQ
if (r->qid) {
@@ -2595,24 +2617,47 @@ pf_tcp_iss(struct pf_pdesc *pd)
return (digest[0] + tcp_iss + pf_tcp_iss_off);
}
+void
+pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a)
+{
+ if (r->qid)
+ a->qid = r->qid;
+ if (r->pqid)
+ a->pqid = r->pqid;
+ if (r->rtableid >= 0)
+ a->rtableid = r->rtableid;
+ a->log |= r->log;
+ if (r->scrub_flags & PFSTATE_SETTOS)
+ a->set_tos = r->set_tos;
+ if (r->min_ttl)
+ a->min_ttl = r->min_ttl;
+ if (r->max_mss)
+ a->max_mss = r->max_mss;
+ a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
+ PFSTATE_SETTOS|PFSTATE_SCRUB_TCP));
+}
+
int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
struct pfi_kif *kif, struct mbuf *m, int off, void *h,
struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
struct ifqueue *ifq)
{
- struct pf_rule *nr = NULL;
+ struct pf_rule *nr = NULL, *lastr = NULL;
struct pf_addr *saddr = pd->src, *daddr = pd->dst;
sa_family_t af = pd->af;
struct pf_rule *r, *a = NULL;
struct pf_ruleset *ruleset = NULL;
+ struct pf_rule_slist rules;
+ struct pf_rule_item *ri;
struct pf_src_node *nsn = NULL;
struct tcphdr *th = pd->hdr.tcp;
struct pf_state_key *skw = NULL, *sks = NULL;
struct pf_state_key *sk = NULL, *nk = NULL;
+ struct pf_rule_actions act;
u_short reason;
int rewrite = 0, hdrlen = 0;
- int tag = -1, rtableid = -1;
+ int tag = -1;
int asd = 0;
int match = 0;
int state_icmp = 0, icmp_dir, multi;
@@ -2620,6 +2665,8 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
u_int16_t bproto_sum = 0, bip_sum;
u_int8_t icmptype = 0, icmpcode = 0;
+ bzero(&act, sizeof(act));
+ act.rtableid = -1;
if (direction == PF_IN && pf_check_congestion(ifq)) {
REASON_SET(&reason, PFRES_CONGEST);
@@ -2809,6 +2856,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
pd->nat_rule = nr;
}
+ SLIST_INIT(&rules);
while (r != NULL) {
r->evaluations++;
if (pfi_kif_match(r->kif, kif) == r->ifnot)
@@ -2869,15 +2917,24 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
r->os_fingerprint)))
r = TAILQ_NEXT(r, entries);
else {
+ lastr = r;
if (r->tag)
tag = r->tag;
- if (r->rtableid >= 0)
- rtableid = r->rtableid;
if (r->anchor == NULL) {
- match = 1;
- *rm = r;
- *am = a;
- *rsm = ruleset;
+ if (r->action == PF_MATCH) {
+ ri = pool_get(&pf_rule_item_pl,
+ PR_NOWAIT);
+ ri->r = r;
+ /* order is irrelevant */
+ SLIST_INSERT_HEAD(&rules, ri, entry);
+ pf_rule_to_actions(r, &act);
+ } else {
+ match = 1;
+ *rm = r;
+ *am = a;
+ *rsm = ruleset;
+ }
+
if ((*rm)->quick)
break;
r = TAILQ_NEXT(r, entries);
@@ -2893,13 +2950,24 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
a = *am;
ruleset = *rsm;
+ /* apply actions for last matching rule */
+ if (lastr && lastr->action != PF_MATCH)
+ pf_rule_to_actions(lastr, &act);
+
REASON_SET(&reason, PFRES_MATCH);
- if (r->log || (nr != NULL && nr->log)) {
+ if (act.log || (nr != NULL && nr->log)) {
+ struct pf_rule_item *mr;
+
if (rewrite)
m_copyback(m, off, hdrlen, pd->hdr.any);
- PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
- a, ruleset, pd);
+ if (r->log)
+ PFLOG_PACKET(kif, h, m, af, direction, reason,
+ r->log ? r : nr, a, ruleset, pd);
+ SLIST_FOREACH(mr, &rules, entry)
+ if (mr->r->log)
+ PFLOG_PACKET(kif, h, m, af, direction, reason,
+ mr->r, a, ruleset, pd);
}
if ((r->action == PF_DROP) &&
@@ -2950,32 +3018,32 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
pf_send_tcp(r, af, pd->dst,
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
- r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
+ r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp,
+ act.rtableid);
}
} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
r->return_icmp)
pf_send_icmp(m, r->return_icmp >> 8,
- r->return_icmp & 255, af, r);
+ r->return_icmp & 255, af, r, act.rtableid);
else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
r->return_icmp6)
pf_send_icmp(m, r->return_icmp6 >> 8,
- r->return_icmp6 & 255, af, r);
+ r->return_icmp6 & 255, af, r, act.rtableid);
}
if (r->action == PF_DROP)
goto cleanup;
- if (pf_tag_packet(m, tag, rtableid)) {
+ if (pf_tag_packet(m, tag, act.rtableid)) {
REASON_SET(&reason, PFRES_MEMORY);
goto cleanup;
}
- if (!state_icmp && (r->keep_state || nr != NULL ||
- (pd->flags & PFDESC_TCP_NORM))) {
+ if (!state_icmp && (r->keep_state || nr != NULL)) {
int action;
action = pf_create_state(r, nr, a, pd, nsn, skw, sks, nk, sk, m,
off, sport, dport, &rewrite, kif, sm, tag, bproto_sum,
- bip_sum, hdrlen);
+ bip_sum, hdrlen, &rules, &act);
if (action != PF_PASS)
return (action);
} else {
@@ -3019,7 +3087,8 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
struct pf_state_key *sks, struct pf_state_key *nk, struct pf_state_key *sk,
struct mbuf *m, int off, u_int16_t sport, u_int16_t dport, int *rewrite,
struct pfi_kif *kif, struct pf_state **sm, int tag, u_int16_t bproto_sum,
- u_int16_t bip_sum, int hdrlen)
+ u_int16_t bip_sum, int hdrlen, struct pf_rule_slist *rules,
+ struct pf_rule_actions *act)
{
struct pf_state *s = NULL;
struct pf_src_node *sn = NULL;
@@ -3054,6 +3123,7 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
s->rule.ptr = r;
s->nat_rule.ptr = nr;
s->anchor.ptr = a;
+ bcopy(rules, &s->match_rules, sizeof(s->match_rules));
STATE_INC_COUNTERS(s);
if (r->allow_opts)
s->state_flags |= PFSTATE_ALLOWOPTS;
@@ -3061,7 +3131,14 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
s->state_flags |= PFSTATE_SLOPPY;
if (r->rule_flag & PFRULE_PFLOW)
s->state_flags |= PFSTATE_PFLOW;
- s->log = r->log & PF_LOG_ALL;
+ s->log = act->log & PF_LOG_ALL;
+ s->qid = act->qid;
+ s->pqid = act->pqid;
+ s->rtableid = act->rtableid;
+ s->min_ttl = act->min_ttl;
+ s->set_tos = act->set_tos;
+ s->max_mss = act->max_mss;
+ s->state_flags |= act->flags;
s->sync_state = PFSYNC_S_NONE;
if (nr != NULL)
s->log |= nr->log & PF_LOG_ALL;
@@ -3132,15 +3209,15 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
s->nat_src_node->states++;
}
if (pd->proto == IPPROTO_TCP) {
- if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
- off, pd, th, &s->src, &s->dst)) {
+ if (s->state_flags & PFSTATE_SCRUB_TCP &&
+ pf_normalize_tcp_init(m, off, pd, th, &s->src, &s->dst)) {
REASON_SET(&reason, PFRES_MEMORY);
pf_src_tree_remove_state(s);
STATE_DEC_COUNTERS(s);
pool_put(&pf_state_pl, s);
return (PF_DROP);
}
- if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
+ if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
&s->src, &s->dst, rewrite)) {
/* This really shouldn't happen!!! */
@@ -3203,7 +3280,8 @@ pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
s->src.mss = mss;
pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
- TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
+ TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL,
+ act->rtableid);
REASON_SET(&reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
}
@@ -3345,7 +3423,7 @@ pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
if (src->seqlo == 0) {
/* First packet from this end. Set its state */
- if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
+ if (((*state)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
src->scrub == NULL) {
if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
REASON_SET(reason, PFRES_MEMORY);
@@ -3616,7 +3694,7 @@ pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
th->th_sport, ntohl(th->th_ack), 0,
TH_RST, 0, 0,
(*state)->rule.ptr->return_ttl, 1, 0,
- pd->eh, kif->pfik_ifp);
+ pd->eh, kif->pfik_ifp, (*state)->rtableid);
src->seqlo = 0;
src->seqhi = 1;
src->max_win = 1;
@@ -3768,7 +3846,7 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
pd->src, th->th_dport, th->th_sport,
(*state)->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
- 0, NULL, NULL);
+ 0, NULL, NULL, (*state)->rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (!(th->th_flags & TH_ACK) ||
@@ -3798,7 +3876,8 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->dst.seqhi, 0, TH_SYN, 0,
- (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
+ (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL,
+ (*state)->rtableid);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
@@ -3813,13 +3892,13 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, (*state)->src.max_win, 0, 0, 0,
- (*state)->tag, NULL, NULL);
+ (*state)->tag, NULL, NULL, (*state)->rtableid);
pf_send_tcp((*state)->rule.ptr, pd->af,
&sk->addr[pd->sidx], &sk->addr[pd->didx],
sk->port[pd->sidx], sk->port[pd->didx],
(*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
TH_ACK, (*state)->dst.max_win, 0, 0, 1,
- 0, NULL, NULL);
+ 0, NULL, NULL, (*state)->rtableid);
(*state)->src.seqdiff = (*state)->dst.seqhi -
(*state)->src.seqlo;
(*state)->dst.seqdiff = (*state)->src.seqhi -
@@ -5328,15 +5407,17 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
if (m->m_pkthdr.len < (int)sizeof(*h)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
- log = 1;
+ log = PF_LOG_FORCE;
goto done;
}
if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
return (PF_PASS);
- /* We do IP header normalization and packet reassembly here */
- if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
+ /* packet reassembly here if 1) enabled 2) we deal with a fragment */
+ h = mtod(m, struct ip *);
+ if (pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)) &&
+ pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
action = PF_DROP;
goto done;
}
@@ -5347,7 +5428,7 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
if (off < (int)sizeof(*h)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_SHORT);
- log = 1;
+ log = PF_LOG_FORCE;
goto done;
}
@@ -5380,7 +5461,8 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
pd.hdr.tcp = &th;
if (!pf_pull_hdr(m, off, &th, sizeof(th),
&action, &reason, AF_INET)) {
- log = action != PF_PASS;
+ if (action != PF_PASS)
+ log = PF_LOG_FORCE;
goto done;
}
pd.p_len = pd.tot_len - off - (th.th_off << 2);
@@ -5401,6 +5483,13 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
} else if (s == NULL)
action = pf_test_rule(&r, &s, dir, kif,
m, off, h, &pd, &a, &ruleset, &ipintrq);
+
+ if (s) {
+ if (s->max_mss)
+ pf_normalize_mss(m, off, &pd, s->max_mss);
+ } else if (r->max_mss)
+ pf_normalize_mss(m, off, &pd, r->max_mss);
+
break;
}
@@ -5410,7 +5499,8 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
pd.hdr.udp = &uh;
if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
&action, &reason, AF_INET)) {
- log = action != PF_PASS;
+ if (action != PF_PASS)
+ log = PF_LOG_FORCE;
goto done;
}
if (uh.uh_dport == 0 ||
@@ -5440,7 +5530,8 @@ pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
pd.hdr.icmp = &ih;
if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
&action, &reason, AF_INET)) {
- log = action != PF_PASS;
+ if (action != PF_PASS)
+ log = PF_LOG_FORCE;
goto done;
}
action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
@@ -5478,23 +5569,28 @@ done:
!((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
action = PF_DROP;
REASON_SET(&reason, PFRES_IPOPTIONS);
- log = 1;
+ log = PF_LOG_FORCE;
DPFPRINTF(PF_DEBUG_MISC,
("pf: dropping packet with ip options\n"));
}
- if ((s && s->tag) || r->rtableid)
- pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
+ if (s)
+ pf_scrub_ip(&m, s->state_flags, s->min_ttl, s->set_tos);
+ else
+ pf_scrub_ip(&m, r->scrub_flags, r->min_ttl, r->set_tos);
+
+ if (s && (s->tag || s->rtableid))
+ pf_tag_packet(m, s ? s->tag : 0, s->rtableid);
if (dir == PF_IN && s && s->key[PF_SK_STACK])
m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#ifdef ALTQ
- if (action == PF_PASS && r->qid) {
+ if (action == PF_PASS && s && s->qid) {
if (pqid || (pd.tos & IPTOS_LOWDELAY))
- m->m_pkthdr.pf.qid = r->pqid;
+ m->m_pkthdr.pf.qid = s->pqid;
else
- m->m_pkthdr.pf.qid = r->qid;
+ m->m_pkthdr.pf.qid = s->qid;
/* add hints for ecn */
m->m_pkthdr.pf.hdr = h;
}
@@ -5523,15 +5619,21 @@ done:
}
if (log) {
- struct pf_rule *lr;
+ struct pf_rule *lr;
+ struct pf_rule_item *ri;
if (s != NULL && s->nat_rule.ptr != NULL &&
s->nat_rule.ptr->log & PF_LOG_ALL)
lr = s->nat_rule.ptr;
else
lr = r;
- PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
- &pd);
+ if (log == PF_LOG_FORCE || lr->log & PF_LOG_ALL)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a,
+ ruleset, &pd);
+ SLIST_FOREACH(ri, &s->match_rules, entry)
+ if (ri->r->log & PF_LOG_ALL)
+ PFLOG_PACKET(kif, h, m, AF_INET, dir, reason,
+ ri->r, a, ruleset, &pd);
}
kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
@@ -5546,6 +5648,8 @@ done:
a->bytes[dirndx] += pd.tot_len;
}
if (s != NULL) {
+ struct pf_rule_item *ri;
+
if (s->nat_rule.ptr != NULL) {
s->nat_rule.ptr->packets[dirndx]++;
s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
@@ -5561,6 +5665,10 @@ done:
dirndx = (dir == s->direction) ? 0 : 1;
s->packets[dirndx]++;
s->bytes[dirndx] += pd.tot_len;
+ SLIST_FOREACH(ri, &s->match_rules, entry) {
+ ri->r->packets[dirndx]++;
+ ri->r->bytes[dirndx] += pd.tot_len;
+ }
}
tr = r;
nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
@@ -5647,7 +5755,7 @@ pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
if (m->m_pkthdr.pf.flags & PF_TAG_GENERATED)
return (PF_PASS);
- /* We do IP header normalization and packet reassembly here */
+ /* packet reassembly */
if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
action = PF_DROP;
goto done;
@@ -5889,18 +5997,23 @@ done:
("pf: dropping packet with dangerous v6 headers\n"));
}
- if ((s && s->tag) || r->rtableid)
- pf_tag_packet(m, s ? s->tag : 0, r->rtableid);
+ if (s)
+ pf_scrub_ip6(&m, s->min_ttl);
+ else
+ pf_scrub_ip6(&m, r->min_ttl);
+
+ if (s && (s->tag || s->rtableid))
+ pf_tag_packet(m, s ? s->tag : 0, s->rtableid);
if (dir == PF_IN && s && s->key[PF_SK_STACK])
m->m_pkthdr.pf.statekey = s->key[PF_SK_STACK];
#ifdef ALTQ
- if (action == PF_PASS && r->qid) {
+ if (action == PF_PASS && s && s->qid) {
if (pd.tos & IPTOS_LOWDELAY)
- m->m_pkthdr.pf.qid = r->pqid;
+ m->m_pkthdr.pf.qid = s->pqid;
else
- m->m_pkthdr.pf.qid = r->qid;
+ m->m_pkthdr.pf.qid = s->qid;
/* add hints for ecn */
m->m_pkthdr.pf.hdr = h;
}
diff --git a/sys/net/pf_ioctl.c b/sys/net/pf_ioctl.c
index d59cfe307fa..01eaaa79fa8 100644
--- a/sys/net/pf_ioctl.c
+++ b/sys/net/pf_ioctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_ioctl.c,v 1.215 2009/03/09 13:53:10 mcbride Exp $ */
+/* $OpenBSD: pf_ioctl.c,v 1.216 2009/04/06 12:05:55 henning Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -151,6 +151,8 @@ pfattach(int num)
"pfstatekeypl", NULL);
pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
"pfstateitempl", NULL);
+ pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
+ "pfruleitempl", NULL);
pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
&pool_allocator_nointr);
pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
@@ -853,10 +855,6 @@ pf_setup_pfsync_matching(struct pf_ruleset *rs)
MD5Init(&ctx);
for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
- /* XXX PF_RULESET_SCRUB as well? */
- if (rs_cnt == PF_RULESET_SCRUB)
- continue;
-
if (rs->rules[rs_cnt].inactive.ptr_array)
free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
rs->rules[rs_cnt].inactive.ptr_array = NULL;
@@ -2839,6 +2837,15 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
break;
}
+ case DIOCSETREASS: {
+ u_int32_t *reass = (u_int32_t *)addr;
+
+ pf_status.reass = *reass;
+ if (!(pf_status.reass & PF_REASS_ENABLED))
+ pf_status.reass = 0;
+ break;
+ }
+
default:
error = ENODEV;
break;
diff --git a/sys/net/pf_norm.c b/sys/net/pf_norm.c
index 8fa6081287e..9faef283832 100644
--- a/sys/net/pf_norm.c
+++ b/sys/net/pf_norm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_norm.c,v 1.115 2009/01/31 20:06:55 henning Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.116 2009/04/06 12:05:55 henning Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
@@ -112,15 +112,12 @@ void pf_free_fragment(struct pf_fragment *);
struct pf_fragment *pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf *pf_reassemble(struct mbuf **, struct pf_fragment **,
struct pf_frent *, int);
-struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
- struct pf_fragment **, int, int, int *);
-int pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
- struct tcphdr *, int, sa_family_t);
-void pf_scrub_ip(struct mbuf **, u_int32_t, u_int8_t,
+void pf_scrub_ip(struct mbuf **, u_int8_t, u_int8_t,
u_int8_t);
#ifdef INET6
void pf_scrub_ip6(struct mbuf **, u_int8_t);
#endif
+
#define DPFPRINTF(x) do { \
if (pf_status.debug >= PF_DEBUG_MISC) { \
printf("%s: ", __func__); \
@@ -520,308 +517,11 @@ pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
return (NULL);
}
-struct mbuf *
-pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
- int drop, int *nomem)
-{
- struct mbuf *m = *m0;
- struct pf_frcache *frp, *fra, *cur = NULL;
- int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
- u_int16_t off = ntohs(h->ip_off) << 3;
- u_int16_t max = ip_len + off;
- int hosed = 0;
-
- KASSERT(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
-
- /* Create a new range queue for this packet */
- if (*frag == NULL) {
- *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
- if (*frag == NULL) {
- pf_flush_fragments();
- *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
- if (*frag == NULL)
- goto no_mem;
- }
-
- /* Get an entry for the queue */
- cur = pool_get(&pf_cent_pl, PR_NOWAIT);
- if (cur == NULL) {
- pool_put(&pf_cache_pl, *frag);
- *frag = NULL;
- goto no_mem;
- }
- pf_ncache++;
-
- (*frag)->fr_flags = PFFRAG_NOBUFFER;
- (*frag)->fr_max = 0;
- (*frag)->fr_src = h->ip_src;
- (*frag)->fr_dst = h->ip_dst;
- (*frag)->fr_p = h->ip_p;
- (*frag)->fr_id = h->ip_id;
- (*frag)->fr_timeout = time_second;
-
- cur->fr_off = off;
- cur->fr_end = max;
- LIST_INIT(&(*frag)->fr_cache);
- LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
-
- RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
- TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
-
- DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
-
- goto pass;
- }
-
- /*
- * Find a fragment after the current one:
- * - off contains the real shifted offset.
- */
- frp = NULL;
- LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
- if (fra->fr_off > off)
- break;
- frp = fra;
- }
-
- KASSERT(frp != NULL || fra != NULL);
-
- if (frp != NULL) {
- int precut;
-
- precut = frp->fr_end - off;
- if (precut >= ip_len) {
- /* Fragment is entirely a duplicate */
- DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
- h->ip_id, frp->fr_off, frp->fr_end, off, max));
- goto drop_fragment;
- }
- if (precut == 0) {
- /* They are adjacent. Fixup cache entry */
- DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
- h->ip_id, frp->fr_off, frp->fr_end, off, max));
- frp->fr_end = max;
- } else if (precut > 0) {
- /* The first part of this payload overlaps with a
- * fragment that has already been passed.
- * Need to trim off the first part of the payload.
- * But to do so easily, we need to create another
- * mbuf to throw the original header into.
- */
-
- DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
- h->ip_id, precut, frp->fr_off, frp->fr_end, off,
- max));
-
- off += precut;
- max -= precut;
- /* Update the previous frag to encompass this one */
- frp->fr_end = max;
-
- if (!drop) {
- /* XXX Optimization opportunity
- * This is a very heavy way to trim the payload.
- * we could do it much faster by diddling mbuf
- * internals but that would be even less legible
- * than this mbuf magic. For my next trick,
- * I'll pull a rabbit out of my laptop.
- */
- *m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
- if (*m0 == NULL)
- goto no_mem;
- KASSERT((*m0)->m_next == NULL);
- m_adj(m, precut + (h->ip_hl << 2));
- m_cat(*m0, m);
- m = *m0;
- if (m->m_flags & M_PKTHDR) {
- int plen = 0;
- struct mbuf *t;
- for (t = m; t; t = t->m_next)
- plen += t->m_len;
- m->m_pkthdr.len = plen;
- }
-
-
- h = mtod(m, struct ip *);
-
-
- KASSERT((int)m->m_len ==
- ntohs(h->ip_len) - precut);
- h->ip_off = htons(ntohs(h->ip_off) +
- (precut >> 3));
- h->ip_len = htons(ntohs(h->ip_len) - precut);
- } else {
- hosed++;
- }
- } else {
- /* There is a gap between fragments */
-
- DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
- h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
- max));
-
- cur = pool_get(&pf_cent_pl, PR_NOWAIT);
- if (cur == NULL)
- goto no_mem;
- pf_ncache++;
-
- cur->fr_off = off;
- cur->fr_end = max;
- LIST_INSERT_AFTER(frp, cur, fr_next);
- }
- }
-
- if (fra != NULL) {
- int aftercut;
- int merge = 0;
-
- aftercut = max - fra->fr_off;
- if (aftercut == 0) {
- /* Adjacent fragments */
- DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
- h->ip_id, off, max, fra->fr_off, fra->fr_end));
- fra->fr_off = off;
- merge = 1;
- } else if (aftercut > 0) {
- /* Need to chop off the tail of this fragment */
- DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
- h->ip_id, aftercut, off, max, fra->fr_off,
- fra->fr_end));
- fra->fr_off = off;
- max -= aftercut;
-
- merge = 1;
-
- if (!drop) {
- m_adj(m, -aftercut);
- if (m->m_flags & M_PKTHDR) {
- int plen = 0;
- struct mbuf *t;
- for (t = m; t; t = t->m_next)
- plen += t->m_len;
- m->m_pkthdr.len = plen;
- }
- h = mtod(m, struct ip *);
- KASSERT((int)m->m_len ==
- ntohs(h->ip_len) - aftercut);
- h->ip_len = htons(ntohs(h->ip_len) - aftercut);
- } else {
- hosed++;
- }
- } else if (frp == NULL) {
- /* There is a gap between fragments */
- DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
- h->ip_id, -aftercut, off, max, fra->fr_off,
- fra->fr_end));
-
- cur = pool_get(&pf_cent_pl, PR_NOWAIT);
- if (cur == NULL)
- goto no_mem;
- pf_ncache++;
-
- cur->fr_off = off;
- cur->fr_end = max;
- LIST_INSERT_BEFORE(fra, cur, fr_next);
- }
-
-
- /* Need to glue together two separate fragment descriptors */
- if (merge) {
- if (cur && fra->fr_off <= cur->fr_end) {
- /* Need to merge in a previous 'cur' */
- DPFPRINTF(("fragcache[%d]: adjacent(merge "
- "%d-%d) %d-%d (%d-%d)\n",
- h->ip_id, cur->fr_off, cur->fr_end, off,
- max, fra->fr_off, fra->fr_end));
- fra->fr_off = cur->fr_off;
- LIST_REMOVE(cur, fr_next);
- pool_put(&pf_cent_pl, cur);
- pf_ncache--;
- cur = NULL;
-
- } else if (frp && fra->fr_off <= frp->fr_end) {
- /* Need to merge in a modified 'frp' */
- KASSERT(cur == NULL);
- DPFPRINTF(("fragcache[%d]: adjacent(merge "
- "%d-%d) %d-%d (%d-%d)\n",
- h->ip_id, frp->fr_off, frp->fr_end, off,
- max, fra->fr_off, fra->fr_end));
- fra->fr_off = frp->fr_off;
- LIST_REMOVE(frp, fr_next);
- pool_put(&pf_cent_pl, frp);
- pf_ncache--;
- frp = NULL;
-
- }
- }
- }
-
- if (hosed) {
- /*
- * We must keep tracking the overall fragment even when
- * we're going to drop it anyway so that we know when to
- * free the overall descriptor. Thus we drop the frag late.
- */
- goto drop_fragment;
- }
-
-
- pass:
- /* Update maximum data size */
- if ((*frag)->fr_max < max)
- (*frag)->fr_max = max;
-
- /* This is the last segment */
- if (!mff)
- (*frag)->fr_flags |= PFFRAG_SEENLAST;
-
- /* Check if we are completely reassembled */
- if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
- LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
- LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
- /* Remove from fragment queue */
- DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
- (*frag)->fr_max));
- pf_free_fragment(*frag);
- *frag = NULL;
- }
-
- return (m);
-
- no_mem:
- *nomem = 1;
-
- /* Still need to pay attention to !IP_MF */
- if (!mff && *frag != NULL)
- (*frag)->fr_flags |= PFFRAG_SEENLAST;
-
- m_freem(m);
- return (NULL);
-
- drop_fragment:
-
- /* Still need to pay attention to !IP_MF */
- if (!mff && *frag != NULL)
- (*frag)->fr_flags |= PFFRAG_SEENLAST;
-
- if (drop) {
- /* This fragment has been deemed bad. Don't reass */
- if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
- DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
- h->ip_id));
- (*frag)->fr_flags |= PFFRAG_DROP;
- }
-
- m_freem(m);
- return (NULL);
-}
-
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
struct pf_pdesc *pd)
{
struct mbuf *m = *m0;
- struct pf_rule *r;
struct pf_frent *frent;
struct pf_fragment *frag = NULL;
struct ip *h = mtod(m, struct ip *);
@@ -831,39 +531,6 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
u_int16_t max;
int ip_len;
int ip_off;
- int tag = -1;
-
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
- while (r != NULL) {
- r->evaluations++;
- if (pfi_kif_match(r->kif, kif) == r->ifnot)
- r = r->skip[PF_SKIP_IFP].ptr;
- else if (r->direction && r->direction != dir)
- r = r->skip[PF_SKIP_DIR].ptr;
- else if (r->af && r->af != AF_INET)
- r = r->skip[PF_SKIP_AF].ptr;
- else if (r->proto && r->proto != h->ip_p)
- r = r->skip[PF_SKIP_PROTO].ptr;
- else if (PF_MISMATCHAW(&r->src.addr,
- (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
- r->src.neg, kif))
- r = r->skip[PF_SKIP_SRC_ADDR].ptr;
- else if (PF_MISMATCHAW(&r->dst.addr,
- (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
- r->dst.neg, NULL))
- r = r->skip[PF_SKIP_DST_ADDR].ptr;
- else if (r->match_tag && !pf_match_tag(m, r, &tag))
- r = TAILQ_NEXT(r, entries);
- else
- break;
- }
-
- if (r == NULL || r->action == PF_NOSCRUB)
- return (PF_PASS);
- else {
- r->packets[dir == PF_OUT]++;
- r->bytes[dir == PF_OUT] += pd->tot_len;
- }
/* Check for illegal packets */
if (hlen < (int)sizeof(struct ip))
@@ -872,8 +539,8 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
if (hlen > ntohs(h->ip_len))
goto drop;
- /* Clear IP_DF if the rule uses the no-df option */
- if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
+ /* Clear IP_DF if we're in no-df mode */
+ if (!(pf_status.reass & PF_REASS_NODF) && h->ip_off & htons(IP_DF)) {
u_int16_t ip_off = h->ip_off;
h->ip_off &= htons(~IP_DF);
@@ -909,75 +576,35 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
}
max = fragoff + ip_len;
- if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
- /* Fully buffer all of the fragments */
-
- frag = pf_find_fragment(h, &pf_frag_tree);
-
- /* Check if we saw the last fragment already */
- if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
- max > frag->fr_max)
- goto bad;
-
- /* Get an entry for the fragment queue */
- frent = pool_get(&pf_frent_pl, PR_NOWAIT);
- if (frent == NULL) {
- REASON_SET(reason, PFRES_MEMORY);
- return (PF_DROP);
- }
- pf_nfrents++;
- frent->fr_ip = h;
- frent->fr_m = m;
-
- /* Might return a completely reassembled mbuf, or NULL */
- DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
- *m0 = m = pf_reassemble(m0, &frag, frent, mff);
-
- if (m == NULL)
- return (PF_DROP);
-
- if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
- goto drop;
+ /* Fully buffer all of the fragments */
+ frag = pf_find_fragment(h, &pf_frag_tree);
- h = mtod(m, struct ip *);
- } else {
- /* non-buffering fragment cache (drops or masks overlaps) */
- int nomem = 0;
-
- if (dir == PF_OUT && m->m_pkthdr.pf.flags & PF_TAG_FRAGCACHE) {
- /*
- * Already passed the fragment cache in the
- * input direction. If we continued, it would
- * appear to be a dup and would be dropped.
- */
- goto fragment_pass;
- }
+ /* Check if we saw the last fragment already */
+ if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
+ max > frag->fr_max)
+ goto bad;
- frag = pf_find_fragment(h, &pf_cache_tree);
+ /* Get an entry for the fragment queue */
+ frent = pool_get(&pf_frent_pl, PR_NOWAIT);
+ if (frent == NULL) {
+ REASON_SET(reason, PFRES_MEMORY);
+ return (PF_DROP);
+ }
+ pf_nfrents++;
+ frent->fr_ip = h;
+ frent->fr_m = m;
- /* Check if we saw the last fragment already */
- if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
- max > frag->fr_max) {
- if (r->rule_flag & PFRULE_FRAGDROP)
- frag->fr_flags |= PFFRAG_DROP;
- goto bad;
- }
+ /* Might return a completely reassembled mbuf, or NULL */
+ DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
+ *m0 = m = pf_reassemble(m0, &frag, frent, mff);
- *m0 = m = pf_fragcache(m0, h, &frag, mff,
- (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
- if (m == NULL) {
- if (nomem)
- goto no_mem;
- goto drop;
- }
+ if (m == NULL)
+ return (PF_DROP);
- if (dir == PF_IN)
- m->m_pkthdr.pf.flags |= PF_TAG_FRAGCACHE;
+ if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
+ goto drop;
- if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
- goto drop;
- goto fragment_pass;
- }
+ h = mtod(m, struct ip *);
no_fragment:
/* At this point, only IP_DF is allowed in ip_off */
@@ -988,25 +615,11 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
}
- /* not missing a return here */
-
- fragment_pass:
- pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);
-
- if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
- pd->flags |= PFDESC_IP_REAS;
+ pd->flags |= PFDESC_IP_REAS;
return (PF_PASS);
- no_mem:
- REASON_SET(reason, PFRES_MEMORY);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
- return (PF_DROP);
-
drop:
REASON_SET(reason, PFRES_NORM);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
return (PF_DROP);
bad:
@@ -1017,8 +630,6 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
pf_free_fragment(frag);
REASON_SET(reason, PFRES_FRAG);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET, dir, *reason, r, NULL, NULL, pd);
return (PF_DROP);
}
@@ -1029,7 +640,6 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
u_short *reason, struct pf_pdesc *pd)
{
struct mbuf *m = *m0;
- struct pf_rule *r;
struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
int off;
struct ip6_ext ext;
@@ -1043,38 +653,6 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
u_int8_t proto;
int terminal;
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
- while (r != NULL) {
- r->evaluations++;
- if (pfi_kif_match(r->kif, kif) == r->ifnot)
- r = r->skip[PF_SKIP_IFP].ptr;
- else if (r->direction && r->direction != dir)
- r = r->skip[PF_SKIP_DIR].ptr;
- else if (r->af && r->af != AF_INET6)
- r = r->skip[PF_SKIP_AF].ptr;
-#if 0 /* header chain! */
- else if (r->proto && r->proto != h->ip6_nxt)
- r = r->skip[PF_SKIP_PROTO].ptr;
-#endif
- else if (PF_MISMATCHAW(&r->src.addr,
- (struct pf_addr *)&h->ip6_src, AF_INET6,
- r->src.neg, kif))
- r = r->skip[PF_SKIP_SRC_ADDR].ptr;
- else if (PF_MISMATCHAW(&r->dst.addr,
- (struct pf_addr *)&h->ip6_dst, AF_INET6,
- r->dst.neg, NULL))
- r = r->skip[PF_SKIP_DST_ADDR].ptr;
- else
- break;
- }
-
- if (r == NULL || r->action == PF_NOSCRUB)
- return (PF_PASS);
- else {
- r->packets[dir == PF_OUT]++;
- r->bytes[dir == PF_OUT] += pd->tot_len;
- }
-
/* Check for illegal packets */
if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
goto drop;
@@ -1161,8 +739,6 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
goto shortpkt;
- pf_scrub_ip6(&m, r->min_ttl);
-
return (PF_PASS);
fragment:
@@ -1182,20 +758,14 @@ pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
shortpkt:
REASON_SET(reason, PFRES_SHORT);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
return (PF_DROP);
drop:
REASON_SET(reason, PFRES_NORM);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
return (PF_DROP);
badfrag:
REASON_SET(reason, PFRES_FRAG);
- if (r != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET6, dir, *reason, r, NULL, NULL, pd);
return (PF_DROP);
}
#endif /* INET6 */
@@ -1204,55 +774,10 @@ int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
int off, void *h, struct pf_pdesc *pd)
{
- struct pf_rule *r, *rm = NULL;
struct tcphdr *th = pd->hdr.tcp;
- int rewrite = 0;
u_short reason;
u_int8_t flags;
- sa_family_t af = pd->af;
-
- r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
- while (r != NULL) {
- r->evaluations++;
- if (pfi_kif_match(r->kif, kif) == r->ifnot)
- r = r->skip[PF_SKIP_IFP].ptr;
- else if (r->direction && r->direction != dir)
- r = r->skip[PF_SKIP_DIR].ptr;
- else if (r->af && r->af != af)
- r = r->skip[PF_SKIP_AF].ptr;
- else if (r->proto && r->proto != pd->proto)
- r = r->skip[PF_SKIP_PROTO].ptr;
- else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
- r->src.neg, kif))
- r = r->skip[PF_SKIP_SRC_ADDR].ptr;
- else if (r->src.port_op && !pf_match_port(r->src.port_op,
- r->src.port[0], r->src.port[1], th->th_sport))
- r = r->skip[PF_SKIP_SRC_PORT].ptr;
- else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
- r->dst.neg, NULL))
- r = r->skip[PF_SKIP_DST_ADDR].ptr;
- else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
- r->dst.port[0], r->dst.port[1], th->th_dport))
- r = r->skip[PF_SKIP_DST_PORT].ptr;
- else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
- pf_osfp_fingerprint(pd, m, off, th),
- r->os_fingerprint))
- r = TAILQ_NEXT(r, entries);
- else {
- rm = r;
- break;
- }
- }
-
- if (rm == NULL || rm->action == PF_NOSCRUB)
- return (PF_PASS);
- else {
- r->packets[dir == PF_OUT]++;
- r->bytes[dir == PF_OUT] += pd->tot_len;
- }
-
- if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
- pd->flags |= PFDESC_TCP_NORM;
+ u_int rewrite = 0;
flags = th->th_flags;
if (flags & TH_SYN) {
@@ -1298,10 +823,6 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
rewrite = 1;
}
- /* Process options */
- if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
- rewrite = 1;
-
/* copy back packet headers if we sanitized */
if (rewrite)
m_copyback(m, off, sizeof(*th), th);
@@ -1310,8 +831,6 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
tcp_drop:
REASON_SET(&reason, PFRES_NORM);
- if (rm != NULL && r->log)
- PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, r, NULL, NULL, pd);
return (PF_DROP);
}
@@ -1801,13 +1320,12 @@ pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
}
int
-pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
- int off, sa_family_t af)
+pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
{
+ struct tcphdr *th = pd->hdr.tcp;
u_int16_t *mss;
int thoff;
int opt, cnt, optlen = 0;
- int rewrite = 0;
u_char opts[MAX_TCPOPTLEN];
u_char *optp = opts;
@@ -1815,8 +1333,8 @@ pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
cnt = thoff - sizeof(struct tcphdr);
if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
- NULL, NULL, af))
- return (rewrite);
+ NULL, NULL, pd->af))
+ return (0);
for (; cnt > 0; cnt -= optlen, optp += optlen) {
opt = optp[0];
@@ -1834,11 +1352,13 @@ pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
switch (opt) {
case TCPOPT_MAXSEG:
mss = (u_int16_t *)(optp + 2);
- if ((ntohs(*mss)) > r->max_mss) {
+ if ((ntohs(*mss)) > maxmss) {
th->th_sum = pf_cksum_fixup(th->th_sum,
- *mss, htons(r->max_mss), 0);
- *mss = htons(r->max_mss);
- rewrite = 1;
+ *mss, htons(maxmss), 0);
+ *mss = htons(maxmss);
+ m_copyback(m, off + sizeof(*th),
+ thoff - sizeof(*th), opts);
+ m_copyback(m, off, sizeof(*th), th);
}
break;
default:
@@ -1846,20 +1366,19 @@ pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
}
}
- if (rewrite)
- m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);
- return (rewrite);
+
+ return (0);
}
void
-pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
+pf_scrub_ip(struct mbuf **m0, u_int8_t flags, u_int8_t min_ttl, u_int8_t tos)
{
struct mbuf *m = *m0;
struct ip *h = mtod(m, struct ip *);
/* Clear IP_DF if no-df was requested */
- if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
+ if (flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
u_int16_t ip_off = h->ip_off;
h->ip_off &= htons(~IP_DF);
@@ -1875,7 +1394,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
}
/* Enforce tos */
- if (flags & PFRULE_SET_TOS) {
+ if (flags & PFSTATE_SETTOS) {
u_int16_t ov, nv;
ov = *(u_int16_t *)h;
@@ -1886,7 +1405,7 @@ pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
}
/* random-id, but not for fragments */
- if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
+ if (flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
u_int16_t ip_id = h->ip_id;
h->ip_id = ip_randomid();
diff --git a/sys/net/pf_ruleset.c b/sys/net/pf_ruleset.c
index b04ab88cc61..2cc57e52394 100644
--- a/sys/net/pf_ruleset.c
+++ b/sys/net/pf_ruleset.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_ruleset.c,v 1.3 2009/01/06 21:57:51 thib Exp $ */
+/* $OpenBSD: pf_ruleset.c,v 1.4 2009/04/06 12:05:55 henning Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -102,11 +102,8 @@ int
pf_get_ruleset_number(u_int8_t action)
{
switch (action) {
- case PF_SCRUB:
- case PF_NOSCRUB:
- return (PF_RULESET_SCRUB);
- break;
case PF_PASS:
+ case PF_MATCH:
case PF_DROP:
return (PF_RULESET_FILTER);
break;
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index 8dd2f87c0fc..9952de3cf69 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfvar.h,v 1.284 2009/03/09 13:53:10 mcbride Exp $ */
+/* $OpenBSD: pfvar.h,v 1.285 2009/04/06 12:05:55 henning Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -59,9 +59,10 @@ struct ip6_hdr;
enum { PF_INOUT, PF_IN, PF_OUT };
enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT,
- PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DEFER };
-enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT,
- PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_MAX };
+ PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DEFER,
+ PF_MATCH };
+enum { PF_RULESET_FILTER, PF_RULESET_NAT, PF_RULESET_BINAT,
+ PF_RULESET_RDR, PF_RULESET_MAX };
enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT,
PF_OP_LE, PF_OP_GT, PF_OP_GE, PF_OP_XRG, PF_OP_RRG };
enum { PF_DEBUG_NONE, PF_DEBUG_URGENT, PF_DEBUG_MISC, PF_DEBUG_NOISY };
@@ -122,6 +123,7 @@ enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL,
#define PF_LOG 0x01
#define PF_LOG_ALL 0x02
#define PF_LOG_SOCKET_LOOKUP 0x04
+#define PF_LOG_FORCE 0x08
struct pf_addr {
union {
@@ -491,6 +493,17 @@ struct pf_osfp_ioctl {
int fp_getnum; /* DIOCOSFPGET number */
};
+struct pf_rule_actions {
+ int rtableid;
+ u_int16_t qid;
+ u_int16_t pqid;
+ u_int16_t max_mss;
+ u_int8_t log;
+ u_int8_t set_tos;
+ u_int8_t min_ttl;
+ u_int8_t flags;
+ u_int8_t pad[2];
+};
union pf_rule_ptr {
struct pf_rule *ptr;
@@ -599,6 +612,8 @@ struct pf_rule {
#define PF_FLUSH 0x01
#define PF_FLUSH_GLOBAL 0x02
u_int8_t flush;
+ u_int8_t scrub_flags;
+ u_int8_t pad2[3];
struct {
struct pf_addr addr;
@@ -616,14 +631,6 @@ struct pf_rule {
#define PFRULE_SRCTRACK 0x0020 /* track source states */
#define PFRULE_RULESRCTRACK 0x0040 /* per rule */
-/* scrub flags */
-#define PFRULE_NODF 0x0100
-#define PFRULE_FRAGCROP 0x0200 /* non-buffering frag cache */
-#define PFRULE_FRAGDROP 0x0400 /* drop funny fragments */
-#define PFRULE_RANDOMID 0x0800
-#define PFRULE_REASSEMBLE_TCP 0x1000
-#define PFRULE_SET_TOS 0x2000
-
/* rule flags again */
#define PFRULE_IFBOUND 0x00010000 /* if-bound */
#define PFRULE_STATESLOPPY 0x00020000 /* sloppy state tracking */
@@ -643,6 +650,13 @@ struct pf_threshold {
u_int32_t last;
};
+struct pf_rule_item {
+ SLIST_ENTRY(pf_rule_item) entry;
+ struct pf_rule *r;
+};
+
+SLIST_HEAD(pf_rule_slist, pf_rule_item);
+
struct pf_src_node {
RB_ENTRY(pf_src_node) entry;
struct pf_addr addr;
@@ -747,6 +761,7 @@ struct pf_state {
RB_ENTRY(pf_state) entry_id;
struct pf_state_peer src;
struct pf_state_peer dst;
+ struct pf_rule_slist match_rules;
union pf_rule_ptr rule;
union pf_rule_ptr anchor;
union pf_rule_ptr nat_rule;
@@ -761,20 +776,30 @@ struct pf_state {
u_int32_t creation;
u_int32_t expire;
u_int32_t pfsync_time;
+ u_int16_t qid;
+ u_int16_t pqid;
u_int16_t tag;
+ u_int16_t state_flags;
+#define PFSTATE_ALLOWOPTS 0x0001
+#define PFSTATE_SLOPPY 0x0002
+#define PFSTATE_PFLOW 0x0004
+#define PFSTATE_NOSYNC 0x0008
+#define PFSTATE_ACK 0x0010
+#define PFSTATE_NODF 0x0020
+#define PFSTATE_SETTOS 0x0040
+#define PFSTATE_RANDOMID 0x0080
+#define PFSTATE_SCRUB_TCP 0x0100
u_int8_t log;
- u_int8_t state_flags;
-#define PFSTATE_ALLOWOPTS 0x01
-#define PFSTATE_SLOPPY 0x02
-#define PFSTATE_PFLOW 0x04
-#define PFSTATE_NOSYNC 0x08
-#define PFSTATE_ACK 0x10
u_int8_t timeout;
u_int8_t sync_state; /* PFSYNC_S_x */
/* XXX */
u_int8_t sync_updates;
- u_int8_t _tail[3];
+
+ int16_t rtableid;
+ u_int8_t min_ttl;
+ u_int8_t set_tos;
+ u_int16_t max_mss;
};
/*
@@ -1110,9 +1135,7 @@ struct pf_pdesc {
u_int16_t *ip_sum;
u_int16_t *proto_sum;
- u_int16_t flags; /* Let SCRUB trigger behavior in
- * state code. Easier than tags */
-#define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */
+ u_int16_t flags;
#define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */
sa_family_t af;
u_int8_t proto;
@@ -1250,10 +1273,14 @@ struct pf_status {
u_int32_t since;
u_int32_t debug;
u_int32_t hostid;
+ u_int32_t reass; /* reassembly */
char ifname[IFNAMSIZ];
u_int8_t pf_chksum[PF_MD5_DIGEST_LENGTH];
};
+#define PF_REASS_ENABLED 0x01
+#define PF_REASS_NODF 0x02
+
struct cbq_opts {
u_int minburst;
u_int maxburst;
@@ -1579,6 +1606,7 @@ struct pfioc_iface {
#define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface)
#define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface)
#define DIOCKILLSRCNODES _IOWR('D', 91, struct pfioc_src_node_kill)
+#define DIOCSETREASS _IOWR('D', 92, u_int32_t)
#ifdef _KERNEL
RB_HEAD(pf_src_tree, pf_src_node);
@@ -1612,7 +1640,7 @@ extern void pf_tbladdr_copyout(struct pf_addr_wrap *);
extern void pf_calc_skip_steps(struct pf_rulequeue *);
extern struct pool pf_src_tree_pl, pf_rule_pl;
extern struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl,
- pf_altq_pl, pf_pooladdr_pl;
+ pf_altq_pl, pf_pooladdr_pl, pf_rule_item_pl;
extern struct pool pf_state_scrub_pl;
extern void pf_purge_thread(void *);
extern void pf_purge_expired_src_nodes(int);
@@ -1683,6 +1711,9 @@ int pf_normalize_tcp_init(struct mbuf *, int, struct pf_pdesc *,
int pf_normalize_tcp_stateful(struct mbuf *, int, struct pf_pdesc *,
u_short *, struct tcphdr *, struct pf_state *,
struct pf_state_peer *, struct pf_state_peer *, int *);
+int pf_normalize_mss(struct mbuf *, int, struct pf_pdesc *, u_int16_t);
+void pf_scrub_ip(struct mbuf **, u_int8_t, u_int8_t, u_int8_t);
+void pf_scrub_ip6(struct mbuf **, u_int8_t);
u_int32_t
pf_state_expires(const struct pf_state *);
void pf_purge_expired_fragments(void);