author     Alexander Bluhm <bluhm@cvs.openbsd.org>    2024-04-22 13:30:23 +0000
committer  Alexander Bluhm <bluhm@cvs.openbsd.org>    2024-04-22 13:30:23 +0000
commit     befc02d4f4652245622781a885024dafa2b58901 (patch)
tree       c7739a4140f7b70c26e564401b9a9790d4d88621 /sys
parent     7f7f57b74856806c864572d00dc6f31cfb717b3a (diff)
Show pf fragment reassembly counters.
Fragment count and statistics are stored in struct pf_status. From there pfctl(8) and systat(1) collect and show them. Note that pfctl -s info needs the -v switch to show fragments. As fragment reassembly has its own mutex, also grab this in pf ioctl(2) and sysctl(2) code. input claudio@; OK henning@
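The kernel side only maintains the counters; pfctl(8) and systat(1) read them through the existing DIOCGETSTATUS snapshot of struct pf_status (their changes live outside sys/ and are not in this diff). As a rough illustration only, not code from this commit, a userland reader of the new fields could look roughly like this, with error handling trimmed:

/*
 * Illustrative sketch, not part of this commit: read the new fragment
 * counters via the DIOCGETSTATUS ioctl, which copies out the struct
 * pf_status snapshot taken under PF_FRAG_LOCK() in the pf_ioctl.c hunk
 * below.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct pf_status st;
	int dev;

	if ((dev = open("/dev/pf", O_RDONLY)) == -1)
		return (1);
	if (ioctl(dev, DIOCGETSTATUS, &st) == -1)
		return (1);
	/* current number of cached fragment entries */
	printf("fragments %u\n", st.fragments);
	/* per-operation reassembly counters added by this commit */
	printf("searches %llu inserts %llu removals %llu\n",
	    (unsigned long long)st.ncounters[NCNT_FRAG_SEARCH],
	    (unsigned long long)st.ncounters[NCNT_FRAG_INSERT],
	    (unsigned long long)st.ncounters[NCNT_FRAG_REMOVALS]);
	return (0);
}

The stock way to see the same numbers is pfctl -vs info; as the commit message notes, the fragment lines are only printed with the -v switch.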
Diffstat (limited to 'sys')
-rw-r--r--  sys/net/pf_ioctl.c      9
-rw-r--r--  sys/net/pf_norm.c      34
-rw-r--r--  sys/net/pfvar.h         9
-rw-r--r--  sys/net/pfvar_priv.h    6
4 files changed, 40 insertions, 18 deletions
diff --git a/sys/net/pf_ioctl.c b/sys/net/pf_ioctl.c
index 078fa72e083..567ee888077 100644
--- a/sys/net/pf_ioctl.c
+++ b/sys/net/pf_ioctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_ioctl.c,v 1.415 2023/07/06 04:55:05 dlg Exp $ */
+/* $OpenBSD: pf_ioctl.c,v 1.416 2024/04/22 13:30:22 bluhm Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -1955,7 +1955,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
struct pf_status *s = (struct pf_status *)addr;
NET_LOCK();
PF_LOCK();
+ PF_FRAG_LOCK();
memcpy(s, &pf_status, sizeof(struct pf_status));
+ PF_FRAG_UNLOCK();
pfi_update_status(s->ifname, s);
PF_UNLOCK();
NET_UNLOCK();
@@ -1996,6 +1998,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
memset(pf_status.counters, 0, sizeof(pf_status.counters));
memset(pf_status.fcounters, 0, sizeof(pf_status.fcounters));
memset(pf_status.scounters, 0, sizeof(pf_status.scounters));
+ PF_FRAG_LOCK();
+ memset(pf_status.ncounters, 0, sizeof(pf_status.ncounters));
+ PF_FRAG_UNLOCK();
pf_status.since = getuptime();
PF_UNLOCK();
@@ -3271,7 +3276,9 @@ pf_sysctl(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
NET_LOCK_SHARED();
PF_LOCK();
+ PF_FRAG_LOCK();
memcpy(&pfs, &pf_status, sizeof(struct pf_status));
+ PF_FRAG_UNLOCK();
pfi_update_status(pfs.ifname, &pfs);
PF_UNLOCK();
NET_UNLOCK_SHARED();
diff --git a/sys/net/pf_norm.c b/sys/net/pf_norm.c
index fde4a21f06b..571dbf9d346 100644
--- a/sys/net/pf_norm.c
+++ b/sys/net/pf_norm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pf_norm.c,v 1.229 2023/10/10 11:25:31 bluhm Exp $ */
+/* $OpenBSD: pf_norm.c,v 1.230 2024/04/22 13:30:22 bluhm Exp $ */
/*
* Copyright 2001 Niels Provos <provos@citi.umich.edu>
@@ -150,13 +150,10 @@ int pf_reassemble6(struct mbuf **, struct ip6_frag *,
/* Globals */
struct pool pf_frent_pl, pf_frag_pl, pf_frnode_pl;
struct pool pf_state_scrub_pl;
-int pf_nfrents;
struct mutex pf_frag_mtx;
#define PF_FRAG_LOCK_INIT() mtx_init(&pf_frag_mtx, IPL_SOFTNET)
-#define PF_FRAG_LOCK() mtx_enter(&pf_frag_mtx)
-#define PF_FRAG_UNLOCK() mtx_leave(&pf_frag_mtx)
void
pf_normalize_init(void)
@@ -233,11 +230,12 @@ void
pf_flush_fragments(void)
{
struct pf_fragment *frag;
- int goal;
+ u_int goal;
- goal = pf_nfrents * 9 / 10;
- DPFPRINTF(LOG_NOTICE, "trying to free > %d frents", pf_nfrents - goal);
- while (goal < pf_nfrents) {
+ goal = pf_status.fragments * 9 / 10;
+ DPFPRINTF(LOG_NOTICE, "trying to free > %u frents",
+ pf_status.fragments - goal);
+ while (goal < pf_status.fragments) {
if ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) == NULL)
break;
pf_free_fragment(frag);
@@ -268,9 +266,10 @@ pf_free_fragment(struct pf_fragment *frag)
/* Free all fragment entries */
while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ pf_status.ncounters[NCNT_FRAG_REMOVALS]++;
m_freem(frent->fe_m);
pool_put(&pf_frent_pl, frent);
- pf_nfrents--;
+ pf_status.fragments--;
}
pool_put(&pf_frag_pl, frag);
}
@@ -283,6 +282,7 @@ pf_find_fragment(struct pf_frnode *key, u_int32_t id)
u_int32_t stale;
frnode = RB_FIND(pf_frnode_tree, &pf_frnode_tree, key);
+ pf_status.ncounters[NCNT_FRAG_SEARCH]++;
if (frnode == NULL)
return (NULL);
KASSERT(frnode->fn_fragments >= 1);
@@ -328,7 +328,7 @@ pf_create_fragment(u_short *reason)
return (NULL);
}
}
- pf_nfrents++;
+ pf_status.fragments++;
return (frent);
}
@@ -405,6 +405,7 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off);
TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
}
+ pf_status.ncounters[NCNT_FRAG_INSERT]++;
if (frag->fr_firstoff[index] == NULL) {
KASSERT(prev == NULL || pf_frent_index(prev) < index);
@@ -456,6 +457,7 @@ pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
}
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ pf_status.ncounters[NCNT_FRAG_REMOVALS]++;
KASSERT(frag->fr_entries[index] > 0);
frag->fr_entries[index]--;
@@ -689,7 +691,7 @@ pf_fillup_fragment(struct pf_frnode *key, u_int32_t id,
"fragment requeue limit exceeded");
m_freem(after->fe_m);
pool_put(&pf_frent_pl, after);
- pf_nfrents--;
+ pf_status.fragments--;
/* There is not way to recover */
goto free_fragment;
}
@@ -703,7 +705,7 @@ pf_fillup_fragment(struct pf_frnode *key, u_int32_t id,
pf_frent_remove(frag, after);
m_freem(after->fe_m);
pool_put(&pf_frent_pl, after);
- pf_nfrents--;
+ pf_status.fragments--;
}
/* If part of the queue gets too long, there is not way to recover. */
@@ -730,7 +732,7 @@ bad_fragment:
REASON_SET(reason, PFRES_FRAG);
drop_fragment:
pool_put(&pf_frent_pl, frent);
- pf_nfrents--;
+ pf_status.fragments--;
return (NULL);
}
@@ -742,6 +744,7 @@ pf_join_fragment(struct pf_fragment *frag)
frent = TAILQ_FIRST(&frag->fr_queue);
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ pf_status.ncounters[NCNT_FRAG_REMOVALS]++;
m = frent->fe_m;
/* Strip off any trailing bytes */
@@ -752,10 +755,11 @@ pf_join_fragment(struct pf_fragment *frag)
m->m_next = NULL;
m_cat(m, m2);
pool_put(&pf_frent_pl, frent);
- pf_nfrents--;
+ pf_status.fragments--;
while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+ pf_status.ncounters[NCNT_FRAG_REMOVALS]++;
m2 = frent->fe_m;
/* Strip off ip header */
m_adj(m2, frent->fe_hdrlen);
@@ -763,7 +767,7 @@ pf_join_fragment(struct pf_fragment *frag)
if (frent->fe_len < m2->m_pkthdr.len)
m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
pool_put(&pf_frent_pl, frent);
- pf_nfrents--;
+ pf_status.fragments--;
m_removehdr(m2);
m_cat(m, m2);
}
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index bfddbf9da10..cc35f445871 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfvar.h,v 1.535 2024/01/01 22:16:51 bluhm Exp $ */
+/* $OpenBSD: pfvar.h,v 1.536 2024/04/22 13:30:22 bluhm Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -1192,6 +1192,11 @@ enum pfi_kif_refs {
#define SCNT_SRC_NODE_REMOVALS 2
#define SCNT_MAX 3
+#define NCNT_FRAG_SEARCH 0
+#define NCNT_FRAG_INSERT 1
+#define NCNT_FRAG_REMOVALS 2
+#define NCNT_MAX 3
+
#define REASON_SET(a, x) \
do { \
if ((void *)(a) != NULL) { \
@@ -1206,6 +1211,7 @@ struct pf_status {
u_int64_t lcounters[LCNT_MAX]; /* limit counters */
u_int64_t fcounters[FCNT_MAX];
u_int64_t scounters[SCNT_MAX];
+ u_int64_t ncounters[NCNT_MAX];
u_int64_t pcounters[2][2][3];
u_int64_t bcounters[2][2];
u_int64_t stateid;
@@ -1215,6 +1221,7 @@ struct pf_status {
u_int32_t states;
u_int32_t states_halfopen;
u_int32_t src_nodes;
+ u_int32_t fragments;
u_int32_t debug;
u_int32_t hostid;
u_int32_t reass; /* reassembly */
diff --git a/sys/net/pfvar_priv.h b/sys/net/pfvar_priv.h
index f06b7f6fab3..7a420bfa308 100644
--- a/sys/net/pfvar_priv.h
+++ b/sys/net/pfvar_priv.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pfvar_priv.h,v 1.35 2024/01/01 22:16:51 bluhm Exp $ */
+/* $OpenBSD: pfvar_priv.h,v 1.36 2024/04/22 13:30:22 bluhm Exp $ */
/*
* Copyright (c) 2001 Daniel Hartmeier
@@ -370,6 +370,7 @@ void pf_state_unref(struct pf_state *);
extern struct rwlock pf_lock;
extern struct rwlock pf_state_lock;
+extern struct mutex pf_frag_mtx;
extern struct mutex pf_inp_mtx;
#define PF_LOCK() do { \
@@ -415,6 +416,9 @@ extern struct mutex pf_inp_mtx;
rw_status(&pf_state_lock), __func__);\
} while (0)
+#define PF_FRAG_LOCK() mtx_enter(&pf_frag_mtx)
+#define PF_FRAG_UNLOCK() mtx_leave(&pf_frag_mtx)
+
/* for copies to/from network byte order */
void pf_state_peer_hton(const struct pf_state_peer *,
struct pfsync_state_peer *);