summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2011-05-10 01:10:09 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2011-05-10 01:10:09 +0000
commita98d1c5a81faf275a42a852b4d5e22d8e17b8219 (patch)
treee5d5b93ab43fdad32b846264ab047ef074940088
parentf10e89cb5a708d686ab2cc41111518f616306f8e (diff)
when undeferring a packet, try timeout_del first to check whether you
actually removed it from the timeout wheel before releasing it. if timeout_del returns 0, then you know the timeout is about to run or is already running, meaning it will free itself so you don't have to. this handling is only done for the undefer paths at SOFTNET, since it is higher than SOFTCLOCK, which timeouts run from. it is possible for a timeout to start running at softclock and get interrupted by softnet. the undefer in process context blocks both these interrupts while it undefers, so it is impossible there for the timeout to run and leave the list in this inconsistent state.
-rw-r--r--sys/net/if_pfsync.c21
1 files changed, 14 insertions, 7 deletions
diff --git a/sys/net/if_pfsync.c b/sys/net/if_pfsync.c
index eb547afc4f3..98eedd649b6 100644
--- a/sys/net/if_pfsync.c
+++ b/sys/net/if_pfsync.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_pfsync.c,v 1.162 2011/04/02 17:16:34 dlg Exp $ */
+/* $OpenBSD: if_pfsync.c,v 1.163 2011/05/10 01:10:08 dlg Exp $ */
/*
* Copyright (c) 2002 Michael Shalayeff
@@ -345,6 +345,7 @@ int
pfsync_clone_destroy(struct ifnet *ifp)
{
struct pfsync_softc *sc = ifp->if_softc;
+ struct pfsync_deferral *pd;
int s;
timeout_del(&sc->sc_bulk_tmo);
@@ -358,8 +359,11 @@ pfsync_clone_destroy(struct ifnet *ifp)
pfsync_drop(sc);
s = splsoftnet();
- while (sc->sc_deferred > 0)
- pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
+ while (sc->sc_deferred > 0) {
+ pd = TAILQ_FIRST(&sc->sc_deferrals);
+ timeout_del(&pd->pd_tmo);
+ pfsync_undefer(pd, 0);
+ }
splx(s);
pool_destroy(&sc->sc_pool);
@@ -1712,8 +1716,11 @@ pfsync_defer(struct pf_state *st, struct mbuf *m)
m->m_flags & (M_BCAST|M_MCAST))
return (0);
- if (sc->sc_deferred >= 128)
- pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
+ if (sc->sc_deferred >= 128) {
+ pd = TAILQ_FIRST(&sc->sc_deferrals);
+ if (timeout_del(&pd->pd_tmo))
+ pfsync_undefer(pd, 0);
+ }
pd = pool_get(&sc->sc_pool, M_NOWAIT);
if (pd == NULL)
@@ -1743,7 +1750,6 @@ pfsync_undefer(struct pfsync_deferral *pd, int drop)
splsoftassert(IPL_SOFTNET);
- timeout_del(&pd->pd_tmo); /* bah */
TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
sc->sc_deferred--;
@@ -1788,7 +1794,8 @@ pfsync_deferred(struct pf_state *st, int drop)
TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
if (pd->pd_st == st) {
- pfsync_undefer(pd, drop);
+ if (timeout_del(&pd->pd_tmo))
+ pfsync_undefer(pd, drop);
return;
}
}