summaryrefslogtreecommitdiff
path: root/sys/net
diff options
context:
space:
mode:
authorDavid Gwynne <dlg@cvs.openbsd.org>2019-11-07 08:03:19 +0000
committerDavid Gwynne <dlg@cvs.openbsd.org>2019-11-07 08:03:19 +0000
commit21d5fb1156f1a9b50d8577aa316ed3939e987877 (patch)
treeb2813c73991e91c1a244b2c3c8e9119912203763 /sys/net
parent37888cbb216c3b0a4b8628e4f5b1f49605c2ff56 (diff)
serialise hook adds and dels with a mutex instead of an implicit NET_LOCK.
i had NET_ASSERT_LOCKED() in the hook add and remove operations, because that's what's held when the hooks are run. some callers do not hold the NET_LOCK when calling them though, eg, bridge(4). aggr and tpmr used to not hold NET_LOCK while being destroyed, which also caused the asserts to fire, so i moved the port destroys inside NET_LOCK, but now I have deadlocks with some barrier calls. the hooks having their own lock means callers don't have to hold NET_LOCK and the list will stay sane. the code that runs the hooks gives up the mutex when calling the hook, but keeps track of where it's up to by putting a cursor in the list. there's a single global mutex for all the interface linkstate and detach hooks, but this stuff isn't a hot path by any stretch of the imagination. based on (a lot of) testing by hrvoje popovski. thank you.
Diffstat (limited to 'sys/net')
-rw-r--r--sys/net/if.c59
1 files changed, 46 insertions, 13 deletions
diff --git a/sys/net/if.c b/sys/net/if.c
index 0c96e305224..9c029556acb 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if.c,v 1.590 2019/11/07 07:36:31 dlg Exp $ */
+/* $OpenBSD: if.c,v 1.591 2019/11/07 08:03:18 dlg Exp $ */
/* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */
/*
@@ -228,6 +228,10 @@ TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);
LIST_HEAD(, if_clone) if_cloners = LIST_HEAD_INITIALIZER(if_cloners);
int if_cloners_count;
+/* hooks should only be added, deleted, and run from a process context */
+struct mutex if_hooks_mtx = MUTEX_INITIALIZER(IPL_NONE);
+void if_hooks_run(struct task_list *);
+
struct timeout net_tick_to;
void net_tick(void *);
int net_livelocked(void);
@@ -1040,10 +1044,39 @@ if_netisr(void *unused)
}
void
-if_deactivate(struct ifnet *ifp)
+if_hooks_run(struct task_list *hooks)
{
- struct task *t, *nt;
+ struct task *t, *nt, cursor;
+ void (*func)(void *);
+ void *arg;
+
+ /*
+ * holding the NET_LOCK guarantees that concurrent if_hooks_run
+ * calls can't happen, and they therefore can't try and call
+ * each others cursors as actual hooks.
+ */
+ NET_ASSERT_LOCKED();
+
+ mtx_enter(&if_hooks_mtx);
+ for (t = TAILQ_FIRST(hooks); t != NULL; t = nt) {
+ func = t->t_func;
+ arg = t->t_arg;
+
+ TAILQ_INSERT_AFTER(hooks, t, &cursor, t_entry);
+ mtx_leave(&if_hooks_mtx);
+
+ (*func)(arg);
+
+ mtx_enter(&if_hooks_mtx);
+ nt = TAILQ_NEXT(&cursor, t_entry); /* avoid _Q_INVALIDATE */
+ TAILQ_REMOVE(hooks, &cursor, t_entry);
+ }
+ mtx_leave(&if_hooks_mtx);
+}
+void
+if_deactivate(struct ifnet *ifp)
+{
/*
* Call detach hooks from head to tail. To make sure detach
* hooks are executed in the reverse order they were added, all
@@ -1051,23 +1084,24 @@ if_deactivate(struct ifnet *ifp)
*/
NET_LOCK();
- TAILQ_FOREACH_SAFE(t, &ifp->if_detachhooks, t_entry, nt)
- (*t->t_func)(t->t_arg);
+ if_hooks_run(&ifp->if_detachhooks);
NET_UNLOCK();
}
void
if_detachhook_add(struct ifnet *ifp, struct task *t)
{
- NET_ASSERT_LOCKED();
+ mtx_enter(&if_hooks_mtx);
TAILQ_INSERT_HEAD(&ifp->if_detachhooks, t, t_entry);
+ mtx_leave(&if_hooks_mtx);
}
void
if_detachhook_del(struct ifnet *ifp, struct task *t)
{
- NET_ASSERT_LOCKED();
+ mtx_enter(&if_hooks_mtx);
TAILQ_REMOVE(&ifp->if_detachhooks, t, t_entry);
+ mtx_leave(&if_hooks_mtx);
}
/*
@@ -1643,29 +1677,28 @@ if_linkstate_task(void *xifidx)
void
if_linkstate(struct ifnet *ifp)
{
- struct task *t, *nt;
-
NET_ASSERT_LOCKED();
rtm_ifchg(ifp);
rt_if_track(ifp);
- TAILQ_FOREACH_SAFE(t, &ifp->if_linkstatehooks, t_entry, nt)
- (*t->t_func)(t->t_arg);
+ if_hooks_run(&ifp->if_linkstatehooks);
}
void
if_linkstatehook_add(struct ifnet *ifp, struct task *t)
{
- NET_ASSERT_LOCKED();
+ mtx_enter(&if_hooks_mtx);
TAILQ_INSERT_TAIL(&ifp->if_linkstatehooks, t, t_entry);
+ mtx_leave(&if_hooks_mtx);
}
void
if_linkstatehook_del(struct ifnet *ifp, struct task *t)
{
- NET_ASSERT_LOCKED();
+ mtx_enter(&if_hooks_mtx);
TAILQ_REMOVE(&ifp->if_linkstatehooks, t, t_entry);
+ mtx_leave(&if_hooks_mtx);
}
/*