| author | David Gwynne <dlg@cvs.openbsd.org> | 2015-11-20 03:35:24 +0000 |
|---|---|---|
| committer | David Gwynne <dlg@cvs.openbsd.org> | 2015-11-20 03:35:24 +0000 |
| commit | 93d7f55b038092ded0514b3f745fd014c1845492 (patch) | |
| tree | da8fbd80cf246b5a4ebcdf952d9bb7b92cbe5e19 /sys/net | |
| parent | d3a33e8102f3bbde40bab36e7fc5067836baf064 (diff) | |
shuffle struct ifqueue so in flight mbufs are protected by a mutex.
the code is refactored so the IFQ macros call newly implemented ifq
functions. the ifq code is split so each discipline (priq and hfsc
in our case) is an opaque set of operations that the common ifq
code can call. the common code does the locking, accounting (ifq_len
manipulation), and freeing of the mbuf if the discipline's enqueue
function rejects it. they're kind of like bufqs in the block layer
with their fifo and nscan disciplines.
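for reference, this is the whole of a discipline's interface as
added to if_var.h below; the enqueue, dequeue, and purge ops are
called by the common code with the ifq mutex held:

```c
/* per-discipline ops table (from the if_var.h hunk below). the
 * common ifq code does the locking and ifq_len accounting around
 * these; alloc/free run unlocked during ifq_init/ifq_attach. */
struct ifq_ops {
	void		*(*ifqop_alloc)(void *);
	void		 (*ifqop_free)(void *);
	int		 (*ifqop_enq)(struct ifqueue *, struct mbuf *);
	struct mbuf	*(*ifqop_deq_begin)(struct ifqueue *, void **);
	void		 (*ifqop_deq_commit)(struct ifqueue *,
			    struct mbuf *, void *);
	void		 (*ifqop_deq_rollback)(struct ifqueue *,
			    struct mbuf *, void *);
	void		 (*ifqop_purge)(struct ifqueue *,
			    struct mbuf_list *);
};
```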
the new api also supports atomic switching of disciplines at runtime.
the hfsc setup in pf_ioctl.c has been tweaked to build a complete
hfsc_if structure which it attaches to the send queue in a single
operation, rather than attaching to the interface up front and
building up a list of queues.
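condensed, the new flow looks like this (a sketch based on
pf_create_queues() in the pf_ioctl.c hunk below; error handling and
the pf_hfsc_queue bookkeeping list are left out):

```c
/* build the complete hfsc_if first, then swap it onto the send
 * queue in a single operation. */
struct hfsc_if *hif = hfsc_pf_alloc(ifp);	/* alloc hfsc state */
struct pf_queuespec *q;

TAILQ_FOREACH(q, pf_queues_active, entries) {
	if (q->kif->pfik_ifp == ifp)
		hfsc_pf_addqueue(hif, q);	/* grow the class tree */
}

ifq_attach(&ifp->if_snd, ifq_hfsc_ops, hif);	/* atomic switch */
```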
the send queue is now mutexed, which raises the expectation that
packets can be enqueued or purged on one cpu while another cpu is
dequeueing them in a driver for transmission. a lot of drivers use
IFQ_POLL to peek at an mbuf and attempt to fit it on the ring before
committing to it with a later IFQ_DEQUEUE operation. if the mbuf
gets freed in between the POLL and DEQUEUE operations, fireworks
will ensue.
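roughly, the racy shape is this (a sketch: txring_has_room() is a
hypothetical stand-in for whatever capacity check a given driver
does):

```c
/* old driver start loop: peek with POLL, check ring space, then
 * DEQUEUE. with a mutexed queue, m can be purged and freed on
 * another cpu between the two operations. */
for (;;) {
	IFQ_POLL(&ifp->if_snd, m);	/* peek; queue still owns m */
	if (m == NULL)
		break;
	if (!txring_has_room(sc, m))	/* hypothetical check */
		break;			/* m stays on the queue */
	IFQ_DEQUEUE(&ifp->if_snd, m);	/* m may already be freed */
	/* ... encapsulate m onto the tx ring ... */
}
```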
to avoid this, the ifq api introduces ifq_deq_begin, ifq_deq_rollback,
and ifq_deq_commit. ifq_deq_begin allows a driver to take the ifq
mutex and get a reference to the mbuf they wish to try to tx. if
there's space, they can ifq_deq_commit it to remove the mbuf and
release the mutex. if there's no space, ifq_deq_rollback simply
releases the mutex. this api was developed to make updating the
drivers that use IFQ_POLL easy, instead of requiring significant
semantic changes to avoid POLL that we cannot test on all the
hardware.
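the same loop with the new calls (same hypothetical helper); the
ifq mutex is held from begin until commit or rollback, so nothing
can free the mbuf in between:

```c
for (;;) {
	m = ifq_deq_begin(&ifp->if_snd);	/* takes the ifq mutex */
	if (m == NULL)
		break;
	if (!txring_has_room(sc, m)) {		/* hypothetical check */
		/* no space: leave m on the queue, drop the mutex */
		ifq_deq_rollback(&ifp->if_snd, m);
		break;
	}
	/* commit: remove m from the queue and drop the mutex */
	ifq_deq_commit(&ifp->if_snd, m);
	/* ... m is now the driver's; encapsulate and transmit ... */
}
```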
the common code has been tested pretty hard, and all the driver
modifications are straightforward except for de(4). if that breaks,
it can be dealt with later.
ok mpi@ jmatthew@
Diffstat (limited to 'sys/net')
| -rw-r--r-- | sys/net/hfsc.c | 327 |
| -rw-r--r-- | sys/net/hfsc.h | 19 |
| -rw-r--r-- | sys/net/if.c | 334 |
| -rw-r--r-- | sys/net/if_tun.c | 46 |
| -rw-r--r-- | sys/net/if_var.h | 167 |
| -rw-r--r-- | sys/net/pf_if.c | 7 |
| -rw-r--r-- | sys/net/pf_ioctl.c | 167 |
| -rw-r--r-- | sys/net/pfvar.h | 5 |
8 files changed, 702 insertions, 370 deletions
diff --git a/sys/net/hfsc.c b/sys/net/hfsc.c index 1b0f3752c94..05cd4f9e978 100644 --- a/sys/net/hfsc.c +++ b/sys/net/hfsc.c @@ -1,4 +1,4 @@ -/* $OpenBSD: hfsc.c,v 1.30 2015/11/09 01:06:31 dlg Exp $ */ +/* $OpenBSD: hfsc.c,v 1.31 2015/11/20 03:35:23 dlg Exp $ */ /* * Copyright (c) 2012-2013 Henning Brauer <henning@openbsd.org> @@ -181,11 +181,11 @@ struct hfsc_class { */ struct hfsc_if { struct hfsc_if *hif_next; /* interface state list */ - struct ifqueue *hif_ifq; /* backpointer to ifq */ struct hfsc_class *hif_rootclass; /* root class */ struct hfsc_class *hif_defaultclass; /* default class */ struct hfsc_class **hif_class_tbl; - struct hfsc_class *hif_pollcache; /* cache for poll operation */ + + u_int64_t hif_microtime; /* time at deq_begin */ u_int hif_allocated; /* # of slots in hif_class_tbl */ u_int hif_classes; /* # of classes in the tree */ @@ -206,9 +206,8 @@ int hfsc_class_destroy(struct hfsc_if *, struct hfsc_class *); struct hfsc_class *hfsc_nextclass(struct hfsc_class *); -struct mbuf *hfsc_cl_dequeue(struct hfsc_class *); -struct mbuf *hfsc_cl_poll(struct hfsc_class *); -void hfsc_cl_purge(struct hfsc_if *, struct hfsc_class *); +void hfsc_cl_purge(struct hfsc_if *, struct hfsc_class *, + struct mbuf_list *); void hfsc_deferred(void *); void hfsc_update_cfmin(struct hfsc_class *); @@ -256,6 +255,30 @@ struct hfsc_class *hfsc_clh2cph(struct hfsc_if *, u_int32_t); struct pool hfsc_class_pl, hfsc_internal_sc_pl; +/* + * ifqueue glue. + */ + +void *hfsc_alloc(void *); +void hfsc_free(void *); +int hfsc_enq(struct ifqueue *, struct mbuf *); +struct mbuf *hfsc_deq_begin(struct ifqueue *, void **); +void hfsc_deq_commit(struct ifqueue *, struct mbuf *, void *); +void hfsc_deq_rollback(struct ifqueue *, struct mbuf *, void *); +void hfsc_purge(struct ifqueue *, struct mbuf_list *); + +const struct ifq_ops hfsc_ops = { + hfsc_alloc, + hfsc_free, + hfsc_enq, + hfsc_deq_begin, + hfsc_deq_commit, + hfsc_deq_rollback, + hfsc_purge, +}; + +const struct ifq_ops * const ifq_hfsc_ops = &hfsc_ops; + u_int64_t hfsc_microuptime(void) { @@ -296,64 +319,37 @@ hfsc_initialize(void) { pool_init(&hfsc_class_pl, sizeof(struct hfsc_class), 0, 0, PR_WAITOK, "hfscclass", NULL); + pool_setipl(&hfsc_class_pl, IPL_NONE); pool_init(&hfsc_internal_sc_pl, sizeof(struct hfsc_internal_sc), 0, 0, PR_WAITOK, "hfscintsc", NULL); + pool_setipl(&hfsc_internal_sc_pl, IPL_NONE); } -int -hfsc_attach(struct ifnet *ifp) +struct hfsc_if * +hfsc_pf_alloc(struct ifnet *ifp) { struct hfsc_if *hif; - if (ifp == NULL || ifp->if_snd.ifq_hfsc != NULL) - return (0); + KASSERT(ifp != NULL); - hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK | M_ZERO); + hif = malloc(sizeof(*hif), M_DEVBUF, M_WAITOK | M_ZERO); TAILQ_INIT(&hif->hif_eligible); hif->hif_class_tbl = mallocarray(HFSC_DEFAULT_CLASSES, sizeof(void *), M_DEVBUF, M_WAITOK | M_ZERO); hif->hif_allocated = HFSC_DEFAULT_CLASSES; - hif->hif_ifq = &ifp->if_snd; - ifp->if_snd.ifq_hfsc = hif; - timeout_set(&hif->hif_defer, hfsc_deferred, ifp); - /* XXX HRTIMER don't schedule it yet, only when some packets wait. 
*/ - timeout_add(&hif->hif_defer, 1); - return (0); + return (hif); } int -hfsc_detach(struct ifnet *ifp) +hfsc_pf_addqueue(struct hfsc_if *hif, struct pf_queuespec *q) { - struct hfsc_if *hif; - - if (ifp == NULL) - return (0); - - hif = ifp->if_snd.ifq_hfsc; - timeout_del(&hif->hif_defer); - ifp->if_snd.ifq_hfsc = NULL; - - free(hif->hif_class_tbl, M_DEVBUF, hif->hif_allocated * sizeof(void *)); - free(hif, M_DEVBUF, sizeof(struct hfsc_if)); - - return (0); -} - -int -hfsc_addqueue(struct pf_queuespec *q) -{ - struct hfsc_if *hif; struct hfsc_class *cl, *parent; struct hfsc_sc rtsc, lssc, ulsc; - if (q->kif->pfik_ifp == NULL) - return (0); - - if ((hif = q->kif->pfik_ifp->if_snd.ifq_hfsc) == NULL) - return (EINVAL); + KASSERT(hif != NULL); if (q->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL) @@ -386,61 +382,82 @@ hfsc_addqueue(struct pf_queuespec *q) } int -hfsc_delqueue(struct pf_queuespec *q) -{ - struct hfsc_if *hif; - struct hfsc_class *cl; - - if (q->kif->pfik_ifp == NULL) - return (0); - - if ((hif = q->kif->pfik_ifp->if_snd.ifq_hfsc) == NULL) - return (EINVAL); - - if ((cl = hfsc_clh2cph(hif, q->qid)) == NULL) - return (EINVAL); - - return (hfsc_class_destroy(hif, cl)); -} - -int -hfsc_qstats(struct pf_queuespec *q, void *ubuf, int *nbytes) +hfsc_pf_qstats(struct pf_queuespec *q, void *ubuf, int *nbytes) { + struct ifnet *ifp = q->kif->pfik_ifp; struct hfsc_if *hif; struct hfsc_class *cl; struct hfsc_class_stats stats; int error = 0; - if (q->kif->pfik_ifp == NULL) - return (EBADF); - - if ((hif = q->kif->pfik_ifp->if_snd.ifq_hfsc) == NULL) + if (ifp == NULL) return (EBADF); - if ((cl = hfsc_clh2cph(hif, q->qid)) == NULL) + if (*nbytes < sizeof(stats)) return (EINVAL); - if (*nbytes < sizeof(stats)) + hif = ifq_q_enter(&ifp->if_snd, ifq_hfsc_ops); + if (hif == NULL) + return (EBADF); + + if ((cl = hfsc_clh2cph(hif, q->qid)) == NULL) { + ifq_q_leave(&ifp->if_snd, hif); return (EINVAL); + } hfsc_getclstats(&stats, cl); + ifq_q_leave(&ifp->if_snd, hif); if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0) return (error); + *nbytes = sizeof(stats); return (0); } void -hfsc_purge(struct ifqueue *ifq) +hfsc_pf_free(struct hfsc_if *hif) +{ + hfsc_free(hif); +} + +void * +hfsc_alloc(void *q) +{ + struct hfsc_if *hif = q; + KASSERT(hif != NULL); + + timeout_add(&hif->hif_defer, 1); + return (hif); +} + +void +hfsc_free(void *q) { - struct hfsc_if *hif = ifq->ifq_hfsc; + struct hfsc_if *hif = q; + int i; + + KERNEL_ASSERT_LOCKED(); + + timeout_del(&hif->hif_defer); + + i = hif->hif_allocated; + do + hfsc_class_destroy(hif, hif->hif_class_tbl[--i]); + while (i > 0); + + free(hif->hif_class_tbl, M_DEVBUF, hif->hif_allocated * sizeof(void *)); + free(hif, M_DEVBUF, sizeof(*hif)); +} + +void +hfsc_purge(struct ifqueue *ifq, struct mbuf_list *ml) +{ + struct hfsc_if *hif = ifq->ifq_q; struct hfsc_class *cl; for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) - if (ml_len(&cl->cl_q.q) > 0) - hfsc_cl_purge(hif, cl); - hif->hif_ifq->ifq_len = 0; + hfsc_cl_purge(hif, cl, ml); } struct hfsc_class * @@ -555,9 +572,7 @@ hfsc_class_destroy(struct hfsc_if *hif, struct hfsc_class *cl) return (EBUSY); s = splnet(); - - if (ml_len(&cl->cl_q.q) > 0) - hfsc_cl_purge(hif, cl); + KASSERT(ml_empty(&cl->cl_q.q)); if (cl->cl_parent != NULL) { struct hfsc_class *p = cl->cl_parent->cl_children; @@ -624,9 +639,9 @@ hfsc_nextclass(struct hfsc_class *cl) } int -hfsc_enqueue(struct ifqueue *ifq, struct mbuf *m) +hfsc_enq(struct ifqueue *ifq, struct mbuf *m) { - struct 
hfsc_if *hif = ifq->ifq_hfsc; + struct hfsc_if *hif = ifq->ifq_q; struct hfsc_class *cl; if ((cl = hfsc_clh2cph(hif, m->m_pkthdr.pf.qid)) == NULL || @@ -638,12 +653,12 @@ hfsc_enqueue(struct ifqueue *ifq, struct mbuf *m) } if (ml_len(&cl->cl_q.q) >= cl->cl_q.qlimit) { - /* drop. mbuf needs to be freed */ + /* drop occurred. mbuf needs to be freed */ PKTCNTR_INC(&cl->cl_stats.drop_cnt, m->m_pkthdr.len); return (ENOBUFS); } + ml_enqueue(&cl->cl_q.q, m); - ifq->ifq_len++; m->m_pkthdr.pf.prio = IFQ_MAXPRIO; /* successfully queued. */ @@ -654,71 +669,68 @@ hfsc_enqueue(struct ifqueue *ifq, struct mbuf *m) } struct mbuf * -hfsc_dequeue(struct ifqueue *ifq, int remove) +hfsc_deq_begin(struct ifqueue *ifq, void **cookiep) { - struct hfsc_if *hif = ifq->ifq_hfsc; + struct hfsc_if *hif = ifq->ifq_q; struct hfsc_class *cl, *tcl; struct mbuf *m; - int next_len, realtime = 0; u_int64_t cur_time; - if (IFQ_LEN(ifq) == 0) - return (NULL); - cur_time = hfsc_microuptime(); - if (remove && hif->hif_pollcache != NULL) { - cl = hif->hif_pollcache; - hif->hif_pollcache = NULL; - /* check if the class was scheduled by real-time criteria */ - if (cl->cl_rsc != NULL) - realtime = (cl->cl_e <= cur_time); - } else { + /* + * if there are eligible classes, use real-time criteria. + * find the class with the minimum deadline among + * the eligible classes. + */ + cl = hfsc_ellist_get_mindl(hif, cur_time); + if (cl == NULL) { /* - * if there are eligible classes, use real-time criteria. - * find the class with the minimum deadline among - * the eligible classes. + * use link-sharing criteria + * get the class with the minimum vt in the hierarchy */ - if ((cl = hfsc_ellist_get_mindl(hif, cur_time)) != NULL) { - realtime = 1; - } else { + cl = NULL; + tcl = hif->hif_rootclass; + + while (tcl != NULL && tcl->cl_children != NULL) { + tcl = hfsc_actlist_firstfit(tcl, cur_time); + if (tcl == NULL) + continue; + /* - * use link-sharing criteria - * get the class with the minimum vt in the hierarchy + * update parent's cl_cvtmin. + * don't update if the new vt is smaller. */ - cl = NULL; - tcl = hif->hif_rootclass; + if (tcl->cl_parent->cl_cvtmin < tcl->cl_vt) + tcl->cl_parent->cl_cvtmin = tcl->cl_vt; - while (tcl != NULL && tcl->cl_children != NULL) { - tcl = hfsc_actlist_firstfit(tcl, cur_time); - if (tcl == NULL) - continue; + cl = tcl; + } + /* XXX HRTIMER plan hfsc_deferred precisely here. */ + if (cl == NULL) + return (NULL); + } - /* - * update parent's cl_cvtmin. - * don't update if the new vt is smaller. - */ - if (tcl->cl_parent->cl_cvtmin < tcl->cl_vt) - tcl->cl_parent->cl_cvtmin = tcl->cl_vt; + m = ml_dequeue(&cl->cl_q.q); + KASSERT(m != NULL); - cl = tcl; - } - /* XXX HRTIMER plan hfsc_deferred precisely here. 
*/ - if (cl == NULL) - return (NULL); - } + hif->hif_microtime = cur_time; + *cookiep = cl; + return (m); +} - if (!remove) { - hif->hif_pollcache = cl; - m = hfsc_cl_poll(cl); - return (m); - } - } +void +hfsc_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie) +{ + struct hfsc_if *hif = ifq->ifq_q; + struct hfsc_class *cl = cookie; + int next_len, realtime = 0; + u_int64_t cur_time = hif->hif_microtime; - if ((m = hfsc_cl_dequeue(cl)) == NULL) - panic("hfsc_dequeue"); + /* check if the class was scheduled by real-time criteria */ + if (cl->cl_rsc != NULL) + realtime = (cl->cl_e <= cur_time); - ifq->ifq_len--; PKTCNTR_INC(&cl->cl_stats.xmit_cnt, m->m_pkthdr.len); hfsc_update_vf(cl, m->m_pkthdr.len, cur_time); @@ -739,51 +751,49 @@ hfsc_dequeue(struct ifqueue *ifq, int remove) /* the class becomes passive */ hfsc_set_passive(hif, cl); } +} - return (m); +void +hfsc_deq_rollback(struct ifqueue *ifq, struct mbuf *m, void *cookie) +{ + struct hfsc_class *cl = cookie; + + ml_requeue(&cl->cl_q.q, m); } void hfsc_deferred(void *arg) { struct ifnet *ifp = arg; + struct hfsc_if *hif; int s; + KERNEL_ASSERT_LOCKED(); + KASSERT(HFSC_ENABLED(&ifp->if_snd)); + s = splnet(); - if (HFSC_ENABLED(&ifp->if_snd) && !IFQ_IS_EMPTY(&ifp->if_snd)) + if (!IFQ_IS_EMPTY(&ifp->if_snd)) if_start(ifp); splx(s); - /* XXX HRTIMER nearest virtual/fit time is likely less than 1/HZ. */ - timeout_add(&ifp->if_snd.ifq_hfsc->hif_defer, 1); -} - -struct mbuf * -hfsc_cl_dequeue(struct hfsc_class *cl) -{ - return (ml_dequeue(&cl->cl_q.q)); -} + hif = ifp->if_snd.ifq_q; -struct mbuf * -hfsc_cl_poll(struct hfsc_class *cl) -{ - /* XXX */ - return (cl->cl_q.q.ml_head); + /* XXX HRTIMER nearest virtual/fit time is likely less than 1/HZ. */ + timeout_add(&hif->hif_defer, 1); } void -hfsc_cl_purge(struct hfsc_if *hif, struct hfsc_class *cl) +hfsc_cl_purge(struct hfsc_if *hif, struct hfsc_class *cl, struct mbuf_list *ml) { struct mbuf *m; if (ml_empty(&cl->cl_q.q)) return; - while ((m = hfsc_cl_dequeue(cl)) != NULL) { + MBUF_LIST_FOREACH(&cl->cl_q.q, m) PKTCNTR_INC(&cl->cl_stats.drop_cnt, m->m_pkthdr.len); - m_freem(m); - hif->hif_ifq->ifq_len--; - } + + ml_enlist(ml, &cl->cl_q.q); hfsc_update_vf(cl, 0, 0); /* remove cl from the actlist */ hfsc_set_passive(hif, cl); @@ -1544,25 +1554,4 @@ hfsc_clh2cph(struct hfsc_if *hif, u_int32_t chandle) return (cl); return (NULL); } - -#else /* NPF > 0 */ - -void -hfsc_purge(struct ifqueue *q) -{ - panic("hfsc_purge called on hfsc-less kernel"); -} - -int -hfsc_enqueue(struct ifqueue *q, struct mbuf *m) -{ - panic("hfsc_enqueue called on hfsc-less kernel"); -} - -struct mbuf * -hfsc_dequeue(struct ifqueue *q, int i) -{ - panic("hfsc_enqueue called on hfsc-less kernel"); -} - #endif diff --git a/sys/net/hfsc.h b/sys/net/hfsc.h index ae746e50ad0..544d9df6259 100644 --- a/sys/net/hfsc.h +++ b/sys/net/hfsc.h @@ -1,4 +1,4 @@ -/* $OpenBSD: hfsc.h,v 1.10 2015/11/09 01:06:31 dlg Exp $ */ +/* $OpenBSD: hfsc.h,v 1.11 2015/11/20 03:35:23 dlg Exp $ */ /* * Copyright (c) 2012-2013 Henning Brauer <henning@openbsd.org> @@ -112,19 +112,18 @@ struct ifqueue; struct pf_queuespec; struct hfsc_if; -#define HFSC_ENABLED(ifq) ((ifq)->ifq_hfsc != NULL) +extern const struct ifq_ops * const ifq_hfsc_ops; + +#define HFSC_ENABLED(ifq) ((ifq)->ifq_ops == ifq_hfsc_ops) #define HFSC_DEFAULT_QLIMIT 50 +struct hfsc_if *hfsc_pf_alloc(struct ifnet *); +int hfsc_pf_addqueue(struct hfsc_if *, struct pf_queuespec *); +void hfsc_pf_free(struct hfsc_if *); +int hfsc_pf_qstats(struct pf_queuespec *, void *, int *); + void 
hfsc_initialize(void); -int hfsc_attach(struct ifnet *); -int hfsc_detach(struct ifnet *); -void hfsc_purge(struct ifqueue *); -int hfsc_enqueue(struct ifqueue *, struct mbuf *); -struct mbuf *hfsc_dequeue(struct ifqueue *, int); u_int64_t hfsc_microuptime(void); -int hfsc_addqueue(struct pf_queuespec *); -int hfsc_delqueue(struct pf_queuespec *); -int hfsc_qstats(struct pf_queuespec *, void *, int *); #endif /* _KERNEL */ #endif /* _HFSC_H_ */ diff --git a/sys/net/if.c b/sys/net/if.c index 2f4d0399f6f..75b3e96b7f8 100644 --- a/sys/net/if.c +++ b/sys/net/if.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if.c,v 1.407 2015/11/18 13:58:02 mpi Exp $ */ +/* $OpenBSD: if.c,v 1.408 2015/11/20 03:35:23 dlg Exp $ */ /* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */ /* @@ -397,9 +397,6 @@ if_attachsetup(struct ifnet *ifp) if_addgroup(ifp, IFG_ALL); - if (ifp->if_snd.ifq_maxlen == 0) - IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); - if_attachdomain(ifp); #if NPF > 0 pfi_attach_ifnet(ifp); @@ -510,6 +507,8 @@ if_attach_common(struct ifnet *ifp) TAILQ_INIT(&ifp->if_addrlist); TAILQ_INIT(&ifp->if_maddrlist); + ifq_init(&ifp->if_snd); + ifp->if_addrhooks = malloc(sizeof(*ifp->if_addrhooks), M_TEMP, M_WAITOK); TAILQ_INIT(ifp->if_addrhooks); @@ -538,7 +537,7 @@ if_start(struct ifnet *ifp) splassert(IPL_NET); - if (ifp->if_snd.ifq_len >= min(8, ifp->if_snd.ifq_maxlen) && + if (ifq_len(&ifp->if_snd) >= min(8, ifp->if_snd.ifq_maxlen) && !ISSET(ifp->if_flags, IFF_OACTIVE)) { if (ISSET(ifp->if_xflags, IFXF_TXREADY)) { TAILQ_REMOVE(&iftxlist, ifp, if_txlist); @@ -783,8 +782,6 @@ if_input_process(void *xmq) s = splnet(); while ((m = ml_dequeue(&ml)) != NULL) { - sched_pause(); - ifp = if_get(m->m_pkthdr.ph_ifidx); if (ifp == NULL) { m_freem(m); @@ -942,6 +939,8 @@ if_detach(struct ifnet *ifp) if_idxmap_remove(ifp); splx(s); + + ifq_destroy(&ifp->if_snd); } /* @@ -2725,6 +2724,327 @@ niq_enlist(struct niqueue *niq, struct mbuf_list *ml) return (rv); } +/* + * send queues. 
+ */ + +void *priq_alloc(void *); +void priq_free(void *); +int priq_enq(struct ifqueue *, struct mbuf *); +struct mbuf *priq_deq_begin(struct ifqueue *, void **); +void priq_deq_commit(struct ifqueue *, struct mbuf *, void *); +void priq_deq_rollback(struct ifqueue *, struct mbuf *, void *); +void priq_purge(struct ifqueue *, struct mbuf_list *); + +const struct ifq_ops priq_ops = { + priq_alloc, + priq_free, + priq_enq, + priq_deq_begin, + priq_deq_commit, + priq_deq_rollback, + priq_purge, +}; + +const struct ifq_ops * const ifq_priq_ops = &priq_ops; + +struct priq_list { + struct mbuf *head; + struct mbuf *tail; +}; + +struct priq { + struct priq_list pq_lists[IFQ_NQUEUES]; +}; + +void * +priq_alloc(void *null) +{ + return (malloc(sizeof(struct priq), M_DEVBUF, M_WAITOK | M_ZERO)); +} + +void +priq_free(void *pq) +{ + free(pq, M_DEVBUF, sizeof(struct priq)); +} + +int +priq_enq(struct ifqueue *ifq, struct mbuf *m) +{ + struct priq *pq; + struct priq_list *pl; + + if (ifq_len(ifq) >= ifq->ifq_maxlen) + return (ENOBUFS); + + pq = ifq->ifq_q; + KASSERT(m->m_pkthdr.pf.prio < IFQ_MAXPRIO); + pl = &pq->pq_lists[m->m_pkthdr.pf.prio]; + + m->m_nextpkt = NULL; + if (pl->tail == NULL) + pl->head = m; + else + pl->tail->m_nextpkt = m; + pl->tail = m; + + return (0); +} + +struct mbuf * +priq_deq_begin(struct ifqueue *ifq, void **cookiep) +{ + struct priq *pq = ifq->ifq_q; + struct priq_list *pl; + unsigned int prio = nitems(pq->pq_lists); + struct mbuf *m; + + do { + pl = &pq->pq_lists[--prio]; + m = pl->head; + if (m != NULL) { + *cookiep = pl; + return (m); + } + } while (prio > 0); + + return (NULL); +} + +void +priq_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie) +{ + struct priq_list *pl = cookie; + + KASSERT(pl->head == m); + + pl->head = m->m_nextpkt; + m->m_nextpkt = NULL; + + if (pl->head == NULL) + pl->tail = NULL; +} + +void +priq_deq_rollback(struct ifqueue *ifq, struct mbuf *m, void *cookie) +{ +#ifdef DIAGNOSTIC + struct priq_list *pl = cookie; + + KASSERT(pl->head == m); +#endif +} + +void +priq_purge(struct ifqueue *ifq, struct mbuf_list *ml) +{ + struct priq *pq = ifq->ifq_q; + struct priq_list *pl; + unsigned int prio = nitems(pq->pq_lists); + struct mbuf *m, *n; + + do { + pl = &pq->pq_lists[--prio]; + + for (m = pl->head; m != NULL; m = n) { + n = m->m_nextpkt; + ml_enqueue(ml, m); + } + + pl->head = pl->tail = NULL; + } while (prio > 0); +} + +int +ifq_enqueue_try(struct ifqueue *ifq, struct mbuf *m) +{ + int rv; + + mtx_enter(&ifq->ifq_mtx); + rv = ifq->ifq_ops->ifqop_enq(ifq, m); + if (rv == 0) + ifq->ifq_len++; + else + ifq->ifq_drops++; + mtx_leave(&ifq->ifq_mtx); + + return (rv); +} + +int +ifq_enq(struct ifqueue *ifq, struct mbuf *m) +{ + int err; + + err = ifq_enqueue_try(ifq, m); + if (err != 0) + m_freem(m); + + return (err); +} + +struct mbuf * +ifq_deq_begin(struct ifqueue *ifq) +{ + struct mbuf *m = NULL; + void *cookie; + + mtx_enter(&ifq->ifq_mtx); + if (ifq->ifq_len == 0 || + (m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie)) == NULL) { + mtx_leave(&ifq->ifq_mtx); + return (NULL); + } + + m->m_pkthdr.ph_cookie = cookie; + + return (m); +} + +void +ifq_deq_commit(struct ifqueue *ifq, struct mbuf *m) +{ + void *cookie; + + KASSERT(m != NULL); + cookie = m->m_pkthdr.ph_cookie; + + ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie); + ifq->ifq_len--; + mtx_leave(&ifq->ifq_mtx); +} + +void +ifq_deq_rollback(struct ifqueue *ifq, struct mbuf *m) +{ + void *cookie; + + KASSERT(m != NULL); + cookie = m->m_pkthdr.ph_cookie; + + 
ifq->ifq_ops->ifqop_deq_rollback(ifq, m, cookie); + mtx_leave(&ifq->ifq_mtx); +} + +struct mbuf * +ifq_deq(struct ifqueue *ifq) +{ + struct mbuf *m; + + m = ifq_deq_begin(ifq); + if (m == NULL) + return (NULL); + + ifq_deq_commit(ifq, m); + + return (m); +} + +unsigned int +ifq_purge(struct ifqueue *ifq) +{ + struct mbuf_list ml = MBUF_LIST_INITIALIZER(); + unsigned int rv; + + mtx_enter(&ifq->ifq_mtx); + ifq->ifq_ops->ifqop_purge(ifq, &ml); + rv = ifq->ifq_len; + ifq->ifq_len = 0; + ifq->ifq_drops += rv; + mtx_leave(&ifq->ifq_mtx); + + KASSERT(rv == ml_len(&ml)); + + ml_purge(&ml); + + return (rv); +} + +void +ifq_init(struct ifqueue *ifq) +{ + mtx_init(&ifq->ifq_mtx, IPL_NET); + ifq->ifq_drops = 0; + + /* default to priq */ + ifq->ifq_ops = &priq_ops; + ifq->ifq_q = priq_ops.ifqop_alloc(NULL); + + ifq->ifq_serializer = 0; + ifq->ifq_len = 0; + + if (ifq->ifq_maxlen == 0) + ifq_set_maxlen(ifq, IFQ_MAXLEN); +} + +void +ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg) +{ + struct mbuf_list ml = MBUF_LIST_INITIALIZER(); + struct mbuf_list free_ml = MBUF_LIST_INITIALIZER(); + struct mbuf *m; + const struct ifq_ops *oldops; + void *newq, *oldq; + + newq = newops->ifqop_alloc(opsarg); + + mtx_enter(&ifq->ifq_mtx); + ifq->ifq_ops->ifqop_purge(ifq, &ml); + ifq->ifq_len = 0; + + oldops = ifq->ifq_ops; + oldq = ifq->ifq_q; + + ifq->ifq_ops = newops; + ifq->ifq_q = newq; + + while ((m = ml_dequeue(&ml)) != NULL) { + if (ifq->ifq_ops->ifqop_enq(ifq, m) != 0) { + ifq->ifq_drops++; + ml_enqueue(&free_ml, m); + } else + ifq->ifq_len++; + } + mtx_leave(&ifq->ifq_mtx); + + oldops->ifqop_free(oldq); + + ml_purge(&free_ml); +} + +void * +ifq_q_enter(struct ifqueue *ifq, const struct ifq_ops *ops) +{ + mtx_enter(&ifq->ifq_mtx); + if (ifq->ifq_ops == ops) + return (ifq->ifq_q); + + mtx_leave(&ifq->ifq_mtx); + + return (NULL); +} + +void +ifq_q_leave(struct ifqueue *ifq, void *q) +{ + KASSERT(q == ifq->ifq_q); + mtx_leave(&ifq->ifq_mtx); +} + +void +ifq_destroy(struct ifqueue *ifq) +{ + struct mbuf_list ml = MBUF_LIST_INITIALIZER(); + + /* don't need to lock because this is the last use of the ifq */ + + ifq->ifq_ops->ifqop_purge(ifq, &ml); + ifq->ifq_ops->ifqop_free(ifq->ifq_q); + + ml_purge(&ml); +} + __dead void unhandled_af(int af) { diff --git a/sys/net/if_tun.c b/sys/net/if_tun.c index 6f0dff68a9d..948a0f0f296 100644 --- a/sys/net/if_tun.c +++ b/sys/net/if_tun.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_tun.c,v 1.159 2015/10/25 12:05:40 mpi Exp $ */ +/* $OpenBSD: if_tun.c,v 1.160 2015/11/20 03:35:23 dlg Exp $ */ /* $NetBSD: if_tun.c,v 1.24 1996/05/07 02:40:48 thorpej Exp $ */ /* @@ -685,10 +685,11 @@ tun_dev_ioctl(struct tun_softc *tp, u_long cmd, caddr_t data, int flag, tp->tun_flags &= ~TUN_ASYNC; break; case FIONREAD: - IFQ_POLL(&tp->tun_if.if_snd, m); - if (m != NULL) + m = ifq_deq_begin(&tp->tun_if.if_snd); + if (m != NULL) { *(int *)data = m->m_pkthdr.len; - else + ifq_deq_rollback(&tp->tun_if.if_snd, m); + } else *(int *)data = 0; break; case TIOCSPGRP: @@ -810,6 +811,14 @@ tun_dev_read(struct tun_softc *tp, struct uio *uio, int ioflag) } while (m0 == NULL); splx(s); + if (tp->tun_flags & TUN_LAYER2) { +#if NBPFILTER > 0 + if (ifp->if_bpf) + bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); +#endif + ifp->if_opackets++; + } + while (m0 != NULL && uio->uio_resid > 0 && error == 0) { len = min(uio->uio_resid, m0->m_len); if (len != 0) @@ -1007,7 +1016,7 @@ tun_dev_poll(struct tun_softc *tp, int events, struct proc *p) { int revents, s; struct ifnet *ifp; - struct mbuf *m; + unsigned int 
len; ifp = &tp->tun_if; revents = 0; @@ -1015,10 +1024,9 @@ tun_dev_poll(struct tun_softc *tp, int events, struct proc *p) TUNDEBUG(("%s: tunpoll\n", ifp->if_xname)); if (events & (POLLIN | POLLRDNORM)) { - IFQ_POLL(&ifp->if_snd, m); - if (m != NULL) { - TUNDEBUG(("%s: tunselect q=%d\n", ifp->if_xname, - IFQ_LEN(ifp->if_snd))); + len = IFQ_LEN(&ifp->if_snd); + if (len > 0) { + TUNDEBUG(("%s: tunselect q=%d\n", ifp->if_xname, len)); revents |= events & (POLLIN | POLLRDNORM); } else { TUNDEBUG(("%s: tunpoll waiting\n", ifp->if_xname)); @@ -1114,7 +1122,7 @@ filt_tunread(struct knote *kn, long hint) int s; struct tun_softc *tp; struct ifnet *ifp; - struct mbuf *m; + unsigned int len; if (kn->kn_status & KN_DETACHED) { kn->kn_data = 0; @@ -1125,10 +1133,10 @@ filt_tunread(struct knote *kn, long hint) ifp = &tp->tun_if; s = splnet(); - IFQ_POLL(&ifp->if_snd, m); - if (m != NULL) { + len = IFQ_LEN(&ifp->if_snd); + if (len > 0) { splx(s); - kn->kn_data = IFQ_LEN(&ifp->if_snd); + kn->kn_data = len; TUNDEBUG(("%s: tunkqread q=%d\n", ifp->if_xname, IFQ_LEN(&ifp->if_snd))); @@ -1175,21 +1183,11 @@ void tun_start(struct ifnet *ifp) { struct tun_softc *tp = ifp->if_softc; - struct mbuf *m; splassert(IPL_NET); - IFQ_POLL(&ifp->if_snd, m); - if (m != NULL) { - if (tp->tun_flags & TUN_LAYER2) { -#if NBPFILTER > 0 - if (ifp->if_bpf) - bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); -#endif - ifp->if_opackets++; - } + if (IFQ_LEN(&ifp->if_snd)) tun_wakeup(tp); - } } void diff --git a/sys/net/if_var.h b/sys/net/if_var.h index abc6af69dbc..470e5543f99 100644 --- a/sys/net/if_var.h +++ b/sys/net/if_var.h @@ -1,4 +1,4 @@ -/* $OpenBSD: if_var.h,v 1.53 2015/11/18 13:58:02 mpi Exp $ */ +/* $OpenBSD: if_var.h,v 1.54 2015/11/20 03:35:23 dlg Exp $ */ /* $NetBSD: if.h,v 1.23 1996/05/07 02:40:27 thorpej Exp $ */ /* @@ -98,17 +98,33 @@ struct if_clone { { { 0 }, name, sizeof(name) - 1, create, destroy } /* - * Structure defining a queue for a network interface. + * Structure defining the send queue for a network interface. */ -struct ifqueue { - struct { - struct mbuf *head; - struct mbuf *tail; - } ifq_q[IFQ_NQUEUES]; - int ifq_len; - int ifq_maxlen; - int ifq_drops; - struct hfsc_if *ifq_hfsc; + +struct ifqueue; + +struct ifq_ops { + void *(*ifqop_alloc)(void *); + void (*ifqop_free)(void *); + int (*ifqop_enq)(struct ifqueue *, struct mbuf *); + struct mbuf *(*ifqop_deq_begin)(struct ifqueue *, void **); + void (*ifqop_deq_commit)(struct ifqueue *, + struct mbuf *, void *); + void (*ifqop_deq_rollback)(struct ifqueue *, + struct mbuf *, void *); + void (*ifqop_purge)(struct ifqueue *, + struct mbuf_list *); +}; + +struct ifqueue { + struct mutex ifq_mtx; + uint64_t ifq_drops; + const struct ifq_ops *ifq_ops; + void *ifq_q; + unsigned int ifq_len; + unsigned int ifq_serializer; + + unsigned int ifq_maxlen; }; /* @@ -256,121 +272,55 @@ struct ifg_list { }; #ifdef _KERNEL -#define IFQ_MAXLEN 256 -#define IFNET_SLOWHZ 1 /* granularity is 1 second */ - /* - * Output queues (ifp->if_snd) and internetwork datagram level (pup level 1) - * input routines have queues of messages stored on ifqueue structures - * (defined above). Entries are added to and deleted from these structures - * by these macros, which should be called with ipl raised to splnet(). + * Interface send queues. 
*/ -#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) -#define IF_DROP(ifq) ((ifq)->ifq_drops++) -#define IF_ENQUEUE(ifq, m) \ -do { \ - (m)->m_nextpkt = NULL; \ - if ((ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail == NULL) \ - (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head = m; \ - else \ - (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail->m_nextpkt = m; \ - (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail = m; \ - (ifq)->ifq_len++; \ -} while (/* CONSTCOND */0) -#define IF_PREPEND(ifq, m) \ -do { \ - (m)->m_nextpkt = (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head; \ - if ((ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail == NULL) \ - (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].tail = (m); \ - (ifq)->ifq_q[(m)->m_pkthdr.pf.prio].head = (m); \ - (ifq)->ifq_len++; \ -} while (/* CONSTCOND */0) -#define IF_POLL(ifq, m) \ -do { \ - int if_dequeue_prio = IFQ_MAXPRIO; \ - do { \ - (m) = (ifq)->ifq_q[if_dequeue_prio].head; \ - } while (!(m) && --if_dequeue_prio >= 0); \ -} while (/* CONSTCOND */0) +void ifq_init(struct ifqueue *); +void ifq_attach(struct ifqueue *, const struct ifq_ops *, void *); +void ifq_destroy(struct ifqueue *); +int ifq_enq_try(struct ifqueue *, struct mbuf *); +int ifq_enq(struct ifqueue *, struct mbuf *); +struct mbuf *ifq_deq_begin(struct ifqueue *); +void ifq_deq_commit(struct ifqueue *, struct mbuf *); +void ifq_deq_rollback(struct ifqueue *, struct mbuf *); +struct mbuf *ifq_deq(struct ifqueue *); +unsigned int ifq_purge(struct ifqueue *); +void *ifq_q_enter(struct ifqueue *, const struct ifq_ops *); +void ifq_q_leave(struct ifqueue *, void *); + +#define ifq_len(_ifq) ((_ifq)->ifq_len) +#define ifq_empty(_ifq) (ifq_len(_ifq) == 0) +#define ifq_set_maxlen(_ifq, _l) ((_ifq)->ifq_maxlen = (_l)) + +extern const struct ifq_ops * const ifq_priq_ops; -#define IF_DEQUEUE(ifq, m) \ -do { \ - int if_dequeue_prio = IFQ_MAXPRIO; \ - do { \ - (m) = (ifq)->ifq_q[if_dequeue_prio].head; \ - if (m) { \ - if (((ifq)->ifq_q[if_dequeue_prio].head = \ - (m)->m_nextpkt) == NULL) \ - (ifq)->ifq_q[if_dequeue_prio].tail = NULL; \ - (m)->m_nextpkt = NULL; \ - (ifq)->ifq_len--; \ - } \ - } while (!(m) && --if_dequeue_prio >= 0); \ -} while (/* CONSTCOND */0) +#define IFQ_MAXLEN 256 +#define IFNET_SLOWHZ 1 /* granularity is 1 second */ -#define IF_PURGE(ifq) \ -do { \ - struct mbuf *__m0; \ - \ - for (;;) { \ - IF_DEQUEUE((ifq), __m0); \ - if (__m0 == NULL) \ - break; \ - else \ - m_freem(__m0); \ - } \ -} while (/* CONSTCOND */0) -#define IF_LEN(ifq) ((ifq)->ifq_len) -#define IF_IS_EMPTY(ifq) ((ifq)->ifq_len == 0) +/* + * IFQ compat on ifq API + */ #define IFQ_ENQUEUE(ifq, m, err) \ do { \ - if (HFSC_ENABLED(ifq)) \ - (err) = hfsc_enqueue(((struct ifqueue *)(ifq)), m); \ - else { \ - if (IF_QFULL((ifq))) { \ - (err) = ENOBUFS; \ - } else { \ - IF_ENQUEUE((ifq), (m)); \ - (err) = 0; \ - } \ - } \ - if ((err)) { \ - m_freem((m)); \ - (ifq)->ifq_drops++; \ - } \ + (err) = ifq_enq((ifq), (m)); \ } while (/* CONSTCOND */0) #define IFQ_DEQUEUE(ifq, m) \ do { \ - if (HFSC_ENABLED((ifq))) \ - (m) = hfsc_dequeue(((struct ifqueue *)(ifq)), 1); \ - else \ - IF_DEQUEUE((ifq), (m)); \ -} while (/* CONSTCOND */0) - -#define IFQ_POLL(ifq, m) \ -do { \ - if (HFSC_ENABLED((ifq))) \ - (m) = hfsc_dequeue(((struct ifqueue *)(ifq)), 0); \ - else \ - IF_POLL((ifq), (m)); \ + (m) = ifq_deq(ifq); \ } while (/* CONSTCOND */0) #define IFQ_PURGE(ifq) \ do { \ - if (HFSC_ENABLED((ifq))) \ - hfsc_purge(((struct ifqueue *)(ifq))); \ - else \ - IF_PURGE((ifq)); \ + (void)ifq_purge(ifq); \ } while (/* CONSTCOND */0) -#define IFQ_SET_READY(ifq) /* nothing */ - 
-#define IFQ_LEN(ifq) IF_LEN(ifq) -#define IFQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0) -#define IFQ_SET_MAXLEN(ifq, len) ((ifq)->ifq_maxlen = (len)) +#define IFQ_LEN(ifq) ifq_len(ifq) +#define IFQ_IS_EMPTY(ifq) ifq_empty(ifq) +#define IFQ_SET_MAXLEN(ifq, len) ifq_set_maxlen(ifq, len) +#define IFQ_SET_READY(ifq) do { } while (0) /* default interface priorities */ #define IF_WIRED_DEFAULT_PRIORITY 0 @@ -405,6 +355,7 @@ extern struct ifnet_head ifnet; extern unsigned int lo0ifidx; void if_start(struct ifnet *); +int if_enqueue_try(struct ifnet *, struct mbuf *); int if_enqueue(struct ifnet *, struct mbuf *); void if_input(struct ifnet *, struct mbuf_list *); int if_input_local(struct ifnet *, struct mbuf *, sa_family_t); diff --git a/sys/net/pf_if.c b/sys/net/pf_if.c index 25bf59347d6..fdef0783f43 100644 --- a/sys/net/pf_if.c +++ b/sys/net/pf_if.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pf_if.c,v 1.81 2015/10/30 11:33:55 mikeb Exp $ */ +/* $OpenBSD: pf_if.c,v 1.82 2015/11/20 03:35:23 dlg Exp $ */ /* * Copyright 2005 Henning Brauer <henning@openbsd.org> @@ -258,11 +258,6 @@ pfi_detach_ifnet(struct ifnet *ifp) hook_disestablish(ifp->if_addrhooks, kif->pfik_ah_cookie); pfi_kif_update(kif); - if (HFSC_ENABLED(&ifp->if_snd)) { - pf_remove_queues(ifp); - pf_free_queues(pf_queues_active, ifp); - } - kif->pfik_ifp = NULL; ifp->if_pf_kif = NULL; pfi_kif_unref(kif, PFI_KIF_REF_NONE); diff --git a/sys/net/pf_ioctl.c b/sys/net/pf_ioctl.c index 0709b8fb9ed..7d8d74f2435 100644 --- a/sys/net/pf_ioctl.c +++ b/sys/net/pf_ioctl.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pf_ioctl.c,v 1.291 2015/10/13 19:32:31 sashan Exp $ */ +/* $OpenBSD: pf_ioctl.c,v 1.292 2015/11/20 03:35:23 dlg Exp $ */ /* * Copyright (c) 2001 Daniel Hartmeier @@ -85,8 +85,10 @@ int pfclose(dev_t, int, int, struct proc *); int pfioctl(dev_t, u_long, caddr_t, int, struct proc *); int pf_begin_rules(u_int32_t *, const char *); int pf_rollback_rules(u_int32_t, char *); -int pf_create_queues(void); +int pf_enable_queues(void); +void pf_remove_queues(void); int pf_commit_queues(void); +void pf_free_queues(struct pf_queuehead *); int pf_setup_pfsync_matching(struct pf_ruleset *); void pf_hash_rule(MD5_CTX *, struct pf_rule *); void pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *); @@ -517,68 +519,144 @@ pf_rollback_rules(u_int32_t ticket, char *anchor) /* queue defs only in the main ruleset */ if (anchor[0]) return (0); - return (pf_free_queues(pf_queues_inactive, NULL)); + + pf_free_queues(pf_queues_inactive); + + return (0); } -int -pf_free_queues(struct pf_queuehead *where, struct ifnet *ifp) +void +pf_free_queues(struct pf_queuehead *where) { struct pf_queuespec *q, *qtmp; TAILQ_FOREACH_SAFE(q, where, entries, qtmp) { - if (ifp && q->kif->pfik_ifp != ifp) - continue; TAILQ_REMOVE(where, q, entries); pfi_kif_unref(q->kif, PFI_KIF_REF_RULE); pool_put(&pf_queue_pl, q); } - return (0); } -int -pf_remove_queues(struct ifnet *ifp) +void +pf_remove_queues(void) { struct pf_queuespec *q; - int error = 0; - - /* remove queues */ - TAILQ_FOREACH_REVERSE(q, pf_queues_active, pf_queuehead, entries) { - if (ifp && q->kif->pfik_ifp != ifp) - continue; - if ((error = hfsc_delqueue(q)) != 0) - return (error); - } + struct ifnet *ifp; /* put back interfaces in normal queueing mode */ TAILQ_FOREACH(q, pf_queues_active, entries) { - if (ifp && q->kif->pfik_ifp != ifp) + if (q->parent_qid != 0) + continue; + + ifp = q->kif->pfik_ifp; + if (ifp == NULL) continue; - if (q->parent_qid == 0) - if ((error = hfsc_detach(q->kif->pfik_ifp)) != 0) - return (error); + + 
KASSERT(HFSC_ENABLED(&ifp->if_snd)); + + ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); } +} - return (0); +struct pf_hfsc_queue { + struct ifnet *ifp; + struct hfsc_if *hif; + struct pf_hfsc_queue *next; +}; + +static inline struct pf_hfsc_queue * +pf_hfsc_ifp2q(struct pf_hfsc_queue *list, struct ifnet *ifp) +{ + struct pf_hfsc_queue *phq = list; + + while (phq != NULL) { + if (phq->ifp == ifp) + return (phq); + + phq = phq->next; + } + + return (phq); } int pf_create_queues(void) { struct pf_queuespec *q; - int error = 0; + struct ifnet *ifp; + struct pf_hfsc_queue *list = NULL, *phq; + int error; + + /* find root queues and alloc hfsc for these interfaces */ + TAILQ_FOREACH(q, pf_queues_active, entries) { + if (q->parent_qid != 0) + continue; + + ifp = q->kif->pfik_ifp; + if (ifp == NULL) + continue; + + phq = malloc(sizeof(*phq), M_TEMP, M_WAITOK); + phq->ifp = ifp; + phq->hif = hfsc_pf_alloc(ifp); - /* find root queues and attach hfsc to these interfaces */ - TAILQ_FOREACH(q, pf_queues_active, entries) - if (q->parent_qid == 0) - if ((error = hfsc_attach(q->kif->pfik_ifp)) != 0) - return (error); + phq->next = list; + list = phq; + } /* and now everything */ - TAILQ_FOREACH(q, pf_queues_active, entries) - if ((error = hfsc_addqueue(q)) != 0) - return (error); + TAILQ_FOREACH(q, pf_queues_active, entries) { + ifp = q->kif->pfik_ifp; + if (ifp == NULL) + continue; + + phq = pf_hfsc_ifp2q(list, ifp); + KASSERT(phq != NULL); + + error = hfsc_pf_addqueue(phq->hif, q); + if (error != 0) + goto error; + } + + /* find root queues in old list to disable them if necessary */ + TAILQ_FOREACH(q, pf_queues_inactive, entries) { + if (q->parent_qid != 0) + continue; + + ifp = q->kif->pfik_ifp; + if (ifp == NULL) + continue; + + phq = pf_hfsc_ifp2q(list, ifp); + if (phq != NULL) + continue; + + ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL); + } + + /* commit the new queues */ + while (list != NULL) { + phq = list; + list = phq->next; + + ifp = phq->ifp; + + ifq_attach(&ifp->if_snd, ifq_hfsc_ops, phq->hif); + free(phq, M_TEMP, sizeof(*phq)); + } return (0); + +error: + while (list != NULL) { + phq = list; + list = phq->next; + + hfsc_pf_free(phq->hif); + free(phq, M_TEMP, sizeof(*phq)); + } + + return (error); } int @@ -587,16 +665,21 @@ pf_commit_queues(void) struct pf_queuehead *qswap; int error; - if ((error = pf_remove_queues(NULL)) != 0) + /* swap */ + qswap = pf_queues_active; + pf_queues_active = pf_queues_inactive; + pf_queues_inactive = qswap; + + error = pf_create_queues(); + if (error != 0) { + pf_queues_inactive = pf_queues_active; + pf_queues_active = qswap; return (error); + } - /* swap */ - qswap = pf_queues_active; - pf_queues_active = pf_queues_inactive; - pf_queues_inactive = qswap; - pf_free_queues(pf_queues_inactive, NULL); + pf_free_queues(pf_queues_inactive); - return (pf_create_queues()); + return (0); } #define PF_MD5_UPD(st, elm) \ @@ -935,7 +1018,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) else { pf_status.running = 0; pf_status.since = time_second; - pf_remove_queues(NULL); + pf_remove_queues(); DPFPRINTF(LOG_NOTICE, "pf: stopped"); } break; @@ -1001,7 +1084,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } bcopy(qs, &pq->queue, sizeof(pq->queue)); - error = hfsc_qstats(qs, pq->buf, &nbytes); + error = hfsc_pf_qstats(qs, pq->buf, &nbytes); if (error == 0) pq->nbytes = nbytes; break; diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h index aad10865ed3..5c5a30e3879 100644 --- a/sys/net/pfvar.h +++ b/sys/net/pfvar.h 
@@ -1,4 +1,4 @@ -/* $OpenBSD: pfvar.h,v 1.422 2015/10/30 11:33:55 mikeb Exp $ */ +/* $OpenBSD: pfvar.h,v 1.423 2015/11/20 03:35:23 dlg Exp $ */ /* * Copyright (c) 2001 Daniel Hartmeier @@ -1657,9 +1657,6 @@ extern struct pf_queuehead pf_queues[2]; extern struct pf_queuehead *pf_queues_active, *pf_queues_inactive; extern u_int32_t ticket_pabuf; -extern int pf_free_queues(struct pf_queuehead *, - struct ifnet *); -extern int pf_remove_queues(struct ifnet *); extern int pf_tbladdr_setup(struct pf_ruleset *, struct pf_addr_wrap *); extern void pf_tbladdr_remove(struct pf_addr_wrap *); |