author    David Gwynne <dlg@cvs.openbsd.org>  2015-11-25 03:10:01 +0000
committer David Gwynne <dlg@cvs.openbsd.org>  2015-11-25 03:10:01 +0000
commit    6215416f96d04fd1a1b0e14e2670c208f0acc34c
tree      14249f751ae54985d3581b0632deb81620be2edf  /sys/arch/sgi/hpc
parent    bbe7ffca434bff081b83e600614f4ec4cded8f3b
replace IFF_OACTIVE manipulation with mpsafe operations.
there are two things shared between the network stack and drivers in the send path: the send queue and the IFF_OACTIVE flag. the send queue is now protected by a mutex. this diff makes the oactive functionality mpsafe too.

IFF_OACTIVE is part of if_flags. there are two problems with that. firstly, if_flags is a short and we don't have any MI atomic operations to manipulate a short. secondly, even if we made the IFF_OACTIVE operations mpsafe, all changes to the other flags would have to be made safe at the same time, otherwise a read-modify-write cycle on their updates could clobber the oactive change.

instead, this moves the oactive mark into struct ifqueue and provides an API for changing it. there's ifq_set_oactive, ifq_clr_oactive, and ifq_is_oactive. these are modelled on ifsq_set_oactive, ifsq_clr_oactive, and ifsq_is_oactive in dragonflybsd.

this diff includes changes to all the drivers manipulating IFF_OACTIVE to now use the ifq_{set,clr,is}_oactive API too.

ok kettenis@ mpi@ jmatthew@ deraadt@
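For illustration, a minimal sketch of what the new API could look like: the oactive mark lives in struct ifqueue next to the send queue and is covered by the queue mutex the commit message mentions. The field names, lock usage, and struct layout below are assumptions made for the example, not the actual sys/net implementation.

/*
 * sketch only: the real struct ifqueue and its locking may differ;
 * this just shows the shape of the ifq_{set,clr,is}_oactive API
 * described above.
 */
#include <sys/param.h>
#include <sys/mutex.h>

struct ifqueue {
	struct mutex	ifq_mtx;	/* protects the send queue */
	unsigned int	ifq_oactive;	/* driver is out of tx resources */
	/* ... mbuf queue, counters ... */
};

void
ifq_set_oactive(struct ifqueue *ifq)
{
	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_oactive = 1;
	mtx_leave(&ifq->ifq_mtx);
}

void
ifq_clr_oactive(struct ifqueue *ifq)
{
	mtx_enter(&ifq->ifq_mtx);
	ifq->ifq_oactive = 0;
	mtx_leave(&ifq->ifq_mtx);
}

int
ifq_is_oactive(struct ifqueue *ifq)
{
	return (ifq->ifq_oactive);
}

A driver's start routine would then bail out early when ifq_is_oactive() is true, call ifq_set_oactive() when it runs out of transmit descriptors, and call ifq_clr_oactive() from its transmit-complete interrupt, as the if_sq.c hunks below show.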
Diffstat (limited to 'sys/arch/sgi/hpc')
-rw-r--r--   sys/arch/sgi/hpc/if_sq.c   15
1 file changed, 8 insertions, 7 deletions
diff --git a/sys/arch/sgi/hpc/if_sq.c b/sys/arch/sgi/hpc/if_sq.c
index 0f2f0d034dd..f09e0937e1b 100644
--- a/sys/arch/sgi/hpc/if_sq.c
+++ b/sys/arch/sgi/hpc/if_sq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_sq.c,v 1.22 2015/11/24 17:11:38 mpi Exp $ */
+/* $OpenBSD: if_sq.c,v 1.23 2015/11/25 03:09:58 dlg Exp $ */
/* $NetBSD: if_sq.c,v 1.42 2011/07/01 18:53:47 dyoung Exp $ */
/*
@@ -548,7 +548,7 @@ sq_init(struct ifnet *ifp)
sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);
ifp->if_flags |= IFF_RUNNING;
- ifp->if_flags &= ~IFF_OACTIVE;
+ ifq_clr_oactive(&ifp->if_snd);
sq_start(ifp);
return 0;
@@ -650,7 +650,7 @@ sq_start(struct ifnet *ifp)
uint32_t status;
int err, len, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;
- if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+ if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
return;
/*
@@ -746,7 +746,7 @@ sq_start(struct ifnet *ifp)
* XXX We could allocate an mbuf and copy, but
* XXX it is worth it?
*/
- ifp->if_flags |= IFF_OACTIVE;
+ ifq_set_oactive(&ifp->if_snd);
bus_dmamap_unload(sc->sc_dmat, dmamap);
if (m != NULL)
m_freem(m);
@@ -846,7 +846,7 @@ sq_start(struct ifnet *ifp)
/* All transmit descriptors used up, let upper layers know */
if (sc->sc_nfreetx == 0)
- ifp->if_flags |= IFF_OACTIVE;
+ ifq_set_oactive(&ifp->if_snd);
if (sc->sc_nfreetx != ofree) {
SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
@@ -948,7 +948,8 @@ sq_stop(struct ifnet *ifp)
int i;
ifp->if_timer = 0;
- ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+ ifp->if_flags &= ~IFF_RUNNING;
+ ifq_clr_oactive(&ifp->if_snd);
for (i = 0; i < SQ_NTXDESC; i++) {
if (sc->sc_txmbuf[i] != NULL) {
@@ -1265,7 +1266,7 @@ sq_txintr(struct sq_softc *sc)
/* If we have buffers free, let upper layers know */
if (sc->sc_nfreetx > 0)
- ifp->if_flags &= ~IFF_OACTIVE;
+ ifq_clr_oactive(&ifp->if_snd);
/* If all packets have left the coop, cancel watchdog */
if (sc->sc_nfreetx == SQ_NTXDESC)