author:    Mark Kettenis <kettenis@cvs.openbsd.org>	2008-08-26 21:06:30 +0000
committer: Mark Kettenis <kettenis@cvs.openbsd.org>	2008-08-26 21:06:30 +0000
commit:    e56118e28aad1eb81e94228213cc073711827850
tree:      f3f0892e0f7fd9bcc3009a25697e841939677c0b /sys/dev
parent:    91b9a65730c3ff64b72d0701cd14d81bafa98035
Fix a couple of problems that may make gem(4) get stuck:
1. If bus_dmamap_load_mbuf() fails because there are not enough
segments in the map, defrag the mbuf.
2. If there are not enough free (hardware ring) descriptors, set
IFF_OACTIVE and keep the packet on the queue.
3. If there is some other resource starvation that makes
bus_dmamap_load_mbuf() or defragmentation fail, drop the packet.
Don't set IFF_OACTIVE, since the Tx ring could be empty and we'd be
stuck.
4. Only pass packets that are actually handed off to the hardware to
BPF. Do so before handing them off to the hardware to make sure
the packet isn't freed behind our back.
ok dlg@
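
The queueing policy described above can be condensed into the following sketch (illustrative only, not the literal driver code; gem_tx_load_mbuf() is a hypothetical helper standing in for the bus_dmamap_load_mbuf()/defragment sequence shown in the diff below, and the usual gem(4) softc/ifnet context is assumed):

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Mapping (even after defragmentation) failed: drop (item 3). */
		if (gem_tx_load_mbuf(sc, map, m) != 0)
			goto drop;

		/* Not enough free descriptors: keep the packet queued (item 2). */
		if (sc->sc_tx_cnt + map->dm_nsegs > GEM_NTXDESC - 2) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Committed: dequeue, let BPF see it, then hand it to the chip (item 4). */
		IFQ_DEQUEUE(&ifp->if_snd, m);
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		/* ... fill the Tx descriptors and write GEM_TX_KICK, as in the diff ... */
	}
	return;

 drop:
	/* Resource starvation: drop rather than stall the Tx queue. */
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_free(m);
	ifp->if_oerrors++;
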
Diffstat (limited to 'sys/dev')
-rw-r--r--	sys/dev/ic/gem.c | 158
1 file changed, 91 insertions(+), 67 deletions(-)
diff --git a/sys/dev/ic/gem.c b/sys/dev/ic/gem.c
index 97c5a909e6e..d5d17fb68b0 100644
--- a/sys/dev/ic/gem.c
+++ b/sys/dev/ic/gem.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: gem.c,v 1.75 2008/05/31 02:41:25 brad Exp $	*/
+/*	$OpenBSD: gem.c,v 1.76 2008/08/26 21:06:29 kettenis Exp $	*/
 /*	$NetBSD: gem.c,v 1.1 2001/09/16 00:11:43 eeh Exp $ */
 
 /*
@@ -100,7 +100,6 @@ int		gem_disable_tx(struct gem_softc *);
 void		gem_rxdrain(struct gem_softc *);
 int		gem_add_rxbuf(struct gem_softc *, int idx);
 void		gem_setladrf(struct gem_softc *);
-int		gem_encap(struct gem_softc *, struct mbuf *, u_int32_t *);
 
 /* MII methods & callbacks */
 int		gem_mii_readreg(struct device *, int, int);
@@ -1600,58 +1599,6 @@ chipit:
 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
 }
 
-int
-gem_encap(struct gem_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
-{
-	u_int64_t flags;
-	u_int32_t cur, frag, i;
-	bus_dmamap_t map;
-
-	cur = frag = *bixp;
-	map = sc->sc_txd[cur].sd_map;
-
-	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
-	    BUS_DMA_NOWAIT) != 0) {
-		return (ENOBUFS);
-	}
-
-	if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
-		bus_dmamap_unload(sc->sc_dmatag, map);
-		return (ENOBUFS);
-	}
-
-	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
-	    BUS_DMASYNC_PREWRITE);
-
-	for (i = 0; i < map->dm_nsegs; i++) {
-		sc->sc_txdescs[frag].gd_addr =
-		    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
-		flags = (map->dm_segs[i].ds_len & GEM_TD_BUFSIZE) |
-		    (i == 0 ? GEM_TD_START_OF_PACKET : 0) |
-		    ((i == (map->dm_nsegs - 1)) ? GEM_TD_END_OF_PACKET : 0);
-		sc->sc_txdescs[frag].gd_flags = GEM_DMA_WRITE(sc, flags);
-		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
-		    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
-		    BUS_DMASYNC_PREWRITE);
-		cur = frag;
-		if (++frag == GEM_NTXDESC)
-			frag = 0;
-	}
-
-	sc->sc_tx_cnt += map->dm_nsegs;
-	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
-	sc->sc_txd[cur].sd_map = map;
-	sc->sc_txd[cur].sd_mbuf = mhead;
-
-	bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
-
-	*bixp = frag;
-
-	/* sync descriptors */
-
-	return (0);
-}
-
 /*
  * Transmit interrupt.
  */
@@ -1694,18 +1641,71 @@ void
 gem_start(struct ifnet *ifp)
 {
 	struct gem_softc *sc = ifp->if_softc;
-	struct mbuf *m;
-	u_int32_t bix;
+	struct mbuf *m, *m0;
+	u_int64_t flags;
+	bus_dmamap_t map;
+	u_int32_t cur, frag, i;
+	int error;
 
 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
 		return;
 
-	bix = sc->sc_tx_prod;
-	while (sc->sc_txd[bix].sd_mbuf == NULL) {
+	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
 		IFQ_POLL(&ifp->if_snd, m);
 		if (m == NULL)
 			break;
 
+		/*
+		 * Encapsulate this packet and start it going...
+		 * or fail...
+		 */
+
+		cur = frag = sc->sc_tx_prod;
+		map = sc->sc_txd[cur].sd_map;
+		m0 = NULL;
+
+		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
+		    BUS_DMA_NOWAIT);
+		if (error != 0 && error != EFBIG)
+			goto drop;
+		if (error != 0) {
+			/* Too many fragments, linearize. */
+			MGETHDR(m0, M_DONTWAIT, MT_DATA);
+			if (m0 == NULL)
+				goto drop;
+			if (m->m_pkthdr.len > MHLEN) {
+				MCLGET(m0, M_DONTWAIT);
+				if (!(m0->m_flags & M_EXT)) {
+					m_freem(m0);
+					goto drop;
+				}
+			}
+			m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
+			m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
+			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m0,
+			    BUS_DMA_NOWAIT);
+			if (error != 0) {
+				m_freem(m0);
+				goto drop;
+			}
+		}
+
+		if ((sc->sc_tx_cnt + map->dm_nsegs) > (GEM_NTXDESC - 2)) {
+			bus_dmamap_unload(sc->sc_dmatag, map);
+			ifp->if_flags |= IFF_OACTIVE;
+			if (m0 != NULL)
+				m_free(m0);
+			break;
+		}
+
+		/* We are now committed to transmitting the packet. */
+
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+		if (m0 != NULL) {
+			m_free(m);
+			m = m0;
+		}
+
 #if NBPFILTER > 0
 		/*
 		 * If BPF is listening on this interface, let it see the
@@ -1715,18 +1715,42 @@ gem_start(struct ifnet *ifp)
 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
 
-		/*
-		 * Encapsulate this packet and start it going...
-		 * or fail...
-		 */
-		if (gem_encap(sc, m, &bix)) {
-			ifp->if_flags |= IFF_OACTIVE;
-			break;
+		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
+		    BUS_DMASYNC_PREWRITE);
+
+		for (i = 0; i < map->dm_nsegs; i++) {
+			sc->sc_txdescs[frag].gd_addr =
+			    GEM_DMA_WRITE(sc, map->dm_segs[i].ds_addr);
+			flags = map->dm_segs[i].ds_len & GEM_TD_BUFSIZE;
+			if (i == 0)
+				flags |= GEM_TD_START_OF_PACKET;
+			if (i == (map->dm_nsegs - 1))
+				flags |= GEM_TD_END_OF_PACKET;
+			sc->sc_txdescs[frag].gd_flags =
+			    GEM_DMA_WRITE(sc, flags);
+			bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
+			    GEM_CDTXOFF(frag), sizeof(struct gem_desc),
+			    BUS_DMASYNC_PREWRITE);
+			cur = frag;
+			if (++frag == GEM_NTXDESC)
+				frag = 0;
 		}
 
-		IFQ_DEQUEUE(&ifp->if_snd, m);
+		sc->sc_tx_cnt += map->dm_nsegs;
+		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
+		sc->sc_txd[cur].sd_map = map;
+		sc->sc_txd[cur].sd_mbuf = m;
+
+		bus_space_write_4(sc->sc_bustag, sc->sc_h1, GEM_TX_KICK, frag);
+		sc->sc_tx_prod = frag;
+		ifp->if_timer = 5;
 	}
 
-	sc->sc_tx_prod = bix;
+	return;
+
+ drop:
+	IFQ_DEQUEUE(&ifp->if_snd, m);
+	m_free(m);
+	ifp->if_oerrors++;
 }
 