path: root/sys/dev/pci/if_em.c
author    Stuart Henderson <sthen@cvs.openbsd.org>  2008-11-29 10:23:30 +0000
committer Stuart Henderson <sthen@cvs.openbsd.org>  2008-11-29 10:23:30 +0000
commit    3d7b0e739bfd4b898810062007e9797a0882bfea (patch)
tree      245627890dad4f46326e17fb65a5458d6fac09b6 /sys/dev/pci/if_em.c
parent    b827eb8ff64ab94d31584ff1788cdb9ad06d043d (diff)
revert 1.197 if_em.c, 1.38/1.39 if_em.h, requested by dlg, until a bug
reported on misc@ can be tracked down. identical diff from jsing.
Diffstat (limited to 'sys/dev/pci/if_em.c')
-rw-r--r--  sys/dev/pci/if_em.c  382
1 file changed, 189 insertions(+), 193 deletions(-)
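
This revert swaps the rx ring strategy back: instead of the rx_ndescs/em_rxfill() scheme from 1.197 (consume completed descriptors in em_rxeof(), refill the ring in a separate pass, MCLGETI clusters), the restored code refills each slot inline from em_rxeof() via em_get_buf() before passing the old mbuf up the stack, and prefills the whole ring at allocation time. A minimal user-space model of that inline consume-and-refill loop follows; NDESC, ring[] and get_buf() are illustrative stand-ins, not driver symbols:

#include <stdio.h>

#define NDESC 4				/* stand-in for sc->num_rx_desc */

static const char *ring[NDESC];		/* stand-in for rx_buffer_area[i].m_head */

static int
get_buf(int i)				/* stand-in for em_get_buf() */
{
	ring[i] = "fresh cluster";
	return (0);
}

int
main(void)
{
	int i, n;
	const char *m;

	for (n = 0; n < NDESC; n++)	/* prefill, as the restored */
		get_buf(n);		/* em_allocate_receive_structures() does */

	i = 0;				/* sc->next_rx_desc_to_check */

	/* A completed descriptor: refill the slot first, then input the packet. */
	m = ring[i];
	if (get_buf(i) != 0) {
		/* refill failed: the slot keeps its old cluster, packet dropped */
	} else {
		printf("ether_input gets \"%s\" from slot %d\n", m, i);
	}
	if (++i == NDESC)
		i = 0;			/* ring wrap, as in em_rxeof() */
	return (0);
}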
diff --git a/sys/dev/pci/if_em.c b/sys/dev/pci/if_em.c
index e5c701319b8..2f00435976a 100644
--- a/sys/dev/pci/if_em.c
+++ b/sys/dev/pci/if_em.c
@@ -31,7 +31,7 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
-/* $OpenBSD: if_em.c,v 1.198 2008/11/28 02:44:17 brad Exp $ */
+/* $OpenBSD: if_em.c,v 1.199 2008/11/29 10:23:29 sthen Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */
#include <dev/pci/if_em.h>
@@ -164,12 +164,6 @@ void em_update_stats_counters(struct em_softc *);
void em_txeof(struct em_softc *);
int em_allocate_receive_structures(struct em_softc *);
int em_allocate_transmit_structures(struct em_softc *);
-#ifdef __STRICT_ALIGNMENT
-void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
-#else
-#define em_realign(a, b, c) /* a, b, c */
-#endif
-void em_rxfill(struct em_softc *);
void em_rxeof(struct em_softc *, int);
void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
struct mbuf *);
@@ -804,7 +798,6 @@ em_intr(void *arg)
if (ifp->if_flags & IFF_RUNNING) {
em_rxeof(sc, -1);
- em_rxfill(sc);
em_txeof(sc);
}
@@ -1455,15 +1448,15 @@ em_stop(void *arg)
struct em_softc *sc = arg;
ifp = &sc->interface_data.ac_if;
- /* Tell the stack that the interface is no longer active */
- ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
-
INIT_DEBUGOUT("em_stop: begin");
em_disable_intr(sc);
em_reset_hw(&sc->hw);
timeout_del(&sc->timer_handle);
timeout_del(&sc->tx_fifo_timer_handle);
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+
em_free_transmit_structures(sc);
em_free_receive_structures(sc);
}
@@ -2294,54 +2287,51 @@ int
em_get_buf(struct em_softc *sc, int i)
{
struct mbuf *m;
- struct em_buffer *pkt;
- struct em_rx_desc *desc;
+ bus_dmamap_t map;
+ struct em_buffer *rx_buffer;
int error;
- pkt = &sc->rx_buffer_area[i];
- desc = &sc->rx_desc_base[i];
-
- if (pkt->m_head != NULL) {
- printf("%s: em_get_buf: slot %d already has an mbuf\n",
- sc->sc_dv.dv_xname, i);
- return (ENOBUFS);
- }
-
MGETHDR(m, M_DONTWAIT, MT_DATA);
if (m == NULL) {
sc->mbuf_alloc_failed++;
return (ENOBUFS);
}
- MCLGETI(m, M_DONTWAIT, &sc->interface_data.ac_if, MCLBYTES);
+ MCLGET(m, M_DONTWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_freem(m);
sc->mbuf_cluster_failed++;
return (ENOBUFS);
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
+
if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
m_adj(m, ETHER_ALIGN);
- error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
+ /*
+ * Using memory from the mbuf cluster pool, invoke the
+ * bus_dma machinery to arrange the memory mapping.
+ */
+ error = bus_dmamap_load_mbuf(sc->rxtag, sc->rx_sparemap,
+ m, BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
return (error);
}
- bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
- BUS_DMASYNC_PREREAD);
- pkt->m_head = m;
+ rx_buffer = &sc->rx_buffer_area[i];
+ if (rx_buffer->m_head != NULL)
+ bus_dmamap_unload(sc->rxtag, rx_buffer->map);
- bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
- sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
+ map = rx_buffer->map;
+ rx_buffer->map = sc->rx_sparemap;
+ sc->rx_sparemap = map;
- bzero(desc, sizeof(*desc));
- desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
+ bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
+ rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);
- bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
- sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
+ rx_buffer->m_head = m;
- sc->rx_ndescs++;
+ sc->rx_desc_base[i].buffer_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
return (0);
}
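
The restored em_get_buf() relies on a spare-map idiom: the new cluster is loaded into sc->rx_sparemap first; only if that succeeds is the slot's old map unloaded and the two bus_dmamap_t pointers exchanged, so a slot never ends up holding a half-loaded mapping. A sketch of just the exchange, using a hypothetical map_t in place of bus_dmamap_t:

#include <stdio.h>

typedef struct { const char *loaded_for; } map_t;	/* stand-in for bus_dmamap_t */

struct slot { map_t *map; };		/* mirrors struct em_buffer's map */

static map_t *sparemap;			/* mirrors sc->rx_sparemap */

static void
swap_spare(struct slot *s)
{
	map_t *tmp;

	/* The spare already holds the freshly loaded mapping. */
	tmp = s->map;			/* old mapping, just unloaded */
	s->map = sparemap;		/* slot adopts the loaded spare */
	sparemap = tmp;			/* old map becomes the next spare */
}

int
main(void)
{
	map_t fresh = { "new cluster" }, stale = { "old cluster" };
	struct slot s = { &stale };

	sparemap = &fresh;
	swap_spare(&s);
	printf("slot now maps the %s\n", s.map->loaded_for);
	return (0);
}

The payoff is in the error path: if bus_dmamap_load_mbuf() fails, nothing has been unloaded yet, so the ring slot is still intact and the frame can simply be dropped.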
@@ -2369,18 +2359,33 @@ em_allocate_receive_structures(struct em_softc *sc)
sc->rxtag = sc->osdep.em_pa.pa_dmat;
+ error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1, MCLBYTES,
+ 0, BUS_DMA_NOWAIT, &sc->rx_sparemap);
+ if (error != 0) {
+ printf("%s: em_allocate_receive_structures: "
+ "bus_dmamap_create failed; error %u\n",
+ sc->sc_dv.dv_xname, error);
+ goto fail;
+ }
+
rx_buffer = sc->rx_buffer_area;
for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
- MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
+ MCLBYTES, 0, BUS_DMA_NOWAIT,
+ &rx_buffer->map);
if (error != 0) {
printf("%s: em_allocate_receive_structures: "
"bus_dmamap_create failed; error %u\n",
sc->sc_dv.dv_xname, error);
goto fail;
}
- rx_buffer->m_head = NULL;
}
+
+ for (i = 0; i < sc->num_rx_desc; i++) {
+ error = em_get_buf(sc, i);
+ if (error != 0)
+ goto fail;
+ }
bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
sc->rxdma.dma_map->dm_mapsize,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
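
Allocation now proceeds in three steps — create the spare map, create one map per descriptor, then prefill every slot with em_get_buf() — with any failure unwinding through the single fail label (which lands in em_free_receive_structures(), hence the extra NULL and dm_nsegs checks in the free path below). The shape of that idiom, sketched with hypothetical create_map()/fill_slot()/unwind() stubs:

#include <stdio.h>

#define NDESC 4

static int  create_map(int i) { return (0); }	/* stands in for bus_dmamap_create() */
static int  fill_slot(int i)  { return (0); }	/* stands in for em_get_buf() */
static void unwind(void)      { }		/* stands in for em_free_receive_structures() */

static int
alloc_rx(void)
{
	int error, i;

	for (i = 0; i < NDESC; i++)
		if ((error = create_map(i)) != 0)
			goto fail;
	for (i = 0; i < NDESC; i++)
		if ((error = fill_slot(i)) != 0)
			goto fail;
	return (0);
fail:
	unwind();	/* must tolerate partially constructed state */
	return (error);
}

int
main(void)
{
	printf("alloc_rx: %d\n", alloc_rx());
	return (0);
}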
@@ -2408,14 +2413,6 @@ em_setup_receive_structures(struct em_softc *sc)
/* Setup our descriptor pointers */
sc->next_rx_desc_to_check = 0;
- sc->last_rx_desc_filled = sc->num_rx_desc - 1;
-
- em_rxfill(sc);
- if (sc->rx_ndescs < 1) {
- printf("%s: unable to fill any rx descriptors\n",
- sc->sc_dv.dv_xname);
- }
-
return (0);
}
@@ -2511,18 +2508,30 @@ em_free_receive_structures(struct em_softc *sc)
INIT_DEBUGOUT("free_receive_structures: begin");
+ if (sc->rx_sparemap) {
+ bus_dmamap_destroy(sc->rxtag, sc->rx_sparemap);
+ sc->rx_sparemap = NULL;
+ }
if (sc->rx_buffer_area != NULL) {
rx_buffer = sc->rx_buffer_area;
for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
- if (rx_buffer->m_head != NULL) {
+ if (rx_buffer->map != NULL &&
+ rx_buffer->map->dm_nsegs > 0) {
bus_dmamap_sync(sc->rxtag, rx_buffer->map,
0, rx_buffer->map->dm_mapsize,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->rxtag, rx_buffer->map);
+ bus_dmamap_unload(sc->rxtag,
+ rx_buffer->map);
+ }
+ if (rx_buffer->m_head != NULL) {
m_freem(rx_buffer->m_head);
rx_buffer->m_head = NULL;
}
- bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
+ if (rx_buffer->map != NULL) {
+ bus_dmamap_destroy(sc->rxtag,
+ rx_buffer->map);
+ rx_buffer->map = NULL;
+ }
}
}
if (sc->rx_buffer_area != NULL) {
@@ -2531,80 +2540,6 @@ em_free_receive_structures(struct em_softc *sc)
}
if (sc->rxtag != NULL)
sc->rxtag = NULL;
-
- if (sc->fmp != NULL) {
- m_freem(sc->fmp);
- sc->fmp = NULL;
- sc->lmp = NULL;
- }
-}
-
-#ifdef __STRICT_ALIGNMENT
-void
-em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
-{
- unsigned char tmp_align_buf[ETHER_ALIGN];
- int tmp_align_buf_len = 0;
-
- /*
- * The Ethernet payload is not 32-bit aligned when
- * Jumbo packets are enabled, so on architectures with
- * strict alignment we need to shift the entire packet
- * ETHER_ALIGN bytes. Ugh.
- */
- if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
- return;
-
- if (*prev_len_adj > sc->align_buf_len)
- *prev_len_adj -= sc->align_buf_len;
- else
- *prev_len_adj = 0;
-
- if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
- bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
- &tmp_align_buf, ETHER_ALIGN);
- tmp_align_buf_len = m->m_len -
- (MCLBYTES - ETHER_ALIGN);
- m->m_len -= ETHER_ALIGN;
- }
-
- if (m->m_len) {
- bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
- if (!sc->align_buf_len)
- m->m_data += ETHER_ALIGN;
- }
-
- if (sc->align_buf_len) {
- m->m_len += sc->align_buf_len;
- bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
- }
-
- if (tmp_align_buf_len)
- bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
-
- sc->align_buf_len = tmp_align_buf_len;
-}
-#endif /* __STRICT_ALIGNMENT */
-
-void
-em_rxfill(struct em_softc *sc)
-{
- int i;
-
- i = sc->last_rx_desc_filled;
-
- while (sc->rx_ndescs < sc->num_rx_desc) {
- if (++i == sc->num_rx_desc)
- i = 0;
-
- if (em_get_buf(sc, i) != 0)
- break;
-
- sc->last_rx_desc_filled = i;
- }
-
- /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
- E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
}
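
For contrast, the em_rxfill() deleted above was the 1.197 approach: walk forward from last_rx_desc_filled, calling em_get_buf() until the ring is full or the cluster pool runs dry, then publish progress by writing the last filled index to the RDT register. A user-space model of that loop — ndescs, last_filled and get_buf() are illustrative names:

#include <stdio.h>

#define NDESC 8				/* stand-in for sc->num_rx_desc */

static int ndescs;			/* mirrors sc->rx_ndescs */
static int last_filled = NDESC - 1;	/* mirrors sc->last_rx_desc_filled */

static int
get_buf(int i)				/* stand-in for em_get_buf(); the real */
{					/* one incremented sc->rx_ndescs itself */
	ndescs++;
	return (0);
}

static void
rxfill(void)
{
	int i = last_filled;

	while (ndescs < NDESC) {
		if (++i == NDESC)
			i = 0;
		if (get_buf(i) != 0)
			break;		/* pool empty; retry on a later call */
		last_filled = i;
	}
	/* models E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled) */
	printf("write %d to RDT\n", last_filled);
}

int
main(void)
{
	rxfill();
	return (0);
}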
/*********************************************************************
@@ -2620,59 +2555,44 @@ em_rxfill(struct em_softc *sc)
void
em_rxeof(struct em_softc *sc, int count)
{
- struct ifnet *ifp = &sc->interface_data.ac_if;
- struct mbuf *m;
+ struct ifnet *ifp;
+ struct mbuf *mp;
u_int8_t accept_frame = 0;
u_int8_t eop = 0;
u_int16_t len, desc_len, prev_len_adj;
int i;
/* Pointer to the receive descriptor being examined. */
- struct em_rx_desc *desc;
- struct em_buffer *pkt;
+ struct em_rx_desc *current_desc;
u_int8_t status;
ifp = &sc->interface_data.ac_if;
-
- if (!ISSET(ifp->if_flags, IFF_RUNNING))
- return;
-
i = sc->next_rx_desc_to_check;
+ current_desc = &sc->rx_desc_base[i];
+ bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
+ sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
- while (count != 0 && sc->rx_ndescs > 1) {
- m = NULL;
-
- desc = &sc->rx_desc_base[i];
- pkt = &sc->rx_buffer_area[i];
-
- bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
- sizeof(*desc) * i, sizeof(*desc),
- BUS_DMASYNC_POSTREAD);
+ if (!((current_desc->status) & E1000_RXD_STAT_DD))
+ return;
- status = desc->status;
- if (!ISSET(status, E1000_RXD_STAT_DD)) {
- bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
- sizeof(*desc) * i, sizeof(*desc),
- BUS_DMASYNC_PREREAD);
- break;
- }
+ while ((current_desc->status & E1000_RXD_STAT_DD) &&
+ (count != 0) &&
+ (ifp->if_flags & IFF_RUNNING)) {
+ struct mbuf *m = NULL;
- /* pull the mbuf off the ring */
- bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
+ mp = sc->rx_buffer_area[i].m_head;
+ /*
+ * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
+ * needs to access the last received byte in the mbuf.
+ */
+ bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
+ 0, sc->rx_buffer_area[i].map->dm_mapsize,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->rxtag, pkt->map);
- m = pkt->m_head;
- pkt->m_head = NULL;
-
- if (m == NULL)
- printf("omg teh nulls\n");
-
- sc->rx_ndescs--;
accept_frame = 1;
prev_len_adj = 0;
- desc_len = letoh16(desc->length);
-
+ desc_len = letoh16(current_desc->length);
+ status = current_desc->status;
if (status & E1000_RXD_STAT_EOP) {
count--;
eop = 1;
@@ -2686,18 +2606,20 @@ em_rxeof(struct em_softc *sc, int count)
len = desc_len;
}
- if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+ if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
u_int8_t last_byte;
u_int32_t pkt_len = desc_len;
if (sc->fmp != NULL)
pkt_len += sc->fmp->m_pkthdr.len;
- last_byte = *(mtod(m, caddr_t) + desc_len - 1);
- if (TBI_ACCEPT(&sc->hw, status, desc->errors,
+ last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
+ if (TBI_ACCEPT(&sc->hw, status, current_desc->errors,
pkt_len, last_byte)) {
- em_tbi_adjust_stats(&sc->hw, &sc->stats,
- pkt_len, sc->hw.mac_addr);
+ em_tbi_adjust_stats(&sc->hw,
+ &sc->stats,
+ pkt_len,
+ sc->hw.mac_addr);
if (len > 0)
len--;
} else
@@ -2705,18 +2627,70 @@ em_rxeof(struct em_softc *sc, int count)
}
if (accept_frame) {
+ if (em_get_buf(sc, i) != 0) {
+ sc->dropped_pkts++;
+ goto discard;
+ }
+
/* Assign correct length to the current fragment */
- m->m_len = len;
+ mp->m_len = len;
- em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
+#ifdef __STRICT_ALIGNMENT
+ /*
+ * The Ethernet payload is not 32-bit aligned when
+ * Jumbo packets are enabled, so on architectures with
+ * strict alignment we need to shift the entire packet
+ * ETHER_ALIGN bytes. Ugh.
+ */
+ if (sc->hw.max_frame_size > (MCLBYTES - ETHER_ALIGN)) {
+ unsigned char tmp_align_buf[ETHER_ALIGN];
+ int tmp_align_buf_len = 0;
+
+ if (prev_len_adj > sc->align_buf_len)
+ prev_len_adj -= sc->align_buf_len;
+ else
+ prev_len_adj = 0;
+
+ if (mp->m_len > (MCLBYTES - ETHER_ALIGN)) {
+ bcopy(mp->m_data +
+ (MCLBYTES - ETHER_ALIGN),
+ &tmp_align_buf,
+ ETHER_ALIGN);
+ tmp_align_buf_len = mp->m_len -
+ (MCLBYTES - ETHER_ALIGN);
+ mp->m_len -= ETHER_ALIGN;
+ }
+
+ if (mp->m_len) {
+ bcopy(mp->m_data,
+ mp->m_data + ETHER_ALIGN,
+ mp->m_len);
+ if (!sc->align_buf_len)
+ mp->m_data += ETHER_ALIGN;
+ }
+
+ if (sc->align_buf_len) {
+ mp->m_len += sc->align_buf_len;
+ bcopy(&sc->align_buf,
+ mp->m_data,
+ sc->align_buf_len);
+ }
+
+ if (tmp_align_buf_len)
+ bcopy(&tmp_align_buf,
+ &sc->align_buf,
+ tmp_align_buf_len);
+ sc->align_buf_len = tmp_align_buf_len;
+ }
+#endif /* __STRICT_ALIGNMENT */
if (sc->fmp == NULL) {
- m->m_pkthdr.len = m->m_len;
- sc->fmp = m; /* Store the first mbuf */
- sc->lmp = m;
+ mp->m_pkthdr.len = mp->m_len;
+ sc->fmp = mp; /* Store the first mbuf */
+ sc->lmp = mp;
} else {
/* Chain mbuf's together */
- m->m_flags &= ~M_PKTHDR;
+ mp->m_flags &= ~M_PKTHDR;
/*
* Adjust length of previous mbuf in chain if
* we received less than 4 bytes in the last
@@ -2726,59 +2700,81 @@ em_rxeof(struct em_softc *sc, int count)
sc->lmp->m_len -= prev_len_adj;
sc->fmp->m_pkthdr.len -= prev_len_adj;
}
- sc->lmp->m_next = m;
- sc->lmp = m;
- sc->fmp->m_pkthdr.len += m->m_len;
+ sc->lmp->m_next = mp;
+ sc->lmp = sc->lmp->m_next;
+ sc->fmp->m_pkthdr.len += mp->m_len;
}
if (eop) {
+ sc->fmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
+ em_receive_checksum(sc, current_desc, sc->fmp);
- m = sc->fmp;
- m->m_pkthdr.rcvif = ifp;
-
- em_receive_checksum(sc, desc, m);
#if NVLAN > 0
- if (desc->status & E1000_RXD_STAT_VP) {
- m->m_pkthdr.ether_vtag =
- (desc->special &
+ if (current_desc->status & E1000_RXD_STAT_VP) {
+ sc->fmp->m_pkthdr.ether_vtag =
+ (current_desc->special &
E1000_RXD_SPC_VLAN_MASK);
- m->m_flags |= M_VLANTAG;
+ sc->fmp->m_flags |= M_VLANTAG;
}
#endif
-#if NBPFILTER > 0
- if (ifp->if_bpf) {
- bpf_mtap_ether(ifp->if_bpf, m,
- BPF_DIRECTION_IN);
- }
-#endif
-
- ether_input_mbuf(ifp, m);
+ m = sc->fmp;
sc->fmp = NULL;
sc->lmp = NULL;
}
} else {
sc->dropped_pkts++;
-
+discard:
+ /* Reuse loaded DMA map and just update mbuf chain */
+ mp = sc->rx_buffer_area[i].m_head;
+ mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
+ if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ m_adj(mp, ETHER_ALIGN);
if (sc->fmp != NULL) {
m_freem(sc->fmp);
sc->fmp = NULL;
sc->lmp = NULL;
}
-
- m_freem(m);
+ m = NULL;
}
- bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
- sizeof(*desc) * i, sizeof(*desc),
- BUS_DMASYNC_PREREAD);
+ /* Zero out the receive descriptors status. */
+ current_desc->status = 0;
+ bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
+ sc->rxdma.dma_map->dm_mapsize,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance our pointers to the next descriptor. */
if (++i == sc->num_rx_desc)
i = 0;
+ if (m != NULL) {
+ sc->next_rx_desc_to_check = i;
+
+#if NBPFILTER > 0
+ /*
+ * Handle BPF listeners. Let the BPF
+ * user see the packet.
+ */
+ if (ifp->if_bpf)
+ bpf_mtap_ether(ifp->if_bpf, m,
+ BPF_DIRECTION_IN);
+#endif
+
+ ether_input_mbuf(ifp, m);
+
+ i = sc->next_rx_desc_to_check;
+ }
+ current_desc = &sc->rx_desc_base[i];
}
sc->next_rx_desc_to_check = i;
+
+ /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
+ if (--i < 0)
+ i = sc->num_rx_desc - 1;
+ E1000_WRITE_REG(&sc->hw, RDT, i);
}
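
The open-coded __STRICT_ALIGNMENT block restored above (formerly factored out as em_realign()) deals with jumbo frames: when max_frame_size exceeds MCLBYTES - ETHER_ALIGN there is no slack for the usual m_adj() at fill time, so each received cluster is shifted up by ETHER_ALIGN (2) bytes after the fact, with any bytes pushed off the end of a full cluster carried over to the next fragment in sc->align_buf. The core 2-byte shift, modeled in user space with memmove standing in for the driver's overlap-safe bcopy (buffer sizes are toy values):

#include <stdio.h>
#include <string.h>

#define ETHER_ALIGN 2

int
main(void)
{
	unsigned char cluster[32];
	unsigned char *data = cluster;	/* mirrors m->m_data */
	size_t len = 20;		/* toy frame length, with slack */

	memset(data, 'X', len);

	/*
	 * Shift the frame ETHER_ALIGN bytes further into the cluster
	 * and advance the data pointer: a 14-byte Ethernet header at
	 * offset 2 puts the IP header at offset 16, 32-bit aligned.
	 */
	memmove(data + ETHER_ALIGN, data, len);
	data += ETHER_ALIGN;

	printf("frame now starts %td bytes into the cluster\n",
	    data - cluster);
	return (0);
}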
/*********************************************************************