Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/pci/if_ix.c  881
-rw-r--r--  sys/dev/pci/if_ix.h   35
2 files changed, 445 insertions(+), 471 deletions(-)
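
The if_ix.c diff below backs out the shared ix_pkt pool/workq transmit scheme in favor of the older per-descriptor ixgbe_tx_buf array, where every transmit slot owns a DMA map created once at setup and reused for each packet. As a rough orientation, a minimal sketch of that per-slot lifecycle follows; tx_slot_load() and tx_slot_done() are illustrative names, not functions from the driver, and error handling is abbreviated:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

/* Minimal mirror of the per-slot structure reintroduced in if_ix.h. */
struct ixgbe_tx_buf {
	struct mbuf	*m_head;
	bus_dmamap_t	 map;
};

/* Map a packet into a transmit slot, roughly as ixgbe_encap() does. */
int
tx_slot_load(bus_dma_tag_t tag, struct ixgbe_tx_buf *txbuf, struct mbuf *m)
{
	int error;

	error = bus_dmamap_load_mbuf(tag, txbuf->map, m, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	bus_dmamap_sync(tag, txbuf->map, 0, txbuf->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	txbuf->m_head = m;
	return (0);
}

/*
 * Reclaim a slot once the hardware marks its descriptor done,
 * roughly as ixgbe_txeof() does.
 */
void
tx_slot_done(bus_dma_tag_t tag, struct ixgbe_tx_buf *txbuf)
{
	bus_dmamap_sync(tag, txbuf->map, 0, txbuf->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(tag, txbuf->map);
	m_freem(txbuf->m_head);
	txbuf->m_head = NULL;
}

Because the map lives in the slot for the life of the ring, the commit can drop the ix_tx_pool pool allocator, its rwlock, and the ix_alloc_pkts() workq task entirely.
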
diff --git a/sys/dev/pci/if_ix.c b/sys/dev/pci/if_ix.c
index 5e0dbb64d48..c973db0215a 100644
--- a/sys/dev/pci/if_ix.c
+++ b/sys/dev/pci/if_ix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.c,v 1.19 2009/06/24 13:36:56 deraadt Exp $ */
+/* $OpenBSD: if_ix.c,v 1.20 2009/06/25 17:01:32 deraadt Exp $ */
/******************************************************************************
@@ -85,7 +85,7 @@ void ixgbe_setup_interface(struct ix_softc *);
int ixgbe_allocate_transmit_buffers(struct tx_ring *);
int ixgbe_setup_transmit_structures(struct ix_softc *);
-int ixgbe_setup_transmit_ring(struct tx_ring *);
+void ixgbe_setup_transmit_ring(struct tx_ring *);
void ixgbe_initialize_transmit_units(struct ix_softc *);
void ixgbe_free_transmit_structures(struct ix_softc *);
void ixgbe_free_transmit_buffers(struct tx_ring *);
@@ -96,7 +96,6 @@ int ixgbe_setup_receive_ring(struct rx_ring *);
void ixgbe_initialize_receive_units(struct ix_softc *);
void ixgbe_free_receive_structures(struct ix_softc *);
void ixgbe_free_receive_buffers(struct rx_ring *);
-int ixgbe_rxfill(struct rx_ring *);
void ixgbe_enable_intr(struct ix_softc *);
void ixgbe_disable_intr(struct ix_softc *);
@@ -111,14 +110,16 @@ void ixgbe_set_multi(struct ix_softc *);
void ixgbe_print_hw_stats(struct ix_softc *);
#endif
void ixgbe_update_link_status(struct ix_softc *);
-int ixgbe_get_buf(struct rx_ring *, int);
+int ixgbe_get_buf(struct rx_ring *, int, struct mbuf *);
int ixgbe_encap(struct tx_ring *, struct mbuf *);
void ixgbe_enable_hw_vlans(struct ix_softc * sc);
int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
struct ixgbe_dma_alloc *, int);
void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
-int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-int ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
+#ifdef IX_CSUM_OFFLOAD
+int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+int ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
+#endif
void ixgbe_set_ivar(struct ix_softc *, uint16_t, uint8_t);
void ixgbe_configure_ivars(struct ix_softc *);
uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
@@ -130,10 +131,6 @@ int ixgbe_legacy_irq(void *);
void desc_flip(void *);
#endif
-struct rwlock ix_tx_pool_lk = RWLOCK_INITIALIZER("ixplinit");
-struct pool *ix_tx_pool = NULL;
-void ix_alloc_pkts(void *, void *);
-
/*********************************************************************
* OpenBSD Device Interface Entry Points
*********************************************************************/
@@ -578,25 +575,9 @@ ixgbe_init(void *arg)
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t txdctl, rxdctl, mhadd, gpie;
int i, s;
- int txpl = 1;
INIT_DEBUGOUT("ixgbe_init: begin");
- if (rw_enter(&ix_tx_pool_lk, RW_WRITE | RW_INTR) != 0)
- return;
- if (ix_tx_pool == NULL) {
- ix_tx_pool = malloc(sizeof(*ix_tx_pool), M_DEVBUF, M_WAITOK);
- if (ix_tx_pool != NULL) {
- pool_init(ix_tx_pool, sizeof(struct ix_pkt), 0, 0, 0,
- "ixpkts", &pool_allocator_nointr);
- } else
- txpl = 0;
- }
- rw_exit(&ix_tx_pool_lk);
-
- if (!txpl)
- return;
-
s = splnet();
ixgbe_stop(sc);
@@ -717,7 +698,7 @@ ixgbe_legacy_irq(void *arg)
struct tx_ring *txr = sc->tx_rings;
struct rx_ring *rxr = sc->rx_rings;
struct ixgbe_hw *hw = &sc->hw;
- int claimed = 0, refill = 0;
+ int claimed = 0;
for (;;) {
reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
@@ -725,12 +706,10 @@ ixgbe_legacy_irq(void *arg)
break;
claimed = 1;
- refill = 0;
if (ifp->if_flags & IFF_RUNNING) {
ixgbe_rxeof(rxr, -1);
ixgbe_txeof(txr);
- refill = 1;
}
/* Check for fan failure */
@@ -748,12 +727,6 @@ ixgbe_legacy_irq(void *arg)
ixgbe_update_link_status(sc);
timeout_add_sec(&sc->timer, 1);
}
-
- if (refill && ixgbe_rxfill(rxr)) {
- /* Advance the Rx Queue "Tail Pointer" */
- IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
- rxr->last_rx_desc_filled);
- }
}
if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
@@ -844,30 +817,16 @@ int
ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
{
struct ix_softc *sc = txr->sc;
- struct ix_pkt *pkt;
uint32_t olinfo_status = 0, cmd_type_len = 0;
int i, j, error;
int first, last = 0;
bus_dmamap_t map;
+ struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
union ixgbe_adv_tx_desc *txd = NULL;
-#ifdef notyet
+#ifdef IX_CSUM_OFFLOAD
uint32_t paylen = 0;
#endif
- mtx_enter(&txr->tx_pkt_mtx);
- pkt = TAILQ_FIRST(&txr->tx_free_pkts);
- if (pkt == NULL) {
- if (txr->tx_pkt_count <= sc->num_tx_desc &&
- !ISSET(sc->ix_flags, IX_ALLOC_PKTS_FLAG) &&
- workq_add_task(NULL, 0, ix_alloc_pkts, sc, NULL) == 0)
- SET(sc->ix_flags, IX_ALLOC_PKTS_FLAG);
-
- mtx_leave(&txr->tx_pkt_mtx);
- return (ENOMEM);
- }
- TAILQ_REMOVE(&txr->tx_free_pkts, pkt, pkt_entry);
- mtx_leave(&txr->tx_pkt_mtx);
-
/* Basic descriptor defines */
cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
@@ -877,7 +836,6 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
#endif
-#if 0
/*
* Force a cleanup if number of TX descriptors
* available is below the threshold. If it fails
@@ -891,7 +849,6 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
return (ENOBUFS);
}
}
-#endif
/*
* Important to capture the first descriptor
@@ -899,16 +856,22 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
* the one we tell the hardware to report back
*/
first = txr->next_avail_tx_desc;
- map = pkt->pkt_dmamap;
+ txbuf = &txr->tx_buffers[first];
+ txbuf_mapped = txbuf;
+ map = txbuf->map;
/*
* Map the packet for DMA.
*/
- error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
+ error = bus_dmamap_load_mbuf(txr->txtag, map,
m_head, BUS_DMA_NOWAIT);
- if (error != 0) {
+
+ if (error == ENOMEM) {
+ sc->no_tx_dma_setup++;
+ return (error);
+ } else if (error != 0) {
sc->no_tx_dma_setup++;
- goto maperr;
+ return (error);
}
/* Make certain there are enough descriptors */
@@ -918,25 +881,25 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
goto xmit_fail;
}
+#ifdef IX_CSUM_OFFLOAD
/*
* Set the appropriate offload context
* this becomes the first descriptor of
* a packet.
*/
-#ifdef notyet
if (ixgbe_tso_setup(txr, m_head, &paylen)) {
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
++sc->tso_tx;
- } else
-#endif
- if (ixgbe_tx_ctx_setup(txr, m_head))
+ } else if (ixgbe_tx_ctx_setup(txr, m_head))
olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+#endif
i = txr->next_avail_tx_desc;
for (j = 0; j < map->dm_nsegs; j++) {
+ txbuf = &txr->tx_buffers[i];
txd = &txr->tx_base[i];
txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
@@ -948,6 +911,8 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
if (++i == sc->num_tx_desc)
i = 0;
+ txbuf->m_head = NULL;
+
/*
* we have to do this inside the loop right now
* because of the hardware workaround.
@@ -964,32 +929,21 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
txr->tx_avail -= map->dm_nsegs;
txr->next_avail_tx_desc = i;
- pkt->pkt_mbuf = m_head;
- pkt->pkt_start_desc = first;
-
- mtx_enter(&txr->tx_pkt_mtx);
- TAILQ_INSERT_TAIL(&txr->tx_used_pkts, pkt, pkt_entry);
- mtx_leave(&txr->tx_pkt_mtx);
-
- bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
+ txbuf->m_head = m_head;
+ txbuf->map = map;
+ bus_dmamap_sync(txr->txtag, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREWRITE);
-#if 0
/* Set the index of the descriptor that will be marked done */
txbuf = &txr->tx_buffers[first];
-#endif
++txr->tx_packets;
return (0);
xmit_fail:
- bus_dmamap_unload(txr->txdma.dma_tag, map);
-maperr:
- mtx_enter(&txr->tx_pkt_mtx);
- TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
- mtx_leave(&txr->tx_pkt_mtx);
-
- return (ENOMEM);
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ return (error);
+
}
void
@@ -1195,12 +1149,12 @@ ixgbe_stop(void *arg)
struct ix_softc *sc = arg;
struct ifnet *ifp = &sc->arpcom.ac_if;
- /* Tell the stack that the interface is no longer active */
- ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
-
INIT_DEBUGOUT("ixgbe_stop: begin\n");
ixgbe_disable_intr(sc);
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+
ixgbe_hw(&sc->hw, reset_hw);
sc->hw.adapter_stopped = FALSE;
ixgbe_hw(&sc->hw, stop_adapter);
@@ -1208,9 +1162,6 @@ ixgbe_stop(void *arg)
/* reprogram the RAR[0] in case user changed it. */
ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
-
- ixgbe_free_transmit_structures(sc);
- ixgbe_free_receive_structures(sc);
}
@@ -1432,12 +1383,10 @@ ixgbe_setup_interface(struct ix_softc *sc)
ETHER_HDR_LEN - ETHER_CRC_LEN;
IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
IFQ_SET_READY(&ifp->if_snd);
-
- m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
ifp->if_capabilities = IFCAP_VLAN_MTU;
-#if NVLAN > 0
+#ifdef IX_VLAN_HWTAGGING
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
@@ -1493,22 +1442,21 @@ ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
int r;
dma->dma_tag = os->os_pa->pa_dmat;
- r = bus_dmamap_create(dma->dma_tag, size, 1,
- size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
+ r = bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
+ BUS_DMA_NOWAIT, &dma->dma_map);
if (r != 0) {
printf("%s: ixgbe_dma_malloc: bus_dma_tag_create failed; "
"error %u\n", ifp->if_xname, r);
goto fail_0;
}
- r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
- 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
+ r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
+ &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
if (r != 0) {
printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
"error %u\n", ifp->if_xname, r);
goto fail_1;
}
-
r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
&dma->dma_vaddr, BUS_DMA_NOWAIT);
if (r != 0) {
@@ -1516,7 +1464,6 @@ ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
"error %u\n", ifp->if_xname, r);
goto fail_2;
}
-
r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
dma->dma_vaddr, size, NULL,
mapflags | BUS_DMA_NOWAIT);
@@ -1525,7 +1472,6 @@ ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
"error %u\n", ifp->if_xname, r);
goto fail_3;
}
-
dma->dma_size = size;
return (0);
fail_3:
@@ -1554,7 +1500,6 @@ ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
- dma->dma_map = NULL;
}
}
@@ -1631,6 +1576,15 @@ ixgbe_allocate_queues(struct ix_softc *sc)
}
txr->tx_hwb = (uint32_t *)txr->txwbdma.dma_vaddr;
*txr->tx_hwb = 0;
+
+ /* Now allocate transmit buffers for the ring */
+ if (ixgbe_allocate_transmit_buffers(txr)) {
+ printf("%s: Critical Failure setting up transmit buffers\n",
+ ifp->if_xname);
+ error = ENOMEM;
+ goto err_tx_desc;
+ }
+
}
/*
@@ -1656,6 +1610,14 @@ ixgbe_allocate_queues(struct ix_softc *sc)
}
rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
bzero((void *)rxr->rx_base, rsize);
+
+ /* Allocate receive buffers for the ring */
+ if (ixgbe_allocate_receive_buffers(rxr)) {
+ printf("%s: Critical Failure setting up receive buffers\n",
+ ifp->if_xname);
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
}
return (0);
@@ -1669,10 +1631,8 @@ err_tx_desc:
ixgbe_dma_free(sc, &txr->txwbdma);
}
free(sc->rx_rings, M_DEVBUF);
- sc->rx_rings = NULL;
rx_fail:
free(sc->tx_rings, M_DEVBUF);
- sc->tx_rings = NULL;
fail:
return (error);
}
@@ -1687,26 +1647,38 @@ fail:
int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
- struct ix_softc *sc;
- struct ixgbe_osdep *os;
- struct ifnet *ifp;
-
- sc = txr->sc;
- os = &sc->osdep;
- ifp = &sc->arpcom.ac_if;
-
- txr->txtag = txr->txdma.dma_tag;
-
- /* Create lists to hold TX mbufs */
- TAILQ_INIT(&txr->tx_free_pkts);
- TAILQ_INIT(&txr->tx_used_pkts);
- txr->tx_pkt_count = 0;
- mtx_init(&txr->tx_pkt_mtx, IPL_NET);
+ struct ix_softc *sc = txr->sc;
+ struct ixgbe_osdep *os = &sc->osdep;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ixgbe_tx_buf *txbuf;
+ int error, i;
+
+ txr->txtag = os->os_pa->pa_dmat;
+ if (!(txr->tx_buffers =
+ (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
+ sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ printf("%s: Unable to allocate tx_buffer memory\n", ifp->if_xname);
+ error = ENOMEM;
+ goto fail;
+ }
- /* Force an allocate of some dmamaps for tx up front */
- ix_alloc_pkts(sc, NULL);
+ /* Create the descriptor buffer dma maps */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
+ error = bus_dmamap_create(txr->txtag, IXGBE_TSO_SIZE,
+ IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
+ BUS_DMA_NOWAIT, &txbuf->map);
+ if (error != 0) {
+ printf("%s: Unable to create TX DMA map\n", ifp->if_xname);
+ goto fail;
+ }
+ }
return 0;
+fail:
+ /* We free all; this handles the case where we fail in the middle */
+ ixgbe_free_transmit_structures(sc);
+ return (error);
}
/*********************************************************************
@@ -1714,24 +1686,33 @@ ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
* Initialize a transmit ring.
*
**********************************************************************/
-int
+void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
- struct ix_softc *sc = txr->sc;
- int error;
-
- /* Now allocate transmit buffers for the ring */
- if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
- return (error);
+ struct ix_softc *sc = txr->sc;
+ struct ixgbe_tx_buf *txbuf;
+ int i;
/* Clear the old ring contents */
bzero((void *)txr->tx_base,
(sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
-
/* Reset indices */
txr->next_avail_tx_desc = 0;
txr->next_tx_to_clean = 0;
+ /* Free any existing tx buffers. */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
+ if (txbuf->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, txbuf->map,
+ 0, txbuf->map->dm_mapsize,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag, txbuf->map);
+ m_freem(txbuf->m_head);
+ }
+ txbuf->m_head = NULL;
+ }
+
/* Set number of descriptors available */
txr->tx_avail = sc->num_tx_desc;
@@ -1739,7 +1720,6 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
0, txr->txdma.dma_map->dm_mapsize,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- return (0);
}
/*********************************************************************
@@ -1751,17 +1731,12 @@ int
ixgbe_setup_transmit_structures(struct ix_softc *sc)
{
struct tx_ring *txr = sc->tx_rings;
- int i, error;
+ int i;
- for (i = 0; i < sc->num_tx_queues; i++, txr++) {
- if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
- goto fail;
- }
+ for (i = 0; i < sc->num_tx_queues; i++, txr++)
+ ixgbe_setup_transmit_ring(txr);
return (0);
-fail:
- ixgbe_free_transmit_structures(sc);
- return (error);
}
/*********************************************************************
@@ -1830,7 +1805,10 @@ ixgbe_free_transmit_structures(struct ix_softc *sc)
for (i = 0; i < sc->num_tx_queues; i++, txr++) {
ixgbe_free_transmit_buffers(txr);
+ ixgbe_dma_free(sc, &txr->txdma);
+ ixgbe_dma_free(sc, &txr->txwbdma);
}
+ free(sc->tx_rings, M_DEVBUF);
}
/*********************************************************************
@@ -1841,38 +1819,45 @@ ixgbe_free_transmit_structures(struct ix_softc *sc)
void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
- struct ix_pkt *pkt;
+ struct ix_softc *sc = txr->sc;
+ struct ixgbe_tx_buf *tx_buffer;
+ int i;
+
INIT_DEBUGOUT("free_transmit_ring: begin");
- mtx_enter(&txr->tx_pkt_mtx);
- while ((pkt = TAILQ_FIRST(&txr->tx_used_pkts)) != NULL) {
- TAILQ_REMOVE(&txr->tx_used_pkts, pkt, pkt_entry);
- mtx_leave(&txr->tx_pkt_mtx);
-
- bus_dmamap_sync(txr->txdma.dma_tag, pkt->pkt_dmamap,
- 0, pkt->pkt_dmamap->dm_mapsize,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txdma.dma_tag, pkt->pkt_dmamap);
- m_freem(pkt->pkt_mbuf);
+ if (txr->tx_buffers == NULL)
+ return;
- mtx_enter(&txr->tx_pkt_mtx);
- TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
+ tx_buffer = txr->tx_buffers;
+ for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
+ if (tx_buffer->m_head != NULL) {
+ bus_dmamap_sync(txr->txtag, tx_buffer->map,
+ 0, tx_buffer->map->dm_mapsize,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ if (tx_buffer->map != NULL) {
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ }
+ } else if (tx_buffer->map != NULL) {
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ bus_dmamap_destroy(txr->txtag,
+ tx_buffer->map);
+ }
+ tx_buffer->m_head = NULL;
+ tx_buffer->map = NULL;
}
- /* Destroy all the dmamaps we allocated for TX */
- while ((pkt = TAILQ_FIRST(&txr->tx_free_pkts)) != NULL) {
- TAILQ_REMOVE(&txr->tx_free_pkts, pkt, pkt_entry);
- txr->tx_pkt_count--;
- mtx_leave(&txr->tx_pkt_mtx);
-
- bus_dmamap_destroy(txr->txdma.dma_tag, pkt->pkt_dmamap);
- pool_put(ix_tx_pool, pkt);
-
- mtx_enter(&txr->tx_pkt_mtx);
- }
- mtx_leave(&txr->tx_pkt_mtx);
+ if (txr->tx_buffers != NULL)
+ free(txr->tx_buffers, M_DEVBUF);
+ txr->tx_buffers = NULL;
+ txr->txtag = NULL;
}
+#ifdef IX_CSUM_OFFLOAD
/*********************************************************************
*
* Advanced Context Descriptor setup for VLAN or CSUM
@@ -1885,12 +1870,13 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct ixgbe_adv_tx_context_desc *TXD;
+ struct ixgbe_tx_buf *tx_buffer;
uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
struct ip *ip;
struct ip6_hdr *ip6;
- uint8_t ipproto = 0;
int ehdrlen, ip_hlen = 0;
uint16_t etype;
+ uint8_t ipproto = 0;
int offload = TRUE;
int ctxd = txr->next_avail_tx_desc;
#if NVLAN > 0
@@ -1902,6 +1888,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
offload = FALSE;
+ tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
/*
@@ -1939,10 +1926,8 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
/* Set the ether header length */
vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- if (offload == TRUE) {
- switch (etype) {
+ switch (etype) {
case ETHERTYPE_IP:
ip = (struct ip *)(mp->m_data + ehdrlen);
ip_hlen = ip->ip_hl << 2;
@@ -1964,23 +1949,23 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
default:
offload = FALSE;
break;
- }
+ }
- vlan_macip_lens |= ip_hlen;
+ vlan_macip_lens |= ip_hlen;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
- default:
- offload = FALSE;
- break;
- }
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ break;
+ default:
+ offload = FALSE;
+ break;
}
/* Now copy bits into descriptor */
@@ -1994,6 +1979,8 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
desc_flip(TXD);
#endif
+ tx_buffer->m_head = NULL;
+
/* We've consumed the first desc, adjust counters */
if (++ctxd == sc->num_tx_desc)
ctxd = 0;
@@ -2109,7 +2096,7 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
return TRUE;
}
-#else
+#else /* For 6.2 RELEASE */
/* This makes it easy to keep the code common */
int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
@@ -2117,6 +2104,7 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
return (FALSE);
}
#endif
+#endif
/**********************************************************************
*
@@ -2131,9 +2119,8 @@ ixgbe_txeof(struct tx_ring *txr)
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
uint first, last, done, num_avail;
- struct ixgbe_legacy_tx_desc *tx_desc;
- struct ix_pkt *pkt;
- bus_dmamap_t map;
+ struct ixgbe_tx_buf *tx_buffer;
+ struct ixgbe_legacy_tx_desc *tx_desc;
if (txr->tx_avail == sc->num_tx_desc)
return FALSE;
@@ -2141,6 +2128,8 @@ ixgbe_txeof(struct tx_ring *txr)
num_avail = txr->tx_avail;
first = txr->next_tx_to_clean;
+ tx_buffer = &txr->tx_buffers[first];
+
/* For cleanup we just use legacy struct */
tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
@@ -2162,30 +2151,22 @@ ixgbe_txeof(struct tx_ring *txr)
tx_desc->buffer_addr = 0;
num_avail++;
- mtx_enter(&txr->tx_pkt_mtx);
- pkt = TAILQ_FIRST(&txr->tx_used_pkts);
- if (pkt != NULL) {
- TAILQ_REMOVE(&txr->tx_used_pkts, pkt, pkt_entry);
- mtx_leave(&txr->tx_pkt_mtx);
- /*
- * Free the associated mbuf.
- */
- map = pkt->pkt_dmamap;
- bus_dmamap_sync(txr->txdma.dma_tag, map,
- 0, map->dm_mapsize,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txdma.dma_tag, map);
- m_freem(pkt->pkt_mbuf);
+ if (tx_buffer->m_head) {
ifp->if_opackets++;
-
- mtx_enter(&txr->tx_pkt_mtx);
- TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
+ bus_dmamap_sync(txr->txtag,
+ tx_buffer->map,
+ 0, tx_buffer->map->dm_mapsize,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txtag,
+ tx_buffer->map);
+ m_freem(tx_buffer->m_head);
+ tx_buffer->m_head = NULL;
}
- mtx_leave(&txr->tx_pkt_mtx);
if (++first == sc->num_tx_desc)
first = 0;
+ tx_buffer = &txr->tx_buffers[first];
tx_desc = (struct ixgbe_legacy_tx_desc *)
&txr->tx_base[first];
}
@@ -2239,127 +2220,95 @@ ixgbe_txeof(struct tx_ring *txr)
*
**********************************************************************/
int
-ixgbe_get_buf(struct rx_ring *rxr, int i)
+ixgbe_get_buf(struct rx_ring *rxr, int i, struct mbuf *nmp)
{
- struct ix_softc *sc = rxr->sc;
- struct mbuf *m;
- int error;
- int size = MCLBYTES;
+ struct ix_softc *sc = rxr->sc;
+ struct mbuf *mp = nmp;
+ bus_dmamap_t map;
+ int error, old, s = 0;
+ int size = MCLBYTES;
struct ixgbe_rx_buf *rxbuf;
- union ixgbe_adv_rx_desc *rxdesc;
- size_t dsize = sizeof(union ixgbe_adv_rx_desc);
-
- rxbuf = &rxr->rx_buffers[i];
- rxdesc = &rxr->rx_base[i];
- if (rxbuf->m_head != NULL) {
- printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
- sc->dev.dv_xname, i);
- return (ENOBUFS);
- }
-
- MGETHDR(m, M_DONTWAIT, MT_DATA);
- if (m == NULL) {
+#ifdef notyet
+ /* Are we going to Jumbo clusters? */
+ if (sc->bigbufs) {
+ size = MJUMPAGESIZE;
+ s = 1;
+ };
+
+ mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
+ if (mp == NULL) {
sc->mbuf_alloc_failed++;
return (ENOBUFS);
}
- MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, size);
- if ((m->m_flags & M_EXT) == 0) {
- m_freem(m);
- sc->mbuf_cluster_failed++;
- return (ENOBUFS);
+#endif
+
+ if (mp == NULL) {
+ MGETHDR(mp, M_DONTWAIT, MT_DATA);
+ if (mp == NULL) {
+ sc->mbuf_alloc_failed++;
+ return (ENOBUFS);
+ }
+ MCLGET(mp, M_DONTWAIT);
+ if ((mp->m_flags & M_EXT) == 0) {
+ m_freem(mp);
+ sc->mbuf_cluster_failed++;
+ return (ENOBUFS);
+ }
+ mp->m_len = mp->m_pkthdr.len = size;
+ } else {
+ mp->m_len = mp->m_pkthdr.len = size;
+ mp->m_data = mp->m_ext.ext_buf;
+ mp->m_next = NULL;
}
- m->m_len = m->m_pkthdr.len = size;
- if (sc->max_frame_size <= (size - ETHER_ALIGN))
- m_adj(m, ETHER_ALIGN);
- error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
- m, BUS_DMA_NOWAIT);
+ if (sc->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+ m_adj(mp, ETHER_ALIGN);
+
+ /*
+ * Using memory from the mbuf cluster pool, invoke the bus_dma
+ * machinery to arrange the memory mapping.
+ */
+ error = bus_dmamap_load_mbuf(rxr->rxtag[s], rxr->spare_map[s],
+ mp, BUS_DMA_NOWAIT);
if (error) {
- m_freem(m);
+ m_freem(mp);
return (error);
}
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
- 0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
- rxbuf->m_head = m;
+ /* Now check our target buffer for existing mapping */
+ rxbuf = &rxr->rx_buffers[i];
+ old = rxbuf->bigbuf;
+ if (rxbuf->m_head != NULL)
+ bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
+ map = rxbuf->map[old];
+ rxbuf->map[s] = rxr->spare_map[s];
+ rxr->spare_map[old] = map;
+ rxbuf->m_head = mp;
+ rxbuf->bigbuf = s;
+
+ rxr->rx_base[i].read.pkt_addr =
+ htole64(rxbuf->map[s]->dm_segs[0].ds_addr);
- bzero(rxdesc, dsize);
- rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
+ bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ 0, rxbuf->map[s]->dm_mapsize, BUS_DMASYNC_PREREAD);
#ifndef NO_82598_A0_SUPPORT
/* A0 needs One's Complement descriptors */
if (sc->hw.revision_id == 0) {
- struct dhack {
- uint32_t a1;
- uint32_t a2;
- uint32_t b1;
- uint32_t b2;
- };
+ struct dhack {uint32_t a1; uint32_t a2; uint32_t b1; uint32_t b2;};
struct dhack *d;
- d = (struct dhack *)rxdesc;
+ d = (struct dhack *)&rxr->rx_base[i];
d->a1 = ~(d->a1);
d->a2 = ~(d->a2);
}
#endif
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize, BUS_DMASYNC_PREWRITE);
-
- rxr->rx_ndescs++;
-
return (0);
}
-void
-ix_alloc_pkts(void *xsc, void *arg)
-{
- struct ix_softc *sc = (struct ix_softc *)xsc;
- struct tx_ring *txr = &sc->tx_rings[0]; /* XXX */
- struct ifnet *ifp = &sc->arpcom.ac_if;
- struct ix_pkt *pkt;
- int i, s;
-
- for (i = 0; i < 4; i++) { /* magic! */
- pkt = pool_get(ix_tx_pool, PR_WAITOK);
- if (pkt == NULL)
- break;
-
- if (bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
- IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
- BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &pkt->pkt_dmamap) != 0)
- goto put;
-
- if (!ISSET(ifp->if_flags, IFF_UP))
- goto stopping;
-
- mtx_enter(&txr->tx_pkt_mtx);
- TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
- txr->tx_pkt_count++;
- mtx_leave(&txr->tx_pkt_mtx);
- }
-
- mtx_enter(&txr->tx_pkt_mtx);
- CLR(sc->ix_flags, IX_ALLOC_PKTS_FLAG);
- mtx_leave(&txr->tx_pkt_mtx);
-
- s = splnet();
- if (!IFQ_IS_EMPTY(&ifp->if_snd))
- ixgbe_start(ifp);
- splx(s);
-
- return;
-
-stopping:
- bus_dmamap_destroy(txr->txdma.dma_tag, pkt->pkt_dmamap);
-put:
- pool_put(ix_tx_pool, pkt);
-}
-
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one
@@ -2373,37 +2322,57 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
{
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ixgbe_osdep *os = &sc->osdep;
struct ixgbe_rx_buf *rxbuf;
- int i, bsize, error, size = MCLBYTES;
+ int i, bsize, error;
bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
- if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
+ if (!(rxr->rx_buffers =
+ (struct ixgbe_rx_buf *) malloc(bsize,
M_DEVBUF, M_NOWAIT | M_ZERO))) {
- printf("%s: Unable to allocate rx_buffer memory\n",
- ifp->if_xname);
+ printf("%s: Unable to allocate rx_buffer memory\n", ifp->if_xname);
error = ENOMEM;
goto fail;
}
- rxr->rxtag = rxr->rxdma.dma_tag;
+ rxr->rxtag[0] = rxr->rxtag[1] = os->os_pa->pa_dmat;
+
+ /* Create the spare maps (used by getbuf) */
+ error = bus_dmamap_create(rxr->rxtag[0],
+ MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &rxr->spare_map[0]);
+ if (error) {
+ printf("%s: %s: bus_dmamap_create failed: %d\n", ifp->if_xname,
+ __func__, error);
+ goto fail;
+ }
+ error = bus_dmamap_create(rxr->rxtag[1],
+ MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, BUS_DMA_NOWAIT, &rxr->spare_map[1]);
+ if (error) {
+ printf("%s: %s: bus_dmamap_create failed: %d\n", ifp->if_xname,
+ __func__, error);
+ goto fail;
+ }
- rxbuf = rxr->rx_buffers;
for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
- error = bus_dmamap_create(rxr->rxdma.dma_tag, size, 1,
- size, 0, BUS_DMA_NOWAIT, &rxbuf->map);
+ rxbuf = &rxr->rx_buffers[i];
+ error = bus_dmamap_create(rxr->rxtag[0], MCLBYTES, 1, MCLBYTES,
+ 0, BUS_DMA_NOWAIT, &rxbuf->map[0]);
if (error) {
- printf("%s: Unable to create Rx DMA map\n",
- ifp->if_xname);
+ printf("%s: Unable to create Small RX DMA map\n", ifp->if_xname);
+ goto fail;
+ }
+ error = bus_dmamap_create(rxr->rxtag[1], MJUMPAGESIZE, 1, MJUMPAGESIZE,
+ 0, BUS_DMA_NOWAIT, &rxbuf->map[1]);
+ if (error) {
+ printf("%s: Unable to create Large RX DMA map\n", ifp->if_xname);
goto fail;
}
- rxbuf->m_head = NULL;
}
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
- rxr->rxdma.dma_map->dm_mapsize,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
fail:
+ /* Frees all, but can handle partial completion */
+ ixgbe_free_receive_structures(sc);
return (error);
}
@@ -2416,50 +2385,69 @@ int
ixgbe_setup_receive_ring(struct rx_ring *rxr)
{
struct ix_softc *sc = rxr->sc;
- int rsize, error;
+ struct ixgbe_rx_buf *rxbuf;
+ int j, rsize, s = 0, i;
rsize = roundup2(sc->num_rx_desc *
sizeof(union ixgbe_adv_rx_desc), 4096);
/* Clear the ring contents */
bzero((void *)rxr->rx_base, rsize);
- if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
- return (error);
+ /*
+ ** Free current RX buffers: the size buffer
+ ** that is loaded is indicated by the buffer
+ ** bigbuf value.
+ */
+ for (i = 0; i < sc->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
+ s = rxbuf->bigbuf;
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ 0, rxbuf->map[s]->dm_mapsize,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ m_freem(rxbuf->m_head);
+ rxbuf->m_head = NULL;
+ }
+ }
+
+ for (j = 0; j < sc->num_rx_desc; j++) {
+ if (ixgbe_get_buf(rxr, j, NULL) == ENOBUFS) {
+ rxr->rx_buffers[j].m_head = NULL;
+ rxr->rx_base[j].read.pkt_addr = 0;
+ /* If we fail, some may have changed size */
+ s = sc->bigbufs;
+ goto fail;
+ }
+ }
/* Setup our descriptor indices */
rxr->next_to_check = 0;
- rxr->last_rx_desc_filled = sc->num_rx_desc - 1;
+ rxr->last_cleaned = 0;
- ixgbe_rxfill(rxr);
- if (rxr->rx_ndescs < 1) {
- printf("%s: unable to fill any rx descriptors\n",
- sc->dev.dv_xname);
- return (ENOBUFS);
- }
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ 0, rxr->rxdma.dma_map->dm_mapsize,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return (0);
-}
-
-int
-ixgbe_rxfill(struct rx_ring *rxr)
-{
- struct ix_softc *sc = rxr->sc;
- int post = 0;
- int i;
-
- i = rxr->last_rx_desc_filled;
- while (rxr->rx_ndescs < sc->num_rx_desc) {
- if (++i == sc->num_rx_desc)
- i = 0;
-
- if (ixgbe_get_buf(rxr, i) != 0)
- break;
-
- rxr->last_rx_desc_filled = i;
- post = 1;
+fail:
+ /*
+ * We need to clean up any buffers allocated so far
+ * 'j' is the failing index, decrement it to get the
+ * last success.
+ */
+ for (--j; j >= 0; j--) {
+ rxbuf = &rxr->rx_buffers[j];
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ 0, rxbuf->map[s]->dm_mapsize,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ m_freem(rxbuf->m_head);
+ rxbuf->m_head = NULL;
+ }
}
-
- return (post);
+ return (ENOBUFS);
}
/*********************************************************************
@@ -2471,16 +2459,36 @@ int
ixgbe_setup_receive_structures(struct ix_softc *sc)
{
struct rx_ring *rxr = sc->rx_rings;
- int i;
+ int i, j, s;
for (i = 0; i < sc->num_rx_queues; i++, rxr++)
if (ixgbe_setup_receive_ring(rxr))
goto fail;
return (0);
-
fail:
- ixgbe_free_receive_structures(sc);
+ /*
+ * Free RX buffers allocated so far, we will only handle
+ * the rings that completed, the failing case will have
+ * cleaned up for itself. The value of 'i' will be the
+ * failed ring so we must pre-decrement it.
+ */
+ rxr = sc->rx_rings;
+ for (--i; i >= 0; i--, rxr++) {
+ for (j = 0; j < sc->num_rx_desc; j++) {
+ struct ixgbe_rx_buf *rxbuf;
+ rxbuf = &rxr->rx_buffers[j];
+ s = rxbuf->bigbuf;
+ if (rxbuf->m_head != NULL) {
+ bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
+ 0, rxbuf->map[s]->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ m_freem(rxbuf->m_head);
+ rxbuf->m_head = NULL;
+ }
+ }
+ }
+
return (ENOBUFS);
}
@@ -2549,7 +2557,7 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
/* Setup the HW Rx Head and Tail Descriptor Pointers */
IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i),
- rxr->last_rx_desc_filled);
+ sc->num_rx_desc - 1);
}
rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
@@ -2629,7 +2637,11 @@ ixgbe_free_receive_structures(struct ix_softc *sc)
for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
ixgbe_free_receive_buffers(rxr);
+ /* Free the ring memory as well */
+ ixgbe_dma_free(sc, &rxr->rxdma);
}
+
+ free(sc->rx_rings, M_DEVBUF);
}
/*********************************************************************
@@ -2642,38 +2654,34 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
{
struct ix_softc *sc = NULL;
struct ixgbe_rx_buf *rxbuf = NULL;
- int i;
+ int i, s;
INIT_DEBUGOUT("free_receive_buffers: begin");
sc = rxr->sc;
if (rxr->rx_buffers != NULL) {
- rxbuf = rxr->rx_buffers;
- for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
+ rxbuf = &rxr->rx_buffers[0];
+ for (i = 0; i < sc->num_rx_desc; i++) {
+ int s = rxbuf->bigbuf;
+ if (rxbuf->map != NULL) {
+ bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
+ bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
+ }
if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
- 0, rxbuf->map->dm_mapsize,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
m_freem(rxbuf->m_head);
- rxbuf->m_head = NULL;
}
- bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
- rxbuf->map = NULL;
+ rxbuf->m_head = NULL;
+ ++rxbuf;
}
}
if (rxr->rx_buffers != NULL) {
free(rxr->rx_buffers, M_DEVBUF);
rxr->rx_buffers = NULL;
}
-
- if (rxr->rxtag != NULL)
- rxr->rxtag = NULL;
-
- if (rxr->fmp != NULL) {
- m_freem(rxr->fmp);
- rxr->fmp = NULL;
- rxr->lmp = NULL;
+ for (s = 0; s < 2; s++) {
+ if (rxr->rxtag[s] != NULL)
+ rxr->rxtag[s] = NULL;
}
+ return;
}
/*********************************************************************
@@ -2691,154 +2699,130 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
{
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
- struct mbuf *m;
- uint8_t accept_frame = 0;
- uint8_t eop = 0;
- uint16_t len, desc_len, prev_len_adj;
+ struct mbuf *mp;
+ int len, i, eop = 0;
+ uint8_t accept_frame = 0;
uint32_t staterr;
- struct ixgbe_rx_buf *rxbuf;
- union ixgbe_adv_rx_desc *rxdesc;
- size_t dsize = sizeof(union ixgbe_adv_rx_desc);
- int i;
-
- if (!ISSET(ifp->if_flags, IFF_RUNNING))
- return FALSE;
+ union ixgbe_adv_rx_desc *cur;
i = rxr->next_to_check;
+ cur = &rxr->rx_base[i];
- while (count != 0 && rxr->rx_ndescs > 0) {
- m = NULL;
-
- rxdesc = &rxr->rx_base[i];
- rxbuf = &rxr->rx_buffers[i];
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
+ rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize,
- BUS_DMASYNC_POSTREAD);
+ staterr = cur->wb.upper.status_error;
+ if (!(staterr & IXGBE_RXD_STAT_DD))
+ return FALSE;
- staterr = letoh32(rxdesc->wb.upper.status_error);
- if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize,
- BUS_DMASYNC_PREREAD);
- break;
- }
+ while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
+ (ifp->if_flags & IFF_RUNNING)) {
+ struct mbuf *m = NULL;
+ int s;
- /* pull the mbuf off the ring */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
- rxbuf->map->dm_mapsize,
+ mp = rxr->rx_buffers[i].m_head;
+ s = rxr->rx_buffers[i].bigbuf;
+ bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
+ 0, rxr->rx_buffers[i].map[s]->dm_mapsize,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
- m = rxbuf->m_head;
- rxbuf->m_head = NULL;
-
- if (m == NULL) {
- panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
- "(nrx %d, filled %d)", sc->dev.dv_xname,
- i, rxr->rx_ndescs,
- rxr->last_rx_desc_filled);
- }
-
- m_cluncount(m, 1);
- rxr->rx_ndescs--;
+ bus_dmamap_unload(rxr->rxtag[s],
+ rxr->rx_buffers[i].map[s]);
accept_frame = 1;
- prev_len_adj = 0;
- desc_len = letoh16(rxdesc->wb.upper.length);
-
if (staterr & IXGBE_RXD_STAT_EOP) {
count--;
eop = 1;
} else {
eop = 0;
}
- len = desc_len;
+ len = cur->wb.upper.length;
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
accept_frame = 0;
if (accept_frame) {
- m->m_len = len;
+ /* Get a fresh buffer */
+ if (ixgbe_get_buf(rxr, i, NULL) != 0) {
+ ifp->if_iqdrops++;
+ goto discard;
+ }
- /* XXX ixgbe_realign() STRICT_ALIGN */
+ /* Assign correct length to the current fragment */
+ mp->m_len = len;
if (rxr->fmp == NULL) {
- m->m_pkthdr.len = m->m_len;
- rxr->fmp = m; /* Store the first mbuf */
- rxr->lmp = m;
+ mp->m_pkthdr.len = len;
+ rxr->fmp = mp; /* Store the first mbuf */
+ rxr->lmp = mp;
} else {
/* Chain mbuf's together */
- m->m_flags &= ~M_PKTHDR;
-#if 0
- /*
- * Adjust length of previous mbuf in chain if
- * we received less than 4 bytes in the last
- * descriptor.
- */
- if (prev_len_adj > 0) {
- rxr->lmp->m_len -= prev_len_adj;
- rxr->fmp->m_pkthdr.len -= prev_len_adj;
- }
-#endif
- rxr->lmp->m_next = m;
- rxr->lmp = m;
- rxr->fmp->m_pkthdr.len += m->m_len;
+ mp->m_flags &= ~M_PKTHDR;
+ rxr->lmp->m_next = mp;
+ rxr->lmp = rxr->lmp->m_next;
+ rxr->fmp->m_pkthdr.len += len;
}
if (eop) {
+ rxr->fmp->m_pkthdr.rcvif = ifp;
ifp->if_ipackets++;
-
- m = rxr->fmp;
- m->m_pkthdr.rcvif = ifp;
-
rxr->packet_count++;
rxr->byte_count += rxr->fmp->m_pkthdr.len;
-
- ixgbe_rx_checksum(sc, staterr, m);
+ ixgbe_rx_checksum(sc, staterr, rxr->fmp);
#if NVLAN > 0
if (staterr & IXGBE_RXD_STAT_VP) {
- m->m_pkthdr.ether_vtag =
- letoh16(rxdesc->wb.upper.vlan);
- m->m_flags |= M_VLANTAG;
+ rxr->fmp->m_pkthdr.ether_vtag =
+ letoh16(cur->wb.upper.vlan);
+ rxr->fmp->m_flags |= M_VLANTAG;
}
#endif
-#if NBPFILTER > 0
- if (ifp->if_bpf)
- bpf_mtap_ether(ifp->if_bpf, m,
- BPF_DIRECTION_IN);
-#endif
-
- ether_input_mbuf(ifp, m);
+ m = rxr->fmp;
rxr->fmp = NULL;
rxr->lmp = NULL;
}
} else {
- sc->dropped_pkts++;
-
+discard:
+ ixgbe_get_buf(rxr, i, mp);
if (rxr->fmp != NULL) {
m_freem(rxr->fmp);
rxr->fmp = NULL;
rxr->lmp = NULL;
}
-
- m_freem(m);
+ m = NULL;
}
/* Zero out the receive descriptors status */
- rxdesc->wb.upper.status_error = 0;
-
+ cur->wb.upper.status_error = 0;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize,
- BUS_DMASYNC_PREREAD);
+ 0, rxr->rxdma.dma_map->dm_mapsize,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ rxr->last_cleaned = i; /* for updating tail */
- /* Advance our pointers to the next descriptor. */
if (++i == sc->num_rx_desc)
i = 0;
+
+ /* Now send up to the stack */
+ if (m != NULL) {
+ rxr->next_to_check = i;
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap_ether(ifp->if_bpf, m,
+ BPF_DIRECTION_IN);
+#endif
+ ether_input_mbuf(ifp, m);
+ i = rxr->next_to_check;
+ }
+ /* Get next descriptor */
+ cur = &rxr->rx_base[i];
+ staterr = cur->wb.upper.status_error;
}
rxr->next_to_check = i;
+ /* Advance the IXGB's Receive Queue "Tail Pointer" */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
+
if (!(staterr & IXGBE_RXD_STAT_DD))
return FALSE;
@@ -2886,7 +2870,6 @@ ixgbe_enable_hw_vlans(struct ix_softc *sc)
ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- ctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
ixgbe_enable_intr(sc);
}
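
One detail of the restored receive path worth calling out before the if_ix.h diff: ixgbe_get_buf() above loads each replacement mbuf through a pre-created spare map and only then swaps that map with the one owned by the ring slot, so a failed load never leaves a slot unmapped. A condensed sketch of the swap follows, under the same caveats as above; rx_slot_refill() and rx_ring_sketch are illustrative names, not driver code:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

/* Minimal mirrors of the driver structures used by the sketch. */
struct ixgbe_rx_buf {
	struct mbuf	*m_head;
	int		 bigbuf;	/* size class: 0 = MCLBYTES, 1 = jumbo */
	bus_dmamap_t	 map[2];
};

struct rx_ring_sketch {
	bus_dma_tag_t		 rxtag[2];
	bus_dmamap_t		 spare_map[2];
	struct ixgbe_rx_buf	*rx_buffers;
};

/* Refill slot 'i' with mbuf 'm' of size class 's'. */
int
rx_slot_refill(struct rx_ring_sketch *rxr, int i, struct mbuf *m, int s)
{
	struct ixgbe_rx_buf *rxbuf = &rxr->rx_buffers[i];
	bus_dmamap_t tmp;
	int error, old;

	/* Load into the spare first; the slot's own map stays intact. */
	error = bus_dmamap_load_mbuf(rxr->rxtag[s], rxr->spare_map[s],
	    m, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);

	old = rxbuf->bigbuf;
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);

	/* Exchange the loaded spare with the map the slot owned. */
	tmp = rxbuf->map[old];
	rxbuf->map[s] = rxr->spare_map[s];
	rxr->spare_map[old] = tmp;

	rxbuf->m_head = m;
	rxbuf->bigbuf = s;
	return (0);
}

The bigbuf index selects between the MCLBYTES and MJUMPAGESIZE tag/map pairs, which is why ixgbe_rx_buf in the header diff below grows a two-entry map array and the ring gains two spare maps.
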
diff --git a/sys/dev/pci/if_ix.h b/sys/dev/pci/if_ix.h
index ba535db5a1e..09db3de2318 100644
--- a/sys/dev/pci/if_ix.h
+++ b/sys/dev/pci/if_ix.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.h,v 1.5 2009/06/04 22:27:31 jsg Exp $ */
+/* $OpenBSD: if_ix.h,v 1.6 2009/06/25 17:01:32 deraadt Exp $ */
/******************************************************************************
@@ -151,9 +151,16 @@
/* Used for auto RX queue configuration */
extern int mp_ncpus;
+struct ixgbe_tx_buf {
+ struct mbuf *m_head;
+ bus_dmamap_t map;
+};
+
struct ixgbe_rx_buf {
struct mbuf *m_head;
- bus_dmamap_t map;
+ int bigbuf;
+ /* one small and one large map */
+ bus_dmamap_t map[2];
};
/*
@@ -168,15 +175,6 @@ struct ixgbe_dma_alloc {
int dma_nseg;
};
-struct ix_pkt {
- TAILQ_ENTRY(ix_pkt) pkt_entry;
- bus_dmamap_t pkt_dmamap;
- struct mbuf *pkt_mbuf;
- u_int16_t pkt_start_desc;
-};
-
-TAILQ_HEAD(ix_pkt_list, ix_pkt);
-
/*
* The transmit ring, one per tx queue
*/
@@ -193,10 +191,7 @@ struct tx_ring {
struct ixgbe_dma_alloc txwbdma;
uint32_t next_avail_tx_desc;
uint32_t next_tx_to_clean;
- struct mutex tx_pkt_mtx;
- u_int tx_pkt_count;
- struct ix_pkt_list tx_free_pkts;
- struct ix_pkt_list tx_used_pkts;
+ struct ixgbe_tx_buf *tx_buffers;
volatile uint16_t tx_avail;
uint32_t txd_cmd;
bus_dma_tag_t txtag;
@@ -223,11 +218,11 @@ struct rx_ring {
#if 0
struct lro_ctrl lro;
#endif
- unsigned int last_rx_desc_filled;
+ unsigned int last_cleaned;
unsigned int next_to_check;
- int rx_ndescs;
struct ixgbe_rx_buf *rx_buffers;
- bus_dma_tag_t rxtag;
+ bus_dma_tag_t rxtag[2];
+ bus_dmamap_t spare_map[2];
struct mbuf *fmp;
struct mbuf *lmp;
/* Soft stats */
@@ -246,10 +241,6 @@ struct ix_softc {
void *powerhook;
void *shutdownhook;
- /* general flags */
- int ix_flags;
-#define IX_ALLOC_PKTS_FLAG 0x01
-
struct resource *pci_mem;
struct resource *msix_mem;