author		Jonathan Gray <jsg@cvs.openbsd.org>	2009-06-04 22:27:32 +0000
committer	Jonathan Gray <jsg@cvs.openbsd.org>	2009-06-04 22:27:32 +0000
commit		91de0ed61f4bcb515bb9ea8b3438aea031200887 (patch)
tree		e179e73a7d9352757784ceb460d9199af9b05530 /sys/dev/pci/if_ix.c
parent		d43e491163b2b74081409764cffe70d535b73e1d (diff)
switch to a bnx-style dynamic pool backed tx so we don't
have to allocate everything up front. Requested by reyk@, 'I'm fine with it' dlg@, 'commit it' deraadt@
Diffstat (limited to 'sys/dev/pci/if_ix.c')
-rw-r--r--	sys/dev/pci/if_ix.c	244
1 file changed, 154 insertions, 90 deletions
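
The hunks below replace the driver's preallocated tx_buffers array with free/used lists of dynamically pooled ix_pkt structures. The structure itself is defined in if_ix.h and is not part of this diff; judging from the fields used below, it presumably looks something like this (an illustrative sketch, not the committed definition):

/*
 * Hypothetical reconstruction of struct ix_pkt from its uses in this
 * diff; the real definition lives in if_ix.h and may differ.
 */
struct ix_pkt {
	bus_dmamap_t		 pkt_dmamap;	/* per-packet DMA map */
	struct mbuf		*pkt_mbuf;	/* mbuf chain given to hw */
	uint32_t		 pkt_start_desc; /* first tx descriptor index */
	TAILQ_ENTRY(ix_pkt)	 pkt_entry;	/* tx_free_pkts/tx_used_pkts */
};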
diff --git a/sys/dev/pci/if_ix.c b/sys/dev/pci/if_ix.c
index 2e6304f7fb2..e56da4f5ec5 100644
--- a/sys/dev/pci/if_ix.c
+++ b/sys/dev/pci/if_ix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.c,v 1.17 2009/04/29 13:18:58 jsg Exp $ */
+/* $OpenBSD: if_ix.c,v 1.18 2009/06/04 22:27:31 jsg Exp $ */
/******************************************************************************
@@ -130,6 +130,10 @@ int ixgbe_legacy_irq(void *);
void desc_flip(void *);
#endif
+struct rwlock ix_tx_pool_lk = RWLOCK_INITIALIZER("ixplinit");
+struct pool *ix_tx_pool = NULL;
+void ix_alloc_pkts(void *, void *);
+
/*********************************************************************
* OpenBSD Device Interface Entry Points
*********************************************************************/
@@ -576,9 +580,25 @@ ixgbe_init(void *arg)
struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t txdctl, rxdctl, mhadd, gpie;
int i, s;
+ int txpl = 1;
INIT_DEBUGOUT("ixgbe_init: begin");
+ if (rw_enter(&ix_tx_pool_lk, RW_WRITE | RW_INTR) != 0)
+ return;
+ if (ix_tx_pool == NULL) {
+ ix_tx_pool = malloc(sizeof(*ix_tx_pool), M_DEVBUF, M_WAITOK);
+ if (ix_tx_pool != NULL) {
+ pool_init(ix_tx_pool, sizeof(struct ix_pkt), 0, 0, 0,
+ "ixpkts", &pool_allocator_nointr);
+ } else
+ txpl = 0;
+ }
+ rw_exit(&ix_tx_pool_lk);
+
+ if (!txpl)
+ return;
+
s = splnet();
ixgbe_stop(sc);
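
The hunk above creates the shared tx pool lazily on the first ixgbe_init() of any ix(4) instance, serialized with a write rwlock so two interfaces coming up concurrently cannot both allocate it. Reduced to its bones, the pattern is as follows (a minimal sketch with made-up names, mirroring only the calls the diff itself uses):

/*
 * Once-only shared pool setup guarded by an rwlock.  "example_lk",
 * "example_pool" and struct example are placeholders, not driver code.
 */
struct rwlock example_lk = RWLOCK_INITIALIZER("exinit");
struct pool *example_pool;

int
example_pool_init(void)
{
	if (rw_enter(&example_lk, RW_WRITE | RW_INTR) != 0)
		return (EINTR);		/* interrupted while waiting */
	if (example_pool == NULL) {
		example_pool = malloc(sizeof(*example_pool), M_DEVBUF,
		    M_WAITOK);
		pool_init(example_pool, sizeof(struct example), 0, 0, 0,
		    "expkts", &pool_allocator_nointr);
	}
	rw_exit(&example_lk);
	return (0);
}

malloc(9) with M_WAITOK sleeps rather than returning NULL, so the sketch omits the failure path the diff carries.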
@@ -826,16 +846,30 @@ int
ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
{
struct ix_softc *sc = txr->sc;
+ struct ix_pkt *pkt;
uint32_t olinfo_status = 0, cmd_type_len = 0;
int i, j, error;
int first, last = 0;
bus_dmamap_t map;
- struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
union ixgbe_adv_tx_desc *txd = NULL;
#ifdef notyet
uint32_t paylen = 0;
#endif
+ mtx_enter(&txr->tx_pkt_mtx);
+ pkt = TAILQ_FIRST(&txr->tx_free_pkts);
+ if (pkt == NULL) {
+ if (txr->tx_pkt_count <= sc->num_tx_desc &&
+ !ISSET(sc->ix_flags, IX_ALLOC_PKTS_FLAG) &&
+ workq_add_task(NULL, 0, ix_alloc_pkts, sc, NULL) == 0)
+ SET(sc->ix_flags, IX_ALLOC_PKTS_FLAG);
+
+ mtx_leave(&txr->tx_pkt_mtx);
+ return (ENOMEM);
+ }
+ TAILQ_REMOVE(&txr->tx_free_pkts, pkt, pkt_entry);
+ mtx_leave(&txr->tx_pkt_mtx);
+
/* Basic descriptor defines */
cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
@@ -845,6 +879,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
#endif
+#if 0
/*
* Force a cleanup if number of TX descriptors
* available is below the threshold. If it fails
@@ -858,6 +893,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
return (ENOBUFS);
}
}
+#endif
/*
* Important to capture the first descriptor
@@ -865,22 +901,16 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
* the one we tell the hardware to report back
*/
first = txr->next_avail_tx_desc;
- txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
- map = txbuf->map;
+ map = pkt->pkt_dmamap;
/*
* Map the packet for DMA.
*/
error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
m_head, BUS_DMA_NOWAIT);
-
- if (error == ENOMEM) {
+ if (error != 0) {
sc->no_tx_dma_setup++;
- return (error);
- } else if (error != 0) {
- sc->no_tx_dma_setup++;
- return (error);
+ goto maperr;
}
/* Make certain there are enough descriptors */
@@ -909,7 +939,6 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
i = txr->next_avail_tx_desc;
for (j = 0; j < map->dm_nsegs; j++) {
- txbuf = &txr->tx_buffers[i];
txd = &txr->tx_base[i];
txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
@@ -921,8 +950,6 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
if (++i == sc->num_tx_desc)
i = 0;
- txbuf->m_head = NULL;
-
/*
* we have to do this inside the loop right now
* because of the hardware workaround.
@@ -939,21 +966,32 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
txr->tx_avail -= map->dm_nsegs;
txr->next_avail_tx_desc = i;
- txbuf->m_head = m_head;
- txbuf->map = map;
+ pkt->pkt_mbuf = m_head;
+ pkt->pkt_start_desc = first;
+
+ mtx_enter(&txr->tx_pkt_mtx);
+ TAILQ_INSERT_TAIL(&txr->tx_used_pkts, pkt, pkt_entry);
+ mtx_leave(&txr->tx_pkt_mtx);
+
bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREWRITE);
+#if 0
/* Set the index of the descriptor that will be marked done */
txbuf = &txr->tx_buffers[first];
+#endif
++txr->tx_packets;
return (0);
xmit_fail:
- bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
- return (error);
-
+ bus_dmamap_unload(txr->txdma.dma_tag, map);
+maperr:
+ mtx_enter(&txr->tx_pkt_mtx);
+ TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
+ mtx_leave(&txr->tx_pkt_mtx);
+
+ return (ENOMEM);
}
void
@@ -1654,40 +1692,23 @@ ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
struct ix_softc *sc;
struct ixgbe_osdep *os;
struct ifnet *ifp;
- struct ixgbe_tx_buf *txbuf;
- int error, i;
sc = txr->sc;
os = &sc->osdep;
ifp = &sc->arpcom.ac_if;
- if (!(txr->tx_buffers =
- (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
- sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- printf("%s: Unable to allocate tx_buffer memory\n",
- ifp->if_xname);
- error = ENOMEM;
- goto fail;
- }
txr->txtag = txr->txdma.dma_tag;
- /* Create the descriptor buffer dma maps */
- for (i = 0; i < sc->num_tx_desc; i++) {
- txbuf = &txr->tx_buffers[i];
- error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
- IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
- BUS_DMA_NOWAIT, &txbuf->map);
+ /* Create lists to hold TX mbufs */
+ TAILQ_INIT(&txr->tx_free_pkts);
+ TAILQ_INIT(&txr->tx_used_pkts);
+ txr->tx_pkt_count = 0;
+ mtx_init(&txr->tx_pkt_mtx, IPL_NET);
- if (error != 0) {
- printf("%s: Unable to create TX DMA map\n",
- ifp->if_xname);
- goto fail;
- }
- }
+ /* Force an allocate of some dmamaps for tx up front */
+ ix_alloc_pkts(sc, NULL);
return 0;
-fail:
- return (error);
}
/*********************************************************************
@@ -1822,41 +1843,36 @@ ixgbe_free_transmit_structures(struct ix_softc *sc)
void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
- struct ix_softc *sc = txr->sc;
- struct ixgbe_tx_buf *tx_buffer;
- int i;
-
+ struct ix_pkt *pkt;
INIT_DEBUGOUT("free_transmit_ring: begin");
- if (txr->tx_buffers == NULL)
- return;
+ mtx_enter(&txr->tx_pkt_mtx);
+ while ((pkt = TAILQ_FIRST(&txr->tx_used_pkts)) != NULL) {
+ TAILQ_REMOVE(&txr->tx_used_pkts, pkt, pkt_entry);
+ mtx_leave(&txr->tx_pkt_mtx);
+
+ bus_dmamap_sync(txr->txdma.dma_tag, pkt->pkt_dmamap,
+ 0, pkt->pkt_dmamap->dm_mapsize,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(txr->txdma.dma_tag, pkt->pkt_dmamap);
+ m_freem(pkt->pkt_mbuf);
- tx_buffer = txr->tx_buffers;
- for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
- if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
- bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
- 0, tx_buffer->map->dm_mapsize,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txdma.dma_tag,
- tx_buffer->map);
- }
- if (tx_buffer->m_head != NULL) {
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
- }
- if (tx_buffer->map != NULL) {
- bus_dmamap_destroy(txr->txdma.dma_tag,
- tx_buffer->map);
- tx_buffer->map = NULL;
- }
+ mtx_enter(&txr->tx_pkt_mtx);
+ TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
}
- if (txr->tx_buffers != NULL) {
- free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
+ /* Destroy all the dmamaps we allocated for TX */
+ while ((pkt = TAILQ_FIRST(&txr->tx_free_pkts)) != NULL) {
+ TAILQ_REMOVE(&txr->tx_free_pkts, pkt, pkt_entry);
+ txr->tx_pkt_count--;
+ mtx_leave(&txr->tx_pkt_mtx);
+
+ bus_dmamap_destroy(txr->txdma.dma_tag, pkt->pkt_dmamap);
+ pool_put(ix_tx_pool, pkt);
+
+ mtx_enter(&txr->tx_pkt_mtx);
}
- txr->tx_buffers = NULL;
- txr->txtag = NULL;
+ mtx_leave(&txr->tx_pkt_mtx);
}
/*********************************************************************
@@ -1871,7 +1887,6 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct ixgbe_adv_tx_context_desc *TXD;
- struct ixgbe_tx_buf *tx_buffer;
uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
struct ip *ip;
struct ip6_hdr *ip6;
@@ -1889,7 +1904,6 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
offload = FALSE;
- tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
/*
@@ -1982,8 +1996,6 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
desc_flip(TXD);
#endif
- tx_buffer->m_head = NULL;
-
/* We've consumed the first desc, adjust counters */
if (++ctxd == sc->num_tx_desc)
ctxd = 0;
@@ -2121,8 +2133,9 @@ ixgbe_txeof(struct tx_ring *txr)
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
uint first, last, done, num_avail;
- struct ixgbe_tx_buf *tx_buffer;
- struct ixgbe_legacy_tx_desc *tx_desc;
+ struct ixgbe_legacy_tx_desc *tx_desc;
+ struct ix_pkt *pkt;
+ bus_dmamap_t map;
if (txr->tx_avail == sc->num_tx_desc)
return FALSE;
@@ -2130,8 +2143,6 @@ ixgbe_txeof(struct tx_ring *txr)
num_avail = txr->tx_avail;
first = txr->next_tx_to_clean;
- tx_buffer = &txr->tx_buffers[first];
-
/* For cleanup we just use legacy struct */
tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
@@ -2153,22 +2164,30 @@ ixgbe_txeof(struct tx_ring *txr)
tx_desc->buffer_addr = 0;
num_avail++;
- if (tx_buffer->m_head) {
- ifp->if_opackets++;
- bus_dmamap_sync(txr->txdma.dma_tag,
- tx_buffer->map,
- 0, tx_buffer->map->dm_mapsize,
+ mtx_enter(&txr->tx_pkt_mtx);
+ pkt = TAILQ_FIRST(&txr->tx_used_pkts);
+ if (pkt != NULL) {
+ TAILQ_REMOVE(&txr->tx_used_pkts, pkt, pkt_entry);
+ mtx_leave(&txr->tx_pkt_mtx);
+ /*
+ * Free the associated mbuf.
+ */
+ map = pkt->pkt_dmamap;
+ bus_dmamap_sync(txr->txdma.dma_tag, map,
+ 0, map->dm_mapsize,
BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(txr->txdma.dma_tag,
- tx_buffer->map);
- m_freem(tx_buffer->m_head);
- tx_buffer->m_head = NULL;
+ bus_dmamap_unload(txr->txdma.dma_tag, map);
+ m_freem(pkt->pkt_mbuf);
+ ifp->if_opackets++;
+
+ mtx_enter(&txr->tx_pkt_mtx);
+ TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
}
+ mtx_leave(&txr->tx_pkt_mtx);
if (++first == sc->num_tx_desc)
first = 0;
- tx_buffer = &txr->tx_buffers[first];
tx_desc = (struct ixgbe_legacy_tx_desc *)
&txr->tx_base[first];
}
@@ -2298,6 +2317,51 @@ ixgbe_get_buf(struct rx_ring *rxr, int i)
return (0);
}
+void
+ix_alloc_pkts(void *xsc, void *arg)
+{
+ struct ix_softc *sc = (struct ix_softc *)xsc;
+ struct tx_ring *txr = &sc->tx_rings[0]; /* XXX */
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ix_pkt *pkt;
+ int i, s;
+
+ for (i = 0; i < 4; i++) { /* magic! */
+ pkt = pool_get(ix_tx_pool, PR_WAITOK);
+ if (pkt == NULL)
+ break;
+
+ if (bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
+ IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
+ BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &pkt->pkt_dmamap) != 0)
+ goto put;
+
+ if (!ISSET(ifp->if_flags, IFF_UP))
+ goto stopping;
+
+ mtx_enter(&txr->tx_pkt_mtx);
+ TAILQ_INSERT_TAIL(&txr->tx_free_pkts, pkt, pkt_entry);
+ txr->tx_pkt_count++;
+ mtx_leave(&txr->tx_pkt_mtx);
+ }
+
+ mtx_enter(&txr->tx_pkt_mtx);
+ CLR(sc->ix_flags, IX_ALLOC_PKTS_FLAG);
+ mtx_leave(&txr->tx_pkt_mtx);
+
+ s = splnet();
+ if (!IFQ_IS_EMPTY(&ifp->if_snd))
+ ixgbe_start(ifp);
+ splx(s);
+
+ return;
+
+stopping:
+ bus_dmamap_destroy(txr->txdma.dma_tag, pkt->pkt_dmamap);
+put:
+ pool_put(ix_tx_pool, pkt);
+}
+
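
Taken together with the encap and txeof hunks, ix_alloc_pkts() closes the loop on the new pkt lifecycle. A rough flow of where each ix_pkt lives (a summary of the hunks above, not code from the diff):

/*
 * ix_pkt lifecycle introduced by this commit:
 *
 *   ix_alloc_pkts()   pool_get() + bus_dmamap_create(), four at a
 *                     time from a workq task -> tx_free_pkts
 *   ixgbe_encap()     dequeue from tx_free_pkts, load the mbuf and
 *                     post descriptors -> tx_used_pkts; if the free
 *                     list is empty, schedule ix_alloc_pkts() and
 *                     return ENOMEM
 *   ixgbe_txeof()     on hardware completion, unload the map and
 *                     m_freem() the mbuf -> back to tx_free_pkts
 *   ixgbe_free_transmit_buffers()
 *                     drain both lists, destroy the maps, pool_put()
 */

Doing the BUS_DMA_WAITOK map creation from a workq task keeps those sleeps out of the splnet() transmit path, which is presumably why ixgbe_encap() defers with ENOMEM instead of allocating inline.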
/*********************************************************************
*
* Allocate memory for rx_buffer structures. Since we use one