author     Brad Smith <brad@cvs.openbsd.org>   2005-06-29 03:36:07 +0000
committer  Brad Smith <brad@cvs.openbsd.org>   2005-06-29 03:36:07 +0000
commit     94a313a3097b35650df4c3c8d004fc90852b8e7b (patch)
tree       41fd6d363dd6458043f536d4e43487eaa49fd679 /sys
parent     20872ae2cefa31a862e9e2b8f35faa8dfb1dad74 (diff)
- sync bus_dma ops and add bus_dmamap_sync()'s
  and rev 1.25: bcm5700 chips rev Bx wedge up if given DMA descriptors of
  eight bytes or less. Once hit, the only recovery is a watchdog
  timeout/reset. If the offending packet is retransmitted, the chip will
  wedge again. Check for teeny fragments in a Tx request, and either fold
  the teeny chunk residue into an adjacent mbuf, or m_dup the entire buffer.
  From NetBSD.
  Tested on alpha/amd64/macppc by krw@ and ok krw@
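For illustration only, a minimal sketch of the runt-folding idea described
above. This is not the driver's code — the actual workaround is
bge_compact_dma_runt() in the if_bge.c diff below — and the helper name is
made up; a real caller would still need the copy fallback (e.g. m_dup())
when a runt cannot be folded in place:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mbuf.h>

static int
fold_runt_segments(struct mbuf *pkt)	/* hypothetical name, not bge's */
{
	struct mbuf *m, *prev = NULL;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		if (m->m_len == 0 || m->m_len >= 8)
			continue;
		/*
		 * Runt segment: append its bytes to the previous mbuf,
		 * provided that mbuf is writable and has room.
		 */
		if (prev == NULL || M_READONLY(prev) ||
		    M_TRAILINGSPACE(prev) < m->m_len)
			return (ENOBUFS);	/* caller falls back to copying */
		bcopy(mtod(m, caddr_t), mtod(prev, caddr_t) + prev->m_len,
		    m->m_len);
		prev->m_len += m->m_len;
		/* m_free() returns the successor; re-stitch the chain. */
		prev->m_next = m_free(m);
		m = prev;
	}
	return (0);
}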
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/pci/if_bge.c     | 463
-rw-r--r--  sys/dev/pci/if_bgereg.h  |  25
2 files changed, 403 insertions(+), 85 deletions(-)
diff --git a/sys/dev/pci/if_bge.c b/sys/dev/pci/if_bge.c
index 81ba9569eb7..82b8146c463 100644
--- a/sys/dev/pci/if_bge.c
+++ b/sys/dev/pci/if_bge.c
@@ -1,4 +1,5 @@
-/* $OpenBSD: if_bge.c,v 1.66 2005/06/26 06:06:17 brad Exp $ */
+/* $OpenBSD: if_bge.c,v 1.67 2005/06/29 03:36:06 brad Exp $ */
+
/*
* Copyright (c) 2001 Wind River Systems
* Copyright (c) 1997, 1998, 1999, 2001
@@ -128,6 +129,7 @@ void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
void bge_stats_update_regs(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
+int bge_compact_dma_runt(struct mbuf *pkt);
int bge_intr(void *);
void bge_start(struct ifnet *);
@@ -150,7 +152,7 @@ int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(caddr_t, u_int, void *);
-int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
+int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
@@ -528,8 +530,9 @@ bge_miibus_writereg(dev, phy, reg, val)
/* Reading with autopolling on may trigger PCI errors */
autopoll = CSR_READ_4(sc, BGE_MI_MODE);
if (autopoll & BGE_MIMODE_AUTOPOLL) {
- BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
DELAY(40);
+ BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
+ DELAY(10); /* 40 usec is supposed to be adequate */
}
CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
@@ -593,43 +596,49 @@ bge_alloc_jumbo_mem(sc)
{
caddr_t ptr, kva;
bus_dma_segment_t seg;
- int i, rseg;
+ int i, rseg, state, error;
struct bge_jpool_entry *entry;
+ state = error = 0;
+
/* Grab a big chunk o' storage. */
if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
&seg, 1, &rseg, BUS_DMA_NOWAIT)) {
printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
return (ENOBUFS);
}
+
+ state = 1;
if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
BUS_DMA_NOWAIT)) {
printf("%s: can't map dma buffers (%d bytes)\n",
sc->bge_dev.dv_xname, BGE_JMEM);
- bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
- return (ENOBUFS);
+ error = ENOBUFS;
+ goto out;
}
+
+ state = 2;
if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
- bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
- bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
- return (ENOBUFS);
+ error = ENOBUFS;
+ goto out;
}
+
+ state = 3;
if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
- bus_dmamap_destroy(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_jumbo_map);
- bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
- bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
- return (ENOBUFS);
+ error = ENOBUFS;
+ goto out;
}
+
+ state = 4;
sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));
- LIST_INIT(&sc->bge_jfree_listhead);
- LIST_INIT(&sc->bge_jinuse_listhead);
+ SLIST_INIT(&sc->bge_jfree_listhead);
+ SLIST_INIT(&sc->bge_jinuse_listhead);
/*
* Now divide it up into 9K pieces and save the addresses
@@ -642,23 +651,35 @@ bge_alloc_jumbo_mem(sc)
entry = malloc(sizeof(struct bge_jpool_entry),
M_DEVBUF, M_NOWAIT);
if (entry == NULL) {
- bus_dmamap_unload(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_jumbo_map);
- bus_dmamap_destroy(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_jumbo_map);
- bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
- bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
- sc->bge_cdata.bge_jumbo_buf = NULL;
printf("%s: no memory for jumbo buffer queue!\n",
sc->bge_dev.dv_xname);
- return(ENOBUFS);
+ error = ENOBUFS;
+ goto out;
}
entry->slot = i;
- LIST_INSERT_HEAD(&sc->bge_jfree_listhead,
+ SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
entry, jpool_entries);
}
+out:
+ if (error != 0) {
+ switch (state) {
+ case 4:
+ bus_dmamap_unload(sc->bge_dmatag,
+ sc->bge_cdata.bge_rx_jumbo_map);
+ case 3:
+ bus_dmamap_destroy(sc->bge_dmatag,
+ sc->bge_cdata.bge_rx_jumbo_map);
+ case 2:
+ bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
+ case 1:
+ bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
+ break;
+ default:
+ break;
+ }
+ }
- return(0);
+ return (error);
}
/*
@@ -670,7 +691,7 @@ bge_jalloc(sc)
{
struct bge_jpool_entry *entry;
- entry = LIST_FIRST(&sc->bge_jfree_listhead);
+ entry = SLIST_FIRST(&sc->bge_jfree_listhead);
if (entry == NULL) {
DPRINTFN(1,("%s: no free jumbo buffers\n",
@@ -678,8 +699,8 @@ bge_jalloc(sc)
return(NULL);
}
- LIST_REMOVE(entry, jpool_entries);
- LIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
+ SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
return(sc->bge_cdata.bge_jslots[entry->slot]);
}
@@ -710,12 +731,12 @@ bge_jfree(buf, size, arg)
if ((i < 0) || (i >= BGE_JSLOTS))
panic("bge_jfree: asked to free buffer that we don't manage!");
- entry = LIST_FIRST(&sc->bge_jinuse_listhead);
+ entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
if (entry == NULL)
panic("bge_jfree: buffer not in use!");
entry->slot = i;
- LIST_REMOVE(entry, jpool_entries);
- LIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
+ SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
+ SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
}
@@ -723,14 +744,24 @@ bge_jfree(buf, size, arg)
* Initialize a standard receive ring descriptor.
*/
int
-bge_newbuf_std(sc, i, m)
+bge_newbuf_std(sc, i, m, dmamap)
struct bge_softc *sc;
int i;
struct mbuf *m;
+ bus_dmamap_t dmamap;
{
struct mbuf *m_new = NULL;
struct bge_rx_bd *r;
- bus_dmamap_t rxmap = sc->bge_cdata.bge_rx_std_map[i];
+ int error;
+
+ if (dmamap == NULL) {
+ error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
+ MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
+ if (error != 0)
+ return error;
+ }
+
+ sc->bge_cdata.bge_rx_std_map[i] = dmamap;
if (m == NULL) {
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
@@ -753,20 +784,26 @@ bge_newbuf_std(sc, i, m)
if (!sc->bge_rx_alignment_bug)
m_adj(m_new, ETHER_ALIGN);
- if (bus_dmamap_load_mbuf(sc->bge_dmatag, rxmap, m_new,
- BUS_DMA_NOWAIT)) {
+ if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
+ BUS_DMA_READ|BUS_DMA_NOWAIT)) {
if (m == NULL)
- m_freem(m_new);
+ m_freem(m_new);
return(ENOBUFS);
}
-
+
sc->bge_cdata.bge_rx_std_chain[i] = m_new;
r = &sc->bge_rdata->bge_rx_std_ring[i];
- BGE_HOSTADDR(r->bge_addr, rxmap->dm_segs[0].ds_addr);
+ BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
r->bge_flags = BGE_RXBDFLAG_END;
r->bge_len = m_new->m_len;
r->bge_idx = i;
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offsetof(struct bge_ring_data, bge_rx_std_ring) +
+ i * sizeof (struct bge_rx_bd),
+ sizeof (struct bge_rx_bd),
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+
return(0);
}
@@ -784,7 +821,7 @@ bge_newbuf_jumbo(sc, i, m)
struct bge_rx_bd *r;
if (m == NULL) {
- caddr_t *buf = NULL;
+ caddr_t buf = NULL;
/* Allocate the mbuf. */
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
@@ -818,6 +855,12 @@ bge_newbuf_jumbo(sc, i, m)
r->bge_len = m_new->m_len;
r->bge_idx = i;
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
+ i * sizeof (struct bge_rx_bd),
+ sizeof (struct bge_rx_bd),
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+
return(0);
}
@@ -833,20 +876,19 @@ bge_init_rx_ring_std(sc)
{
int i;
- for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
- if (bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, MCLBYTES,
- 0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_map[i]))
- return(ENOBUFS);
- }
+ if (sc->bge_flags & BGE_RXRING_VALID)
+ return 0;
for (i = 0; i < BGE_SSLOTS; i++) {
- if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
+ if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
return(ENOBUFS);
}
sc->bge_std = i - 1;
CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
+ sc->bge_flags |= BGE_RXRING_VALID;
+
return(0);
}
@@ -856,16 +898,21 @@ bge_free_rx_ring_std(sc)
{
int i;
+ if (!(sc->bge_flags & BGE_RXRING_VALID))
+ return;
+
for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
sc->bge_cdata.bge_rx_std_chain[i] = NULL;
- bus_dmamap_unload(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_std_map[i]);
+ bus_dmamap_destroy(sc->bge_dmatag,
+ sc->bge_cdata.bge_rx_std_map[i]);
}
bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
sizeof(struct bge_rx_bd));
}
+
+ sc->bge_flags &= ~BGE_RXRING_VALID;
}
int
@@ -873,7 +920,10 @@ bge_init_rx_ring_jumbo(sc)
struct bge_softc *sc;
{
int i;
- struct bge_rcb *rcb;
+ volatile struct bge_rcb *rcb;
+
+ if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
+ return 0;
for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
@@ -881,6 +931,7 @@ bge_init_rx_ring_jumbo(sc)
};
sc->bge_jumbo = i - 1;
+ sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
@@ -897,6 +948,9 @@ bge_free_rx_ring_jumbo(sc)
{
int i;
+ if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
+ return;
+
for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
@@ -905,6 +959,8 @@ bge_free_rx_ring_jumbo(sc)
bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
sizeof(struct bge_rx_bd));
}
+
+ sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}
void
@@ -912,20 +968,30 @@ bge_free_tx_ring(sc)
struct bge_softc *sc;
{
int i;
+ struct txdmamap_pool_entry *dma;
- if (sc->bge_rdata->bge_tx_ring == NULL)
+ if (!(sc->bge_flags & BGE_TXRING_VALID))
return;
for (i = 0; i < BGE_TX_RING_CNT; i++) {
if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
m_freem(sc->bge_cdata.bge_tx_chain[i]);
sc->bge_cdata.bge_tx_chain[i] = NULL;
- bus_dmamap_unload(sc->bge_dmatag,
- sc->bge_cdata.bge_tx_map[i]);
+ SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
+ link);
+ sc->txdma[i] = 0;
}
bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
sizeof(struct bge_tx_bd));
}
+
+ while ((dma = SLIST_FIRST(&sc->txdma_list))) {
+ SLIST_REMOVE_HEAD(&sc->txdma_list, link);
+ bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
+ free(dma, M_DEVBUF);
+ }
+
+ sc->bge_flags &= ~BGE_TXRING_VALID;
}
int
@@ -933,10 +999,14 @@ bge_init_tx_ring(sc)
struct bge_softc *sc;
{
int i;
+ bus_dmamap_t dmamap;
+ struct txdmamap_pool_entry *dma;
+
+ if (sc->bge_flags & BGE_TXRING_VALID)
+ return 0;
sc->bge_txcnt = 0;
sc->bge_tx_saved_considx = 0;
-
CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)
CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
@@ -945,12 +1015,27 @@ bge_init_tx_ring(sc)
if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)
CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
- for (i = 0; i < BGE_TX_RING_CNT; i++) {
- if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, BGE_NTXSEG,
- BGE_JLEN, 0, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_tx_map[i]))
+ SLIST_INIT(&sc->txdma_list);
+ for (i = 0; i < BGE_RSLOTS; i++) {
+ if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
+ BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
+ &dmamap))
return(ENOBUFS);
+ if (dmamap == NULL)
+ panic("dmamap NULL in bge_init_tx_ring");
+ dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
+ if (dma == NULL) {
+ printf("%s: can't alloc txdmamap_pool_entry\n",
+ sc->bge_dev.dv_xname);
+ bus_dmamap_destroy(sc->bge_dmatag, dmamap);
+ return (ENOMEM);
+ }
+ dma->dmamap = dmamap;
+ SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
}
+ sc->bge_flags |= BGE_TXRING_VALID;
+
return(0);
}
@@ -1136,7 +1221,6 @@ bge_blockinit(sc)
int i;
bge_hostaddr taddr;
-
/*
* Initialize the memory window pointer register so that
* we can access the first 32K of internal NIC RAM. This will
@@ -1170,7 +1254,7 @@ bge_blockinit(sc)
}
/* Configure mbuf pool watermarks */
- /* new broadcom docs strongly recommend these: */
+ /* new Broadcom docs strongly recommend these: */
if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
if (ifp->if_mtu > ETHER_MAX_LEN) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
@@ -1279,6 +1363,11 @@ bge_blockinit(sc)
BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
rcb->bge_maxlen_flags);
+
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offsetof(struct bge_ring_data, bge_info),
+ sizeof (struct bge_gib),
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
/*
@@ -1725,6 +1814,9 @@ bge_attach(parent, self, aux)
u_int32_t hwcfg = 0;
u_int32_t mac_addr = 0;
u_int32_t command;
+#if 0
+ u_int32_t pm_ctl;
+#endif
struct ifnet *ifp;
int unit, error = 0;
caddr_t kva;
@@ -1785,6 +1877,21 @@ bge_attach(parent, self, aux)
goto fail;
}
+#if 0
+ /*
+ * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
+ * can clobber the chip's PCI config-space power control registers,
+ * leaving the card in D3 powersave state.
+ * We do not have memory-mapped registers in this state,
+ * so force device into D0 state before starting initialization.
+ */
+ pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
+ pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
+ pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
+ pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
+ DELAY(1000); /* 27 usec is allegedly sufficient */
+#endif
+
/*
* Save ASIC rev. Look up any quirks
* associated with this ASIC.
@@ -2122,7 +2229,7 @@ bge_reset(sc)
val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
if (val == ~BGE_MAGIC_NUMBER)
break;
- DELAY(10);
+ DELAY(1000);
}
if (i == BGE_TIMEOUT) {
@@ -2194,9 +2301,36 @@ bge_rxeof(sc)
{
struct ifnet *ifp;
int stdcnt = 0, jumbocnt = 0;
+ bus_dmamap_t dmamap;
+ bus_addr_t offset, toff;
+ bus_size_t tlen;
+ int tosync;
ifp = &sc->arpcom.ac_if;
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offsetof(struct bge_ring_data, bge_status_block),
+ sizeof (struct bge_status_block),
+ BUS_DMASYNC_POSTREAD);
+
+ offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
+ tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
+ sc->bge_rx_saved_considx;
+
+ toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
+
+ if (tosync < 0) {
+ tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
+ sizeof (struct bge_rx_bd);
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ toff, tlen, BUS_DMASYNC_POSTREAD);
+ tosync = -tosync;
+ }
+
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offset, tosync * sizeof (struct bge_rx_bd),
+ BUS_DMASYNC_POSTREAD);
+
while(sc->bge_rx_saved_considx !=
sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
struct bge_rx_bd *cur_rx;
@@ -2240,18 +2374,19 @@ bge_rxeof(sc)
BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
m = sc->bge_cdata.bge_rx_std_chain[rxidx];
sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
- bus_dmamap_unload(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_std_map[rxidx]);
stdcnt++;
+ dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
+ sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
+ bus_dmamap_unload(sc->bge_dmatag, dmamap);
if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
ifp->if_ierrors++;
- bge_newbuf_std(sc, sc->bge_std, m);
+ bge_newbuf_std(sc, sc->bge_std, m, dmamap);
continue;
}
if (bge_newbuf_std(sc, sc->bge_std,
- NULL) == ENOBUFS) {
+ NULL, dmamap) == ENOBUFS) {
ifp->if_ierrors++;
- bge_newbuf_std(sc, sc->bge_std, m);
+ bge_newbuf_std(sc, sc->bge_std, m, dmamap);
continue;
}
}
@@ -2310,9 +2445,37 @@ bge_txeof(sc)
{
struct bge_tx_bd *cur_tx = NULL;
struct ifnet *ifp;
+ struct txdmamap_pool_entry *dma;
+ bus_addr_t offset, toff;
+ bus_size_t tlen;
+ int tosync;
+ struct mbuf *m;
ifp = &sc->arpcom.ac_if;
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offsetof(struct bge_ring_data, bge_status_block),
+ sizeof (struct bge_status_block),
+ BUS_DMASYNC_POSTREAD);
+
+ offset = offsetof(struct bge_ring_data, bge_tx_ring);
+ tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
+ sc->bge_tx_saved_considx;
+
+ toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
+
+ if (tosync < 0) {
+ tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
+ sizeof (struct bge_tx_bd);
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+ tosync = -tosync;
+ }
+
+ bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
+ offset, tosync * sizeof (struct bge_tx_bd),
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
/*
* Go through our tx ring and free mbufs for those
* frames that have been sent.
@@ -2325,11 +2488,17 @@ bge_txeof(sc)
cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
ifp->if_opackets++;
- if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
- m_freem(sc->bge_cdata.bge_tx_chain[idx]);
+ m = sc->bge_cdata.bge_tx_chain[idx];
+ if (m != NULL) {
sc->bge_cdata.bge_tx_chain[idx] = NULL;
- bus_dmamap_unload(sc->bge_dmatag,
- sc->bge_cdata.bge_tx_map[idx]);
+ dma = sc->txdma[idx];
+ bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
+ dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
+ SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
+ sc->txdma[idx] = NULL;
+
+ m_freem(m);
}
sc->bge_txcnt--;
BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
@@ -2560,6 +2729,123 @@ bge_stats_update(sc)
}
/*
+ * Compact outbound packets to avoid a bug with DMA segments of less than 8 bytes.
+ */
+int
+bge_compact_dma_runt(struct mbuf *pkt)
+{
+ struct mbuf *m, *prev;
+ int totlen, prevlen;
+
+ prev = NULL;
+ totlen = 0;
+ prevlen = -1;
+
+ for (m = pkt; m != NULL; prev = m,m = m->m_next) {
+ int mlen = m->m_len;
+ int shortfall = 8 - mlen;
+
+ totlen += mlen;
+ if (mlen == 0) {
+ continue;
+ }
+ if (mlen >= 8)
+ continue;
+
+ /* If we get here, mbuf data is too small for DMA engine.
+ * Try to fix by shuffling data to prev or next in chain.
+ * If that fails, do a compacting deep-copy of the whole chain.
+ */
+
+ /* Internal frag. If fits in prev, copy it there. */
+ if (prev && !M_READONLY(prev) &&
+ M_TRAILINGSPACE(prev) >= m->m_len) {
+ bcopy(m->m_data,
+ prev->m_data+prev->m_len,
+ mlen);
+ prev->m_len += mlen;
+ m->m_len = 0;
+ /* XXX stitch chain */
+ prev->m_next = m_free(m);
+ m = prev;
+ continue;
+ }
+ else if (m->m_next != NULL && !M_READONLY(m) &&
+ M_TRAILINGSPACE(m) >= shortfall &&
+ m->m_next->m_len >= (8 + shortfall)) {
+ /* m is writable and has enough data in next; pull up. */
+
+ bcopy(m->m_next->m_data,
+ m->m_data+m->m_len,
+ shortfall);
+ m->m_len += shortfall;
+ m->m_next->m_len -= shortfall;
+ m->m_next->m_data += shortfall;
+ }
+ else if (m->m_next == NULL || 1) {
+ /* Got a runt at the very end of the packet.
+ * borrow data from the tail of the preceding mbuf and
+ * update its length in-place. (The original data is still
+ * valid, so we can do this even if prev is not writable.)
+ */
+
+ /* if we'd make prev a runt, just move all of its data. */
+#ifdef DEBUG
+ KASSERT(prev != NULL /*, ("runt but null PREV")*/);
+ KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
+#endif
+ if ((prev->m_len - shortfall) < 8)
+ shortfall = prev->m_len;
+
+#ifdef notyet /* just do the safe slow thing for now */
+ if (!M_READONLY(m)) {
+ if (M_LEADINGSPACE(m) < shortfall) {
+ void *m_dat;
+ m_dat = (m->m_flags & M_PKTHDR) ?
+ m->m_pktdat : m->m_dat;
+ memmove(m_dat, mtod(m, void*), m->m_len);
+ m->m_data = m_dat;
+ }
+ } else
+#endif /* just do the safe slow thing */
+ {
+ struct mbuf * n = NULL;
+ int newprevlen = prev->m_len - shortfall;
+
+ MGET(n, M_NOWAIT, MT_DATA);
+ if (n == NULL)
+ return ENOBUFS;
+ KASSERT(m->m_len + shortfall < MLEN
+ /*,
+ ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
+
+ /* first copy the data we're stealing from prev */
+ bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
+
+ /* update prev->m_len accordingly */
+ prev->m_len -= shortfall;
+
+ /* copy data from runt m */
+ bcopy(m->m_data, n->m_data + shortfall, m->m_len);
+
+ /* n holds what we stole from prev, plus m */
+ n->m_len = shortfall + m->m_len;
+
+ /* stitch n into chain and free m */
+ n->m_next = m->m_next;
+ prev->m_next = n;
+ /* KASSERT(m->m_next == NULL); */
+ m->m_next = NULL;
+ m_free(m);
+ m = n; /* for continuing loop */
+ }
+ }
+ prevlen = m->m_len;
+ }
+ return 0;
+}
+
+/*
* Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
* pointers to descriptors.
*/
@@ -2572,7 +2858,8 @@ bge_encap(sc, m_head, txidx)
struct bge_tx_bd *f = NULL;
u_int32_t frag, cur, cnt = 0;
u_int16_t csum_flags = 0;
- bus_dmamap_t txmap;
+ struct txdmamap_pool_entry *dma;
+ bus_dmamap_t dmamap;
int i = 0;
#if NVLAN > 0
struct ifvlan *ifv = NULL;
@@ -2599,23 +2886,36 @@ bge_encap(sc, m_head, txidx)
#endif
}
#endif
+ if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
+ goto doit;
+ /*
+ * bcm5700 Revision B silicon cannot handle DMA descriptors with
+ * less than eight bytes. If we encounter a teeny mbuf
+ * at the end of a chain, we can pad. Otherwise, copy.
+ */
+ if (bge_compact_dma_runt(m_head) != 0)
+ return ENOBUFS;
+doit:
+ dma = SLIST_FIRST(&sc->txdma_list);
+ if (dma == NULL)
+ return ENOBUFS;
+ dmamap = dma->dmamap;
/*
* Start packing the mbufs in this chain into
* the fragment pointers. Stop when we run out
* of fragments or hit the end of the mbuf chain.
*/
- txmap = sc->bge_cdata.bge_tx_map[frag];
- if (bus_dmamap_load_mbuf(sc->bge_dmatag, txmap, m_head,
+ if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
BUS_DMA_NOWAIT))
return(ENOBUFS);
- for (i = 0; i < txmap->dm_nsegs; i++) {
+ for (i = 0; i < dmamap->dm_nsegs; i++) {
f = &sc->bge_rdata->bge_tx_ring[frag];
if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
break;
- BGE_HOSTADDR(f->bge_addr, txmap->dm_segs[i].ds_addr);
- f->bge_len = txmap->dm_segs[i].ds_len;
+ BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
+ f->bge_len = dmamap->dm_segs[i].ds_len;
f->bge_flags = csum_flags;
#if NVLAN > 0
if (ifv != NULL) {
@@ -2636,18 +2936,19 @@ bge_encap(sc, m_head, txidx)
cnt++;
}
+ if (i < dmamap->dm_nsegs)
+ return ENOBUFS;
+
+ bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
+ BUS_DMASYNC_PREWRITE);
+
if (frag == sc->bge_tx_saved_considx)
return(ENOBUFS);
- /*
- * Put the dmamap for this transmission at the same array index as the
- * last descriptor in this chain. That's where bge_txeof() expects to
- * find it.
- */
- sc->bge_cdata.bge_tx_map[*txidx] = sc->bge_cdata.bge_tx_map[cur];
- sc->bge_cdata.bge_tx_map[cur] = txmap;
sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
sc->bge_cdata.bge_tx_chain[cur] = m_head;
+ SLIST_REMOVE_HEAD(&sc->txdma_list, link);
+ sc->txdma[cur] = dma;
sc->bge_txcnt += cnt;
*txidx = frag;
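A note on the bus_dmamap_sync() calls added to bge_rxeof() and bge_txeof()
above: they all follow one pattern — sync the status block for reading, work
out how many descriptors the chip has produced since the last pass, and sync
only that slice of the ring, in two pieces when the range wraps. A condensed,
hypothetical version of that pattern (the helper and its names are not from
the driver) might look like:

#include <sys/param.h>
#include <machine/bus.h>

/*
 * Hypothetical helper: sync the descriptor slots in [cons, prod) of a ring
 * located ring_off bytes into a larger DMA map.
 */
static void
sync_ring_slice(bus_dma_tag_t tag, bus_dmamap_t map, bus_addr_t ring_off,
    bus_size_t slotsz, u_int cons, u_int prod, u_int ring_cnt, int ops)
{
	if (cons == prod)
		return;
	if (prod > cons) {
		bus_dmamap_sync(tag, map, ring_off + cons * slotsz,
		    (prod - cons) * slotsz, ops);
	} else {
		/* The range wraps around the end of the ring. */
		bus_dmamap_sync(tag, map, ring_off + cons * slotsz,
		    (ring_cnt - cons) * slotsz, ops);
		bus_dmamap_sync(tag, map, ring_off, prod * slotsz, ops);
	}
}

bge_rxeof() above does the equivalent inline with BUS_DMASYNC_POSTREAD on the
RX return ring before walking it, and bge_txeof() does the same (plus
POSTWRITE) on the TX ring before reclaiming mbufs.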
diff --git a/sys/dev/pci/if_bgereg.h b/sys/dev/pci/if_bgereg.h
index 70b710b3d76..02cb8ebad0f 100644
--- a/sys/dev/pci/if_bgereg.h
+++ b/sys/dev/pci/if_bgereg.h
@@ -1,4 +1,5 @@
-/* $OpenBSD: if_bgereg.h,v 1.23 2005/06/17 15:15:43 brad Exp $ */
+/* $OpenBSD: if_bgereg.h,v 1.24 2005/06/29 03:36:06 brad Exp $ */
+
/*
* Copyright (c) 2001 Wind River Systems
* Copyright (c) 1997, 1998, 1999, 2001
@@ -2196,6 +2197,7 @@ struct vpd_key {
#define BGE_SSLOTS 256
#define BGE_MSLOTS 256
#define BGE_JSLOTS 384
+#define BGE_RSLOTS 256
#define BGE_JRAWLEN (ETHER_MAX_LEN_JUMBO + ETHER_ALIGN)
#define BGE_JLEN (BGE_JRAWLEN + (sizeof(u_int64_t) - \
@@ -2273,9 +2275,21 @@ struct bge_type {
struct bge_jpool_entry {
int slot;
- LIST_ENTRY(bge_jpool_entry) jpool_entries;
+ SLIST_ENTRY(bge_jpool_entry) jpool_entries;
+};
+
+struct txdmamap_pool_entry {
+ bus_dmamap_t dmamap;
+ SLIST_ENTRY(txdmamap_pool_entry) link;
};
+/*
+ * Flags for bge_flags.
+ */
+#define BGE_TXRING_VALID 0x0001
+#define BGE_RXRING_VALID 0x0002
+#define BGE_JUMBO_RXRING_VALID 0x0004
+
struct bge_softc {
struct device bge_dev;
struct arpcom arpcom; /* interface info */
@@ -2302,8 +2316,8 @@ struct bge_softc {
u_int16_t bge_return_ring_cnt;
u_int16_t bge_std; /* current std ring head */
u_int16_t bge_jumbo; /* current jumbo ring head */
- LIST_HEAD(__bge_jfreehead, bge_jpool_entry) bge_jfree_listhead;
- LIST_HEAD(__bge_jinusehead, bge_jpool_entry) bge_jinuse_listhead;
+ SLIST_HEAD(__bge_jfreehead, bge_jpool_entry) bge_jfree_listhead;
+ SLIST_HEAD(__bge_jinusehead, bge_jpool_entry) bge_jinuse_listhead;
u_int32_t bge_stat_ticks;
u_int32_t bge_rx_coal_ticks;
u_int32_t bge_tx_coal_ticks;
@@ -2311,9 +2325,12 @@ struct bge_softc {
u_int32_t bge_tx_max_coal_bds;
u_int32_t bge_tx_buf_ratio;
int bge_if_flags;
+ int bge_flags;
int bge_txcnt;
int bge_link;
struct timeout bge_timeout;
char *bge_vpd_prodname;
char *bge_vpd_readonly;
+ SLIST_HEAD(, txdmamap_pool_entry) txdma_list;
+ struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];
};
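The jumbo-buffer pool and the new Tx dmamap pool above both use the
singly-linked lists from <sys/queue.h> (the LIST_* heads became SLIST_*). As
a quick, self-contained illustration of the free/in-use pool idiom that
bge_jalloc()/bge_jfree() follow, here is a userland sketch with made-up
names:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct pool_entry {			/* stand-in for bge_jpool_entry */
	int slot;
	SLIST_ENTRY(pool_entry) link;
};

SLIST_HEAD(, pool_entry) free_list = SLIST_HEAD_INITIALIZER(free_list);
SLIST_HEAD(, pool_entry) inuse_list = SLIST_HEAD_INITIALIZER(inuse_list);

int
main(void)
{
	struct pool_entry *e;
	int i;

	/* Populate the free list, as bge_alloc_jumbo_mem() does for jslots. */
	for (i = 0; i < 4; i++) {
		if ((e = malloc(sizeof(*e))) == NULL)
			return (1);
		e->slot = i;
		SLIST_INSERT_HEAD(&free_list, e, link);
	}

	/* Allocate: move the head entry to the in-use list (cf. bge_jalloc). */
	e = SLIST_FIRST(&free_list);
	SLIST_REMOVE_HEAD(&free_list, link);
	SLIST_INSERT_HEAD(&inuse_list, e, link);
	printf("took slot %d\n", e->slot);

	/* Release: move it back to the free list (cf. bge_jfree). */
	SLIST_REMOVE_HEAD(&inuse_list, link);
	SLIST_INSERT_HEAD(&free_list, e, link);

	return (0);
}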