From 0877d8ae029e73aa871ca80221ab5d4d5e4ae40a Mon Sep 17 00:00:00 2001
From: "Thordur I. Bjornsson"
Date: Fri, 18 Jul 2008 13:38:41 +0000
Subject: o Use mbufs for the RX ring, instead of malloc()'ing an MCLBYTES
 sized buffer.

o On non-strict-alignment archs, don't copy the mbuf every time; just unload
  it, send it up the stack and get a new one for the RX ring.  We still do
  the copy on strict-alignment archs, though.
o Create a function to handle mbuf allocation for the RX ring,
  vr_alloc_mbuf(); use it to allocate the mbufs and shuffle the bus_dma
  setup around.

ideas/code from vic(4) and sis(4); ok reyk@, brad@, dlg@
tested by many, been in snapshots for a while.
---
 sys/dev/pci/if_vr.c    | 163 +++++++++++++++++++++++++++---------------------
 sys/dev/pci/if_vrreg.h |   5 +-
 2 files changed, 98 insertions(+), 70 deletions(-)

diff --git a/sys/dev/pci/if_vr.c b/sys/dev/pci/if_vr.c
index 9394ba4dbef..4186a61c689 100644
--- a/sys/dev/pci/if_vr.c
+++ b/sys/dev/pci/if_vr.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if_vr.c,v 1.73 2008/05/22 19:23:04 mk Exp $	*/
+/*	$OpenBSD: if_vr.c,v 1.74 2008/07/18 13:38:40 thib Exp $	*/
 
 /*
  * Copyright (c) 1997, 1998
@@ -138,6 +138,8 @@ void vr_reset(struct vr_softc *);
 int vr_list_rx_init(struct vr_softc *);
 int vr_list_tx_init(struct vr_softc *);
 
+int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *, struct mbuf *);
+
 const struct pci_matchid vr_devices[] = {
 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
@@ -674,62 +676,40 @@ vr_list_rx_init(struct vr_softc *sc)
 {
 	struct vr_chain_data	*cd;
 	struct vr_list_data	*ld;
-	int			i;
 	struct vr_desc		*d;
+	int			i, nexti;
 
 	cd = &sc->vr_cdata;
 	ld = sc->vr_ldata;
 
-	for (i = 0; i < VR_RX_LIST_CNT; i++) {
+	for (i = 0; i < VR_RX_LIST_CNT; i++) {
+		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
+		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
+		    &cd->vr_rx_chain[i].vr_map))
+			return (ENOBUFS);
+
 		d = (struct vr_desc *)&ld->vr_rx_list[i];
 		cd->vr_rx_chain[i].vr_ptr = d;
 		cd->vr_rx_chain[i].vr_paddr =
 		    sc->sc_listmap->dm_segs[0].ds_addr +
 		    offsetof(struct vr_list_data, vr_rx_list[i]);
-		cd->vr_rx_chain[i].vr_buf =
-		    (u_int8_t *)malloc(MCLBYTES, M_DEVBUF, M_NOWAIT);
-		if (cd->vr_rx_chain[i].vr_buf == NULL)
-			return (ENOBUFS);
 
-		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
-		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
-		    &cd->vr_rx_chain[i].vr_map))
+		if (vr_alloc_mbuf(sc, &cd->vr_rx_chain[i], NULL))
 			return (ENOBUFS);
 
-		if (bus_dmamap_load(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
-		    cd->vr_rx_chain[i].vr_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT))
-			return (ENOBUFS);
-
-		bus_dmamap_sync(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
-		    0, cd->vr_rx_chain[i].vr_map->dm_mapsize,
-		    BUS_DMASYNC_PREREAD);
-
-		d->vr_status = htole32(VR_RXSTAT);
-		d->vr_data =
-		    htole32(cd->vr_rx_chain[i].vr_map->dm_segs[0].ds_addr +
-		    sizeof(u_int64_t));
-		d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
-
-		if (i == (VR_RX_LIST_CNT - 1)) {
-			cd->vr_rx_chain[i].vr_nextdesc =
-			    &cd->vr_rx_chain[0];
-			ld->vr_rx_list[i].vr_next =
-			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
-			    offsetof(struct vr_list_data, vr_rx_list[0]));
-		} else {
-			cd->vr_rx_chain[i].vr_nextdesc =
-			    &cd->vr_rx_chain[i + 1];
-			ld->vr_rx_list[i].vr_next =
-			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
-			    offsetof(struct vr_list_data, vr_rx_list[i + 1]));
-		}
+		if (i == (VR_RX_LIST_CNT - 1))
+			nexti = 0;
+		else
+			nexti = i + 1;
+
+		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
+		ld->vr_rx_list[i].vr_next =
+		    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
+			offsetof(struct vr_list_data, vr_rx_list[nexti]));
 	}
 
 	cd->vr_rx_head = &cd->vr_rx_chain[0];
 
-	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
-	    sc->sc_listmap->dm_mapsize,
-	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
-
 	return(0);
 }
 
@@ -740,7 +720,7 @@ vr_list_rx_init(struct vr_softc *sc)
 void
 vr_rxeof(struct vr_softc *sc)
 {
-	struct mbuf		*m0;
+	struct mbuf		*m0, *m;
 	struct ifnet		*ifp;
 	struct vr_chain_onefrag	*cur_rx;
 	int			total_len = 0;
@@ -813,30 +793,29 @@ vr_rxeof(struct vr_softc *sc)
 		 */
 		total_len -= ETHER_CRC_LEN;
 
-		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
-		    cur_rx->vr_map->dm_mapsize,
-		    BUS_DMASYNC_POSTREAD);
-		m0 = m_devget(cur_rx->vr_buf + sizeof(u_int64_t) - ETHER_ALIGN,
-		    total_len + ETHER_ALIGN, 0, ifp, NULL);
-		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
-		    cur_rx->vr_map->dm_mapsize,
-		    BUS_DMASYNC_PREREAD);
-
-		/* Reinitialize descriptor */
-		cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
-		cur_rx->vr_ptr->vr_data =
-		    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
-		    sizeof(u_int64_t));
-		cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
-		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
-		    sc->sc_listmap->dm_mapsize,
-		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+		m = cur_rx->vr_mbuf;
+		cur_rx->vr_mbuf = NULL;
 
-		if (m0 == NULL) {
-			ifp->if_ierrors++;
-			continue;
+		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
+		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);
+
+#ifndef __STRICT_ALIGNMENT
+		if (vr_alloc_mbuf(sc, cur_rx, NULL) == 0) {
+			m->m_pkthdr.rcvif = ifp;
+			m->m_pkthdr.len = m->m_len = total_len;
+		} else
+#endif
+		{
+			m0 = m_devget(mtod(m, caddr_t) - ETHER_ALIGN,
+			    total_len + ETHER_ALIGN, 0, ifp, NULL);
+			if (m0 == NULL || vr_alloc_mbuf(sc, cur_rx, m)) {
+				ifp->if_ierrors++;
+				continue;
+			}
+			m_adj(m0, ETHER_ALIGN);
+			m = m0;
 		}
-		m_adj(m0, ETHER_ALIGN);
 
 		ifp->if_ipackets++;
 
@@ -845,10 +824,10 @@
 		 * Handle BPF listeners. Let the BPF user see the packet.
 		 */
 		if (ifp->if_bpf)
-			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_IN);
+			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
 #endif
 		/* pass it on. */
-		ether_input_mbuf(ifp, m0);
+		ether_input_mbuf(ifp, m);
 	}
 
 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
@@ -1479,9 +1458,9 @@ vr_stop(struct vr_softc *sc)
 	 */
 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
-		if (sc->vr_cdata.vr_rx_chain[i].vr_buf != NULL) {
-			free(sc->vr_cdata.vr_rx_chain[i].vr_buf, M_DEVBUF);
-			sc->vr_cdata.vr_rx_chain[i].vr_buf = NULL;
+		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
+			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
+			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
 		}
 		map = sc->vr_cdata.vr_rx_chain[i].vr_map;
@@ -1529,3 +1508,51 @@ vr_shutdown(void *arg)
 
 	vr_stop(sc);
 }
+
+int
+vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r, struct mbuf *mb)
+{
+	struct vr_desc	*d;
+	struct mbuf	*m;
+
+	if (mb == NULL) {
+		MGETHDR(m, M_DONTWAIT, MT_DATA);
+		if (m == NULL)
+			return (ENOBUFS);
+
+		MCLGET(m, M_DONTWAIT);
+		if (!(m->m_flags & M_EXT)) {
+			m_free(m);
+			return (ENOBUFS);
+		}
+	} else {
+		m = mb;
+		m->m_data = m->m_ext.ext_buf;
+	}
+
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
+	r->vr_mbuf = m;
+
+	m_adj(m, sizeof(u_int64_t));
+
+	if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, r->vr_mbuf,
+	    BUS_DMA_NOWAIT)) {
+		m_freem(r->vr_mbuf);
+		return (ENOBUFS);
+	}
+
+	bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
+	    BUS_DMASYNC_PREREAD);
+
+	/* Reinitialize the RX descriptor */
+	d = r->vr_ptr;
+	d->vr_status = htole32(VR_RXSTAT);
+	d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
+	d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
+
+	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
+	    sc->sc_listmap->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+	return (0);
+}
diff --git a/sys/dev/pci/if_vrreg.h b/sys/dev/pci/if_vrreg.h
index a9fb9ad47e0..588b53a5d6b 100644
--- a/sys/dev/pci/if_vrreg.h
+++ b/sys/dev/pci/if_vrreg.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: if_vrreg.h,v 1.20 2007/10/04 01:29:39 brad Exp $	*/
+/*	$OpenBSD: if_vrreg.h,v 1.21 2008/07/18 13:38:40 thib Exp $	*/
 
 /*
  * Copyright (c) 1997, 1998
@@ -409,7 +409,7 @@ struct vr_chain {
 struct vr_chain_onefrag {
 	struct vr_desc		*vr_ptr;
 	struct vr_chain_onefrag	*vr_nextdesc;
-	u_int8_t		*vr_buf;
+	struct mbuf		*vr_mbuf;
 	bus_addr_t		vr_paddr;
 	bus_dmamap_t		vr_map;
 };
@@ -468,6 +468,7 @@ struct vr_softc {
 	bus_dmamap_t		sc_listmap;	/* descriptor list map */
 	bus_dma_segment_t	sc_listseg;
 	int			sc_if_flags;
+	int			sc_rxbufs;
 };
 
 #define VR_F_RESTART	0x01	/* Restart unit on next tick */
-- 
cgit v1.2.3
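
A short background note on the __STRICT_ALIGNMENT split in the RX path above
(illustrative only, not part of the commit): an Ethernet header is 14 bytes,
so a frame that starts at an aligned DMA buffer leaves the IP header that
follows it 2 bytes short of a 32-bit boundary.  Architectures that tolerate
unaligned loads can hand such an mbuf straight up the stack, which is the new
fast path; strict-alignment architectures still take the m_devget() plus
m_adj(ETHER_ALIGN) copy to shift the payload.  The small standalone C program
below only demonstrates that arithmetic and assumes the usual BSD values
ETHER_HDR_LEN = 14 and ETHER_ALIGN = 2; it is not driver code.

	#include <stdio.h>
	#include <stdint.h>

	#define ETHER_HDR_LEN	14	/* dst(6) + src(6) + ethertype(2) */
	#define ETHER_ALIGN	2	/* pad so the IP header is 32-bit aligned */

	int
	main(void)
	{
		/* Pretend the RX DMA buffer starts at an aligned address. */
		uintptr_t buf = 0x1000;

		uintptr_t ip_plain = buf + ETHER_HDR_LEN;
		uintptr_t ip_shifted = buf + ETHER_ALIGN + ETHER_HDR_LEN;

		printf("frame at buf:          IP header %#lx, mod 4 = %lu\n",
		    (unsigned long)ip_plain, (unsigned long)(ip_plain % 4));
		printf("frame shifted 2 bytes: IP header %#lx, mod 4 = %lu\n",
		    (unsigned long)ip_shifted, (unsigned long)(ip_shifted % 4));
		return (0);
	}

Compiled and run, it prints "mod 4 = 2" for the unshifted case and "mod 4 = 0"
after the ETHER_ALIGN shift, which is why strict-alignment machines copy the
packet into a freshly aligned mbuf while the others reuse the cluster as-is.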