author     Kevin Lo <kevlo@cvs.openbsd.org>   2011-01-15 04:35:35 +0000
committer  Kevin Lo <kevlo@cvs.openbsd.org>   2011-01-15 04:35:35 +0000
commit     9d5a6edf9401fddb810be5c0aaee5c1b8e631b7c (patch)
tree       eb134e55d961951527b5afc54dff04a80f90838e /sys/dev/pci
parent     db58922e2754fcbd760702366a7a6983e9b6f364 (diff)
Add drivers for the RDC R6040 Ethernet chipset
and RDC R6040 Ethernet PHY.
Written by Pyun YongHyeon for FreeBSD, then
ported to OpenBSD by me.
Thanks once again to DMP for supplying hardware
which made this possible.
ok deraadt@
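The new device attaches at PCI with an MII PHY (files.pci below declares
"attach vte at pci" with mii and mii_phy), so enabling it follows the usual
kernel-config pattern. A minimal sketch of GENERIC-style config lines; the
rdcphy line is an assumption, since the matching PHY driver lives under
sys/dev/mii and is not part of this diff:

	vte*	at pci?			# RDC R6040 Ethernet
	rdcphy*	at mii?			# RDC R6040 PHY (assumed driver name)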
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/files.pci   |    7
-rw-r--r--  sys/dev/pci/if_vte.c    | 1518
-rw-r--r--  sys/dev/pci/if_vtereg.h |  460
3 files changed, 1984 insertions(+), 1 deletion(-)
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci index d43bb0ec80a..25780271001 100644 --- a/sys/dev/pci/files.pci +++ b/sys/dev/pci/files.pci @@ -1,4 +1,4 @@ -# $OpenBSD: files.pci,v 1.276 2010/10/14 21:23:04 pirofti Exp $ +# $OpenBSD: files.pci,v 1.277 2011/01/15 04:35:34 kevlo Exp $ # $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $ # # Config file and device description for machine-independent PCI code. @@ -308,6 +308,11 @@ device vr: ether, ifnet, mii, ifmedia attach vr at pci file dev/pci/if_vr.c vr +# RDC R6040 +device vte: ether, ifnet, mii, ifmedia, mii_phy +attach vte at pci +file dev/pci/if_vte.c vte + # Texas Instruments ThunderLAN device tl: ether, ifnet, mii, ifmedia attach tl at pci diff --git a/sys/dev/pci/if_vte.c b/sys/dev/pci/if_vte.c new file mode 100644 index 00000000000..376fd723a48 --- /dev/null +++ b/sys/dev/pci/if_vte.c @@ -0,0 +1,1518 @@ +/* $OpenBSD: if_vte.c,v 1.1 2011/01/15 04:35:34 kevlo Exp $ */ +/*- + * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. 
*/ + +#include "bpfilter.h" +#include "vlan.h" + +#include <sys/param.h> +#include <sys/endian.h> +#include <sys/systm.h> +#include <sys/types.h> +#include <sys/sockio.h> +#include <sys/mbuf.h> +#include <sys/queue.h> +#include <sys/kernel.h> +#include <sys/device.h> +#include <sys/timeout.h> +#include <sys/socket.h> + +#include <machine/bus.h> + +#include <net/if.h> +#include <net/if_dl.h> +#include <net/if_llc.h> +#include <net/if_media.h> + +#ifdef INET +#include <netinet/in.h> +#include <netinet/in_systm.h> +#include <netinet/in_var.h> +#include <netinet/ip.h> +#include <netinet/if_ether.h> +#endif + +#include <net/if_types.h> +#include <net/if_vlan_var.h> + +#if NBPFILTER > 0 +#include <net/bpf.h> +#endif + +#include <dev/rndvar.h> + +#include <dev/mii/mii.h> +#include <dev/mii/miivar.h> + +#include <dev/pci/pcireg.h> +#include <dev/pci/pcivar.h> +#include <dev/pci/pcidevs.h> + +#include <dev/pci/if_vtereg.h> + +int vte_match(struct device *, void *, void *); +void vte_attach(struct device *, struct device *, void *); +int vte_detach(struct device *, int); + +int vte_miibus_readreg(struct device *, int, int); +void vte_miibus_writereg(struct device *, int, int, int); +void vte_miibus_statchg(struct device *); + +int vte_init(struct ifnet *); +void vte_start(struct ifnet *); +int vte_ioctl(struct ifnet *, u_long, caddr_t); +void vte_watchdog(struct ifnet *); +int vte_mediachange(struct ifnet *); +void vte_mediastatus(struct ifnet *, struct ifmediareq *); + +int vte_intr(void *); +int vte_dma_alloc(struct vte_softc *); +void vte_dma_free(struct vte_softc *); +struct vte_txdesc * + vte_encap(struct vte_softc *, struct mbuf **); +void vte_get_macaddr(struct vte_softc *); +int vte_init_rx_ring(struct vte_softc *); +int vte_init_tx_ring(struct vte_softc *); +void vte_mac_config(struct vte_softc *); +int vte_newbuf(struct vte_softc *, struct vte_rxdesc *, int); +void vte_reset(struct vte_softc *); +void vte_rxeof(struct vte_softc *); +void vte_iff(struct vte_softc *); +void vte_start_mac(struct vte_softc *); +void vte_stats_clear(struct vte_softc *); +void vte_stats_update(struct vte_softc *); +void vte_stop(struct vte_softc *); +void vte_stop_mac(struct vte_softc *); +void vte_tick(void *); +void vte_txeof(struct vte_softc *); + +const struct pci_matchid vte_devices[] = { + { PCI_VENDOR_RDC, PCI_PRODUCT_RDC_R6040_ETHER } +}; + +struct cfattach vte_ca = { + sizeof(struct vte_softc), vte_match, vte_attach +}; + +struct cfdriver vte_cd = { + NULL, "vte", DV_IFNET +}; + +int vtedebug = 0; +#define DPRINTF(x) do { if (vtedebug) printf x; } while (0) + +int +vte_miibus_readreg(struct device *dev, int phy, int reg) +{ + struct vte_softc *sc = (struct vte_softc *)dev; + int i; + + CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ | + (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); + for (i = VTE_PHY_TIMEOUT; i > 0; i--) { + DELAY(5); + if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0) + break; + } + + if (i == 0) { + printf("%s: phy read timeout: phy %d, reg %d\n", + sc->sc_dev.dv_xname, phy, reg); + return (0); + } + + return (CSR_READ_2(sc, VTE_MMRD)); +} + +void +vte_miibus_writereg(struct device *dev, int phy, int reg, int val) +{ + struct vte_softc *sc = (struct vte_softc *)dev; + int i; + + CSR_WRITE_2(sc, VTE_MMWD, val); + CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE | + (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); + for (i = VTE_PHY_TIMEOUT; i > 0; i--) { + DELAY(5); + if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0) + break; + } + + if (i == 0) + printf("%s: phy write 
timeout: phy %d, reg %d\n", + sc->sc_dev.dv_xname, phy, reg); +} + +void +vte_miibus_statchg(struct device *dev) +{ + struct vte_softc *sc = (struct vte_softc *)dev; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct mii_data *mii; + uint16_t val; + + if ((ifp->if_flags & IFF_RUNNING) == 0) + return; + + mii = &sc->sc_miibus; + + sc->vte_flags &= ~VTE_FLAG_LINK; + if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == + (IFM_ACTIVE | IFM_AVALID)) { + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_10_T: + case IFM_100_TX: + sc->vte_flags |= VTE_FLAG_LINK; + break; + default: + break; + } + } + + /* Stop RX/TX MACs. */ + vte_stop_mac(sc); + /* Program MACs with resolved duplex and flow control. */ + if ((sc->vte_flags & VTE_FLAG_LINK) != 0) { + /* + * Timer waiting time : (63 + TIMER * 64) MII clock. + * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps). + */ + if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) + val = 18 << VTE_IM_TIMER_SHIFT; + else + val = 1 << VTE_IM_TIMER_SHIFT; + sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT; + val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT; + /* 48.6us for 100Mbps, 50.8us for 10Mbps */ + CSR_WRITE_2(sc, VTE_MRICR, val); + + if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) + val = 18 << VTE_IM_TIMER_SHIFT; + else + val = 1 << VTE_IM_TIMER_SHIFT; + sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT; + val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT; + /* 48.6us for 100Mbps, 50.8us for 10Mbps */ + CSR_WRITE_2(sc, VTE_MTICR, val); + + vte_mac_config(sc); + vte_start_mac(sc); + } +} + +void +vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct vte_softc *sc = ifp->if_softc; + struct mii_data *mii = &sc->sc_miibus; + + mii_pollstat(mii); + ifmr->ifm_status = mii->mii_media_status; + ifmr->ifm_active = mii->mii_media_active; +} + +int +vte_mediachange(struct ifnet *ifp) +{ + struct vte_softc *sc = ifp->if_softc; + struct mii_data *mii = &sc->sc_miibus; + int error; + + if (mii->mii_instance != 0) { + struct mii_softc *miisc; + + LIST_FOREACH(miisc, &mii->mii_phys, mii_list) + mii_phy_reset(miisc); + } + error = mii_mediachg(mii); + + return (error); +} + +int +vte_match(struct device *dev, void *match, void *aux) +{ + return pci_matchbyid((struct pci_attach_args *)aux, vte_devices, + sizeof(vte_devices) / sizeof(vte_devices[0])); +} + +void +vte_get_macaddr(struct vte_softc *sc) +{ + uint16_t mid; + + /* + * It seems there is no way to reload station address and + * it is supposed to be set by BIOS. 
+ */ + mid = CSR_READ_2(sc, VTE_MID0L); + sc->vte_eaddr[0] = (mid >> 0) & 0xFF; + sc->vte_eaddr[1] = (mid >> 8) & 0xFF; + mid = CSR_READ_2(sc, VTE_MID0M); + sc->vte_eaddr[2] = (mid >> 0) & 0xFF; + sc->vte_eaddr[3] = (mid >> 8) & 0xFF; + mid = CSR_READ_2(sc, VTE_MID0H); + sc->vte_eaddr[4] = (mid >> 0) & 0xFF; + sc->vte_eaddr[5] = (mid >> 8) & 0xFF; +} + +void +vte_attach(struct device *parent, struct device *self, void *aux) +{ + struct vte_softc *sc = (struct vte_softc *)self; + struct pci_attach_args *pa = aux; + pci_chipset_tag_t pc = pa->pa_pc; + pci_intr_handle_t ih; + const char *intrstr; + struct ifnet *ifp; + pcireg_t memtype; + int error = 0; + + memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VTE_PCI_LOMEM); + if (pci_mapreg_map(pa, VTE_PCI_LOMEM, memtype, 0, &sc->sc_mem_bt, + &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) { + printf(": can't map mem space\n"); + return; + } + + if (pci_intr_map(pa, &ih) != 0) { + printf(": can't map interrupt\n"); + goto fail; + } + + /* + * Allocate IRQ + */ + intrstr = pci_intr_string(pc, ih); + sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, vte_intr, sc, + sc->sc_dev.dv_xname); + if (sc->sc_irq_handle == NULL) { + printf(": could not establish interrupt"); + if (intrstr != NULL) + printf(" at %s", intrstr); + printf("\n"); + goto fail; + } + printf(": %s", intrstr); + + sc->sc_dmat = pa->pa_dmat; + sc->sc_pct = pa->pa_pc; + sc->sc_pcitag = pa->pa_tag; + + /* Reset the ethernet controller. */ + vte_reset(sc); + + error = vte_dma_alloc(sc); + if (error) + goto fail; + + /* Load station address. */ + vte_get_macaddr(sc); + + ifp = &sc->sc_arpcom.ac_if; + ifp->if_softc = sc; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = vte_ioctl; + ifp->if_start = vte_start; + ifp->if_watchdog = vte_watchdog; + ifp->if_baudrate = IF_Mbps(100); + IFQ_SET_MAXLEN(&ifp->if_snd, VTE_TX_RING_CNT - 1); + IFQ_SET_READY(&ifp->if_snd); + bcopy(sc->vte_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN); + bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); + + ifp->if_capabilities = IFCAP_VLAN_MTU; + + printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); + + /* + * Set up MII bus. + * BIOS would have initialized VTE_MPSCCR to catch PHY + * status changes so driver may be able to extract + * configured PHY address. Since it's common to see BIOS + * fails to initialize the register(including the sample + * board I have), let mii(4) probe it. This is more + * reliable than relying on BIOS's initialization. + * + * Advertising flow control capability to mii(4) was + * intentionally disabled due to severe problems in TX + * pause frame generation. See vte_rxeof() for more + * details. 
+ */ + sc->sc_miibus.mii_ifp = ifp; + sc->sc_miibus.mii_readreg = vte_miibus_readreg; + sc->sc_miibus.mii_writereg = vte_miibus_writereg; + sc->sc_miibus.mii_statchg = vte_miibus_statchg; + + ifmedia_init(&sc->sc_miibus.mii_media, 0, vte_mediachange, + vte_mediastatus); + mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY, + MII_OFFSET_ANY, 0); + + if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) { + printf("%s: no PHY found!\n", sc->sc_dev.dv_xname); + ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL, + 0, NULL); + ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL); + } else + ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO); + + if_attach(ifp); + ether_ifattach(ifp); + + timeout_set(&sc->vte_tick_ch, vte_tick, sc); + return; +fail: + vte_detach(&sc->sc_dev, 0); +} + +int +vte_detach(struct device *self, int flags) +{ + struct vte_softc *sc = (struct vte_softc *)self; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + int s; + + s = splnet(); + vte_stop(sc); + splx(s); + + mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY); + + /* Delete all remaining media. */ + ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY); + + ether_ifdetach(ifp); + if_detach(ifp); + vte_dma_free(sc); + + if (sc->sc_irq_handle != NULL) { + pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle); + sc->sc_irq_handle = NULL; + } + + return (0); +} + +int +vte_dma_alloc(struct vte_softc *sc) +{ + struct vte_txdesc *txd; + struct vte_rxdesc *rxd; + int error, i, nsegs; + + /* Create DMA stuffs for TX ring */ + error = bus_dmamap_create(sc->sc_dmat, VTE_TX_RING_SZ, 1, + VTE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_tx_ring_map); + if (error) + return (ENOBUFS); + + /* Allocate DMA'able memory for TX ring */ + error = bus_dmamem_alloc(sc->sc_dmat, VTE_TX_RING_SZ, ETHER_ALIGN, + 0, &sc->vte_cdata.vte_tx_ring_seg, 1, &nsegs, + BUS_DMA_WAITOK | BUS_DMA_ZERO); + if (error) { + printf("%s: could not allocate DMA'able memory for Tx ring.\n", + sc->sc_dev.dv_xname); + return (error); + } + + error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_tx_ring_seg, + nsegs, VTE_TX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_tx_ring, + BUS_DMA_NOWAIT); + if (error) + return (ENOBUFS); + + /* Load the DMA map for Tx ring. */ + error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, + sc->vte_cdata.vte_tx_ring, VTE_TX_RING_SZ, NULL, BUS_DMA_WAITOK); + if (error) { + printf("%s: could not load DMA'able memory for Tx ring.\n", + sc->sc_dev.dv_xname); + bus_dmamem_free(sc->sc_dmat, + (bus_dma_segment_t *)&sc->vte_cdata.vte_tx_ring, 1); + return (error); + } + + sc->vte_cdata.vte_tx_ring_paddr = + sc->vte_cdata.vte_tx_ring_map->dm_segs[0].ds_addr; + + /* Create DMA stuffs for RX ring */ + error = bus_dmamap_create(sc->sc_dmat, VTE_RX_RING_SZ, 1, + VTE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_ring_map); + if (error) + return (ENOBUFS); + + /* Allocate DMA'able memory for RX ring */ + error = bus_dmamem_alloc(sc->sc_dmat, VTE_RX_RING_SZ, ETHER_ALIGN, + 0, &sc->vte_cdata.vte_rx_ring_seg, 1, &nsegs, + BUS_DMA_WAITOK | BUS_DMA_ZERO); + if (error) { + printf("%s: could not allocate DMA'able memory for Rx ring.\n", + sc->sc_dev.dv_xname); + return (error); + } + + error = bus_dmamem_map(sc->sc_dmat, &sc->vte_cdata.vte_rx_ring_seg, + nsegs, VTE_RX_RING_SZ, (caddr_t *)&sc->vte_cdata.vte_rx_ring, + BUS_DMA_NOWAIT); + if (error) + return (ENOBUFS); + + /* Load the DMA map for Rx ring. 
*/ + error = bus_dmamap_load(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, + sc->vte_cdata.vte_rx_ring, VTE_RX_RING_SZ, NULL, BUS_DMA_WAITOK); + if (error) { + printf("%s: could not load DMA'able memory for Rx ring.\n", + sc->sc_dev.dv_xname); + bus_dmamem_free(sc->sc_dmat, + (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1); + return (error); + } + + sc->vte_cdata.vte_rx_ring_paddr = + sc->vte_cdata.vte_rx_ring_map->dm_segs[0].ds_addr; + + /* Create DMA maps for Tx buffers. */ + for (i = 0; i < VTE_TX_RING_CNT; i++) { + txd = &sc->vte_cdata.vte_txdesc[i]; + txd->tx_m = NULL; + txd->tx_dmamap = NULL; + error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, + MCLBYTES, 0, BUS_DMA_NOWAIT, &txd->tx_dmamap); + if (error) { + printf("%s: could not create Tx dmamap.\n", + sc->sc_dev.dv_xname); + return (error); + } + } + + /* Create DMA maps for Rx buffers. */ + error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, + BUS_DMA_NOWAIT, &sc->vte_cdata.vte_rx_sparemap); + if (error) { + printf("%s: could not create spare Rx dmamap.\n", + sc->sc_dev.dv_xname); + return (error); + } + for (i = 0; i < VTE_RX_RING_CNT; i++) { + rxd = &sc->vte_cdata.vte_rxdesc[i]; + rxd->rx_m = NULL; + rxd->rx_dmamap = NULL; + error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, + MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); + if (error) { + printf("%s: could not create Rx dmamap.\n", + sc->sc_dev.dv_xname); + return (error); + } + } + + return (0); +} + +void +vte_dma_free(struct vte_softc *sc) +{ + struct vte_txdesc *txd; + struct vte_rxdesc *rxd; + int i; + + /* TX buffers. */ + for (i = 0; i < VTE_TX_RING_CNT; i++) { + txd = &sc->vte_cdata.vte_txdesc[i]; + if (txd->tx_dmamap != NULL) { + bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); + txd->tx_dmamap = NULL; + } + } + /* Rx buffers */ + for (i = 0; i < VTE_RX_RING_CNT; i++) { + rxd = &sc->vte_cdata.vte_rxdesc[i]; + if (rxd->rx_dmamap != NULL) { + bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); + rxd->rx_dmamap = NULL; + } + } + if (sc->vte_cdata.vte_rx_sparemap != NULL) { + bus_dmamap_destroy(sc->sc_dmat, sc->vte_cdata.vte_rx_sparemap); + sc->vte_cdata.vte_rx_sparemap = NULL; + } + /* TX descriptor ring. */ + if (sc->vte_cdata.vte_tx_ring_map != NULL) + bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map); + if (sc->vte_cdata.vte_tx_ring_map != NULL && + sc->vte_cdata.vte_tx_ring != NULL) + bus_dmamem_free(sc->sc_dmat, + (bus_dma_segment_t *)sc->vte_cdata.vte_tx_ring, 1); + sc->vte_cdata.vte_tx_ring = NULL; + sc->vte_cdata.vte_tx_ring_map = NULL; + /* RX ring. */ + if (sc->vte_cdata.vte_rx_ring_map != NULL) + bus_dmamap_unload(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map); + if (sc->vte_cdata.vte_rx_ring_map != NULL && + sc->vte_cdata.vte_rx_ring != NULL) + bus_dmamem_free(sc->sc_dmat, + (bus_dma_segment_t *)sc->vte_cdata.vte_rx_ring, 1); + sc->vte_cdata.vte_rx_ring = NULL; + sc->vte_cdata.vte_rx_ring_map = NULL; +} + +struct vte_txdesc * +vte_encap(struct vte_softc *sc, struct mbuf **m_head) +{ + struct vte_txdesc *txd; + struct mbuf *m, *n; + int copy, error, padlen; + + txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod]; + m = *m_head; + /* + * Controller doesn't auto-pad, so we have to make sure pad + * short frames out to the minimum frame length. + */ + if (m->m_pkthdr.len < VTE_MIN_FRAMELEN) + padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len; + else + padlen = 0; + + /* + * Controller does not support multi-fragmented TX buffers. + * Controller spends most of its TX processing time in + * de-fragmenting TX buffers. 
Either faster CPU or more + * advanced controller DMA engine is required to speed up + * TX path processing. + * To mitigate the de-fragmenting issue, perform deep copy + * from fragmented mbuf chains to a pre-allocated mbuf + * cluster with extra cost of kernel memory. For frames + * that is composed of single TX buffer, the deep copy is + * bypassed. + */ + copy = 0; + if (m->m_next != NULL) + copy++; + if (padlen > 0 && (padlen > M_TRAILINGSPACE(m))) + copy++; + if (copy != 0) { + /* Avoid expensive m_defrag(9) and do deep copy. */ + n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod]; + m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *)); + n->m_pkthdr.len = m->m_pkthdr.len; + n->m_len = m->m_pkthdr.len; + m = n; + txd->tx_flags |= VTE_TXMBUF; + } + + if (padlen > 0) { + /* Zero out the bytes in the pad area. */ + bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); + m->m_pkthdr.len += padlen; + m->m_len = m->m_pkthdr.len; + } + + error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap, m, + BUS_DMA_NOWAIT); + + if (error != 0) { + txd->tx_flags &= ~VTE_TXMBUF; + return (NULL); + } + + bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, + txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); + + txd->tx_desc->dtlen = + htole16(VTE_TX_LEN(txd->tx_dmamap->dm_segs[0].ds_len)); + txd->tx_desc->dtbp = htole32(txd->tx_dmamap->dm_segs[0].ds_addr); + sc->vte_cdata.vte_tx_cnt++; + /* Update producer index. */ + VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT); + + /* Finally hand over ownership to controller. */ + txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN); + txd->tx_m = m; + + return (txd); +} + +void +vte_start(struct ifnet *ifp) +{ + struct vte_softc *sc = ifp->if_softc; + struct vte_txdesc *txd; + struct mbuf *m_head; + int enq = 0; + + if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) + return; + + for (;;) { + /* Reserve one free TX descriptor. */ + if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) { + ifp->if_flags |= IFF_OACTIVE; + break; + } + IFQ_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + + /* + * Pack the data into the transmit ring. If we + * don't have room, set the OACTIVE flag and wait + * for the NIC to drain the ring. + */ + if ((txd = vte_encap(sc, &m_head)) == NULL) { + break; + } + + enq++; + +#if NBPFILTER > 0 + /* + * If there's a BPF listener, bounce a copy of this frame + * to him. + */ + if (ifp->if_bpf != NULL) + bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT); +#endif + /* Free consumed TX frame. 
*/ + if ((txd->tx_flags & VTE_TXMBUF) != 0) + m_freem(m_head); + } + + if (enq > 0) { + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0, + sc->vte_cdata.vte_tx_ring_map->dm_mapsize, + BUS_DMASYNC_PREWRITE); + CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START); + ifp->if_timer = VTE_TX_TIMEOUT; + } +} + +void +vte_watchdog(struct ifnet *ifp) +{ + struct vte_softc *sc = ifp->if_softc; + + printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname); + ifp->if_oerrors++; + vte_init(ifp); + + if (!IFQ_IS_EMPTY(&ifp->if_snd)) + vte_start(ifp); +} + +int +vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct vte_softc *sc = ifp->if_softc; + struct mii_data *mii = &sc->sc_miibus; + struct ifaddr *ifa = (struct ifaddr *)data; + struct ifreq *ifr = (struct ifreq *)data; + int s, error = 0; + + s = splnet(); + + switch (cmd) { + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP; + if (!(ifp->if_flags & IFF_RUNNING)) + vte_init(ifp); +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) + arp_ifinit(&sc->sc_arpcom, ifa); +#endif + break; + case SIOCSIFFLAGS: + if (ifp->if_flags & IFF_UP) { + if (ifp->if_flags & IFF_RUNNING) + error = ENETRESET; + else + vte_init(ifp); + } else { + if (ifp->if_flags & IFF_RUNNING) + vte_stop(sc); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); + break; + default: + error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data); + break; + } + + if (error == ENETRESET) { + if (ifp->if_flags & IFF_RUNNING) + vte_iff(sc); + error = 0; + } + + splx(s); + return (error); +} + +void +vte_mac_config(struct vte_softc *sc) +{ + struct mii_data *mii; + uint16_t mcr; + + mii = &sc->sc_miibus; + mcr = CSR_READ_2(sc, VTE_MCR0); + mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX); + if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { + mcr |= MCR0_FULL_DUPLEX; +#ifdef notyet + if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) + mcr |= MCR0_FC_ENB; + /* + * The data sheet is not clear whether the controller + * honors received pause frames or not. The is no + * separate control bit for RX pause frame so just + * enable MCR0_FC_ENB bit. + */ + if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) + mcr |= MCR0_FC_ENB; +#endif + } + CSR_WRITE_2(sc, VTE_MCR0, mcr); +} + +void +vte_stats_clear(struct vte_softc *sc) +{ + + /* Reading counter registers clears its contents. */ + CSR_READ_2(sc, VTE_CNT_RX_DONE); + CSR_READ_2(sc, VTE_CNT_MECNT0); + CSR_READ_2(sc, VTE_CNT_MECNT1); + CSR_READ_2(sc, VTE_CNT_MECNT2); + CSR_READ_2(sc, VTE_CNT_MECNT3); + CSR_READ_2(sc, VTE_CNT_TX_DONE); + CSR_READ_2(sc, VTE_CNT_MECNT4); + CSR_READ_2(sc, VTE_CNT_PAUSE); +} + +void +vte_stats_update(struct vte_softc *sc) +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct vte_hw_stats *stat; + uint16_t value; + + stat = &sc->vte_stats; + + CSR_READ_2(sc, VTE_MECISR); + /* RX stats. */ + stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE); + value = CSR_READ_2(sc, VTE_CNT_MECNT0); + stat->rx_bcast_frames += (value >> 8); + stat->rx_mcast_frames += (value & 0xFF); + value = CSR_READ_2(sc, VTE_CNT_MECNT1); + stat->rx_runts += (value >> 8); + stat->rx_crcerrs += (value & 0xFF); + value = CSR_READ_2(sc, VTE_CNT_MECNT2); + stat->rx_long_frames += (value & 0xFF); + value = CSR_READ_2(sc, VTE_CNT_MECNT3); + stat->rx_fifo_full += (value >> 8); + stat->rx_desc_unavail += (value & 0xFF); + + /* TX stats. 
*/ + stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE); + value = CSR_READ_2(sc, VTE_CNT_MECNT4); + stat->tx_underruns += (value >> 8); + stat->tx_late_colls += (value & 0xFF); + + value = CSR_READ_2(sc, VTE_CNT_PAUSE); + stat->tx_pause_frames += (value >> 8); + stat->rx_pause_frames += (value & 0xFF); + + /* Update ifp counters. */ + ifp->if_opackets = stat->tx_frames; + ifp->if_collisions = stat->tx_late_colls; + ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns; + ifp->if_ipackets = stat->rx_frames; + ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts + + stat->rx_long_frames + stat->rx_fifo_full; +} + +int +vte_intr(void *arg) +{ + struct vte_softc *sc = arg; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + uint16_t status; + int n; + int claimed = 0; + + /* Reading VTE_MISR acknowledges interrupts. */ + status = CSR_READ_2(sc, VTE_MISR); + if ((status & VTE_INTRS) == 0) + return (0); + + /* Disable interrupts. */ + CSR_WRITE_2(sc, VTE_MIER, 0); + for (n = 8; (status & VTE_INTRS) != 0;) { + if ((ifp->if_flags & IFF_RUNNING) == 0) + break; + claimed = 1; + if (status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL | + MISR_RX_FIFO_FULL)) + vte_rxeof(sc); + if (status & MISR_TX_DONE) + vte_txeof(sc); + if (status & MISR_EVENT_CNT_OFLOW) + vte_stats_update(sc); + if (!IFQ_IS_EMPTY(&ifp->if_snd)) + vte_start(ifp); + if (--n > 0) + status = CSR_READ_2(sc, VTE_MISR); + else + break; + } + + /* Re-enable interrupts. */ + CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); + + return (claimed); +} + +void +vte_txeof(struct vte_softc *sc) +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct vte_txdesc *txd; + uint16_t status; + int cons, prog; + + if (sc->vte_cdata.vte_tx_cnt == 0) + return; + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0, + sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); + cons = sc->vte_cdata.vte_tx_cons; + /* + * Go through our TX list and free mbufs for those + * frames which have been transmitted. + */ + for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) { + txd = &sc->vte_cdata.vte_txdesc[cons]; + status = letoh16(txd->tx_desc->dtst); + if (status & VTE_DTST_TX_OWN) + break; + sc->vte_cdata.vte_tx_cnt--; + /* Reclaim transmitted mbufs. */ + bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); + if ((txd->tx_flags & VTE_TXMBUF) == 0) + m_freem(txd->tx_m); + txd->tx_flags &= ~VTE_TXMBUF; + txd->tx_m = NULL; + prog++; + VTE_DESC_INC(cons, VTE_TX_RING_CNT); + } + + if (prog > 0) { + ifp->if_flags &= ~IFF_OACTIVE; + sc->vte_cdata.vte_tx_cons = cons; + /* + * Unarm watchdog timer only when there is no pending + * frames in TX queue. + */ + if (sc->vte_cdata.vte_tx_cnt == 0) + ifp->if_timer = 0; + } +} + +int +vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd, int init) +{ + struct mbuf *m; + bus_dmamap_t map; + int error; + + MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA); + if (m == NULL) + return (ENOBUFS); + MCLGET(m, init ? 
M_WAITOK : M_DONTWAIT); + if (!(m->m_flags & M_EXT)) { + m_freem(m); + return (ENOBUFS); + } + m->m_len = m->m_pkthdr.len = MCLBYTES; + m_adj(m, sizeof(uint32_t)); + + error = bus_dmamap_load_mbuf(sc->sc_dmat, + sc->vte_cdata.vte_rx_sparemap, m, BUS_DMA_NOWAIT); + + if (error != 0) { + if (!error) { + bus_dmamap_unload(sc->sc_dmat, + sc->vte_cdata.vte_rx_sparemap); + error = EFBIG; + printf("%s: too many segments?!\n", + sc->sc_dev.dv_xname); + } + m_freem(m); + + if (init) + printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname); + return (error); + } + + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, + rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); + } + map = rxd->rx_dmamap; + rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap; + sc->vte_cdata.vte_rx_sparemap = map; + + rxd->rx_m = m; + rxd->rx_desc->drbp = htole32(rxd->rx_dmamap->dm_segs[0].ds_addr); + rxd->rx_desc->drlen = + htole16(VTE_RX_LEN(rxd->rx_dmamap->dm_segs[0].ds_len)); + rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); + + return (0); +} + +void +vte_rxeof(struct vte_softc *sc) +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct vte_rxdesc *rxd; + struct mbuf *m; + uint16_t status, total_len; + int cons, prog; + + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0, + sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD); + cons = sc->vte_cdata.vte_rx_cons; + for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0; prog++, + VTE_DESC_INC(cons, VTE_RX_RING_CNT)) { + rxd = &sc->vte_cdata.vte_rxdesc[cons]; + status = letoh16(rxd->rx_desc->drst); + if (status & VTE_DRST_RX_OWN) + break; + total_len = VTE_RX_LEN(letoh16(rxd->rx_desc->drlen)); + m = rxd->rx_m; + if ((status & VTE_DRST_RX_OK) == 0) { + /* Discard errored frame. */ + rxd->rx_desc->drlen = + htole16(MCLBYTES - sizeof(uint32_t)); + rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); + continue; + } + if (vte_newbuf(sc, rxd, 0) != 0) { + ifp->if_iqdrops++; + rxd->rx_desc->drlen = + htole16(MCLBYTES - sizeof(uint32_t)); + rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); + continue; + } + + /* + * It seems there is no way to strip FCS bytes. + */ + m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN; + m->m_pkthdr.rcvif = ifp; + +#if NBPFILTER > 0 + if (ifp->if_bpf) + bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN); +#endif + + ether_input_mbuf(ifp, m); + } + + if (prog > 0) { + /* Update the consumer index. */ + sc->vte_cdata.vte_rx_cons = cons; + /* + * Sync updated RX descriptors such that controller see + * modified RX buffer addresses. + */ + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0, + sc->vte_cdata.vte_rx_ring_map->dm_mapsize, + BUS_DMASYNC_PREWRITE); +#ifdef notyet + /* + * Update residue counter. Controller does not + * keep track of number of available RX descriptors + * such that driver should have to update VTE_MRDCR + * to make controller know how many free RX + * descriptors were added to controller. This is + * a similar mechanism used in VIA velocity + * controllers and it indicates controller just + * polls OWN bit of current RX descriptor pointer. + * A couple of severe issues were seen on sample + * board where the controller continuously emits TX + * pause frames once RX pause threshold crossed. + * Once triggered it never recovered form that + * state, I couldn't find a way to make it back to + * work at least. This issue effectively + * disconnected the system from network. 
Also, the + * controller used 00:00:00:00:00:00 as source + * station address of TX pause frame. Probably this + * is one of reason why vendor recommends not to + * enable flow control on R6040 controller. + */ + CSR_WRITE_2(sc, VTE_MRDCR, prog | + (((VTE_RX_RING_CNT * 2) / 10) << + VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); +#endif + } +} + +void +vte_tick(void *arg) +{ + struct vte_softc *sc = arg; + struct mii_data *mii = &sc->sc_miibus; + int s; + + s = splnet(); + mii_tick(mii); + vte_stats_update(sc); + timeout_add_sec(&sc->vte_tick_ch, 1); + splx(s); +} + +void +vte_reset(struct vte_softc *sc) +{ + uint16_t mcr; + int i; + + mcr = CSR_READ_2(sc, VTE_MCR1); + CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET); + for (i = VTE_RESET_TIMEOUT; i > 0; i--) { + DELAY(10); + if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0) + break; + } + if (i == 0) + printf("%s: reset timeout(0x%04x)!\n", sc->sc_dev.dv_xname, + mcr); + /* + * Follow the guide of vendor recommended way to reset MAC. + * Vendor confirms relying on MCR1_MAC_RESET of VTE_MCR1 is + * not reliable so manually reset internal state machine. + */ + CSR_WRITE_2(sc, VTE_MACSM, 0x0002); + CSR_WRITE_2(sc, VTE_MACSM, 0); + DELAY(5000); +} + +int +vte_init(struct ifnet *ifp) +{ + struct vte_softc *sc = ifp->if_softc; + bus_addr_t paddr; + uint8_t *eaddr; + int error; + + /* + * Cancel any pending I/O. + */ + vte_stop(sc); + /* + * Reset the chip to a known state. + */ + vte_reset(sc); + + /* Initialize RX descriptors. */ + error = vte_init_rx_ring(sc); + if (error != 0) { + printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname); + vte_stop(sc); + return (error); + } + error = vte_init_tx_ring(sc); + if (error != 0) { + printf("%s: no memory for Tx buffers.\n", sc->sc_dev.dv_xname); + vte_stop(sc); + return (error); + } + + /* + * Reprogram the station address. Controller supports up + * to 4 different station addresses so driver programs the + * first station address as its own ethernet address and + * configure the remaining three addresses as perfect + * multicast addresses. + */ + eaddr = LLADDR(ifp->if_sadl); + CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]); + CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]); + CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]); + + /* Set TX descriptor base addresses. */ + paddr = sc->vte_cdata.vte_tx_ring_paddr; + CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16); + CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF); + /* Set RX descriptor base addresses. */ + paddr = sc->vte_cdata.vte_rx_ring_paddr; + CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16); + CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF); + /* + * Initialize RX descriptor residue counter and set RX + * pause threshold to 20% of available RX descriptors. + * See comments on vte_rxeof() for details on flow control + * issues. + */ + CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) | + (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); + + /* + * Always use maximum frame size that controller can + * support. Otherwise received frames that has longer + * frame length than vte(4) MTU would be silently dropped + * in controller. This would break path-MTU discovery as + * sender wouldn't get any responses from receiver. The + * RX buffer size should be multiple of 4. + * Note, jumbo frames are silently ignored by controller + * and even MAC counters do not detect them. + */ + CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX); + + /* Configure FIFO. 
*/ + CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 | + MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 | + MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT); + + /* + * Configure TX/RX MACs. Actual resolved duplex and flow + * control configuration is done after detecting a valid + * link. Note, we don't generate early interrupt here + * as well since FreeBSD does not have interrupt latency + * problems like Windows. + */ + CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT); + /* + * We manually keep track of PHY status changes to + * configure resolved duplex and flow control since only + * duplex configuration can be automatically reflected to + * MCR0. + */ + CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 | + MCR1_EXCESS_COL_RETRY_16); + + /* Initialize RX filter. */ + vte_iff(sc); + + /* Disable TX/RX interrupt moderation control. */ + CSR_WRITE_2(sc, VTE_MRICR, 0); + CSR_WRITE_2(sc, VTE_MTICR, 0); + + /* Enable MAC event counter interrupts. */ + CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS); + /* Clear MAC statistics. */ + vte_stats_clear(sc); + + /* Acknowledge all pending interrupts and clear it. */ + CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); + CSR_WRITE_2(sc, VTE_MISR, 0); + + sc->vte_flags &= ~VTE_FLAG_LINK; + /* Switch to the current media. */ + vte_mediachange(ifp); + + timeout_add_sec(&sc->vte_tick_ch, 1); + + ifp->if_flags |= IFF_RUNNING; + ifp->if_flags &= ~IFF_OACTIVE; + + return (0); +} + +void +vte_stop(struct vte_softc *sc) +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct vte_txdesc *txd; + struct vte_rxdesc *rxd; + int i; + + /* + * Mark the interface down and cancel the watchdog timer. + */ + ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); + ifp->if_timer = 0; + sc->vte_flags &= ~VTE_FLAG_LINK; + timeout_del(&sc->vte_tick_ch); + vte_stats_update(sc); + /* Disable interrupts. */ + CSR_WRITE_2(sc, VTE_MIER, 0); + CSR_WRITE_2(sc, VTE_MECIER, 0); + /* Stop RX/TX MACs. */ + vte_stop_mac(sc); + /* Clear interrupts. */ + CSR_READ_2(sc, VTE_MISR); + /* + * Free TX/RX mbufs still in the queues. + */ + for (i = 0; i < VTE_RX_RING_CNT; i++) { + rxd = &sc->vte_cdata.vte_rxdesc[i]; + if (rxd->rx_m != NULL) { + bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); + m_freem(rxd->rx_m); + rxd->rx_m = NULL; + } + } + for (i = 0; i < VTE_TX_RING_CNT; i++) { + txd = &sc->vte_cdata.vte_txdesc[i]; + if (txd->tx_m != NULL) { + bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); + if ((txd->tx_flags & VTE_TXMBUF) == 0) + m_freem(txd->tx_m); + txd->tx_m = NULL; + txd->tx_flags &= ~VTE_TXMBUF; + } + } + /* Free TX mbuf pools used for deep copy. */ + for (i = 0; i < VTE_TX_RING_CNT; i++) { + if (sc->vte_cdata.vte_txmbufs[i] != NULL) { + m_freem(sc->vte_cdata.vte_txmbufs[i]); + sc->vte_cdata.vte_txmbufs[i] = NULL; + } + } +} + +void +vte_start_mac(struct vte_softc *sc) +{ + uint16_t mcr; + int i; + + /* Enable RX/TX MACs. */ + mcr = CSR_READ_2(sc, VTE_MCR0); + if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != + (MCR0_RX_ENB | MCR0_TX_ENB)) { + mcr |= MCR0_RX_ENB | MCR0_TX_ENB; + CSR_WRITE_2(sc, VTE_MCR0, mcr); + for (i = VTE_TIMEOUT; i > 0; i--) { + mcr = CSR_READ_2(sc, VTE_MCR0); + if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == + (MCR0_RX_ENB | MCR0_TX_ENB)) + break; + DELAY(10); + } + if (i == 0) + printf("%s: could not enable RX/TX MAC(0x%04x)!\n", + sc->sc_dev.dv_xname, mcr); + } +} + +void +vte_stop_mac(struct vte_softc *sc) +{ + uint16_t mcr; + int i; + + /* Disable RX/TX MACs. 
*/ + mcr = CSR_READ_2(sc, VTE_MCR0); + if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) { + mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB); + CSR_WRITE_2(sc, VTE_MCR0, mcr); + for (i = VTE_TIMEOUT; i > 0; i--) { + mcr = CSR_READ_2(sc, VTE_MCR0); + if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0) + break; + DELAY(10); + } + if (i == 0) + printf("%s: could not disable RX/TX MAC(0x%04x)!\n", + sc->sc_dev.dv_xname, mcr); + } +} + +int +vte_init_tx_ring(struct vte_softc *sc) +{ + struct vte_tx_desc *desc; + struct vte_txdesc *txd; + bus_addr_t addr; + int i; + + sc->vte_cdata.vte_tx_prod = 0; + sc->vte_cdata.vte_tx_cons = 0; + sc->vte_cdata.vte_tx_cnt = 0; + + /* Pre-allocate TX mbufs for deep copy. */ + for (i = 0; i < VTE_TX_RING_CNT; i++) { + MGETHDR(sc->vte_cdata.vte_txmbufs[i], + M_DONTWAIT, MT_DATA); + if (sc->vte_cdata.vte_txmbufs[i] == NULL) + return (ENOBUFS); + MCLGET(sc->vte_cdata.vte_txmbufs[i], M_DONTWAIT); + if (!(sc->vte_cdata.vte_txmbufs[i]->m_flags & M_EXT)) { + m_freem(sc->vte_cdata.vte_txmbufs[i]); + return (ENOBUFS); + } + sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES; + sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES; + } + desc = sc->vte_cdata.vte_tx_ring; + bzero(desc, VTE_TX_RING_SZ); + for (i = 0; i < VTE_TX_RING_CNT; i++) { + txd = &sc->vte_cdata.vte_txdesc[i]; + txd->tx_m = NULL; + if (i != VTE_TX_RING_CNT - 1) + addr = sc->vte_cdata.vte_tx_ring_paddr + + sizeof(struct vte_tx_desc) * (i + 1); + else + addr = sc->vte_cdata.vte_tx_ring_paddr + + sizeof(struct vte_tx_desc) * 0; + desc = &sc->vte_cdata.vte_tx_ring[i]; + desc->dtnp = htole32(addr); + txd->tx_desc = desc; + } + + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_tx_ring_map, 0, + sc->vte_cdata.vte_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); + return (0); +} + +int +vte_init_rx_ring(struct vte_softc *sc) +{ + struct vte_rx_desc *desc; + struct vte_rxdesc *rxd; + bus_addr_t addr; + int i; + + sc->vte_cdata.vte_rx_cons = 0; + desc = sc->vte_cdata.vte_rx_ring; + bzero(desc, VTE_RX_RING_SZ); + for (i = 0; i < VTE_RX_RING_CNT; i++) { + rxd = &sc->vte_cdata.vte_rxdesc[i]; + rxd->rx_m = NULL; + if (i != VTE_RX_RING_CNT - 1) + addr = sc->vte_cdata.vte_rx_ring_paddr + + sizeof(struct vte_rx_desc) * (i + 1); + else + addr = sc->vte_cdata.vte_rx_ring_paddr + + sizeof(struct vte_rx_desc) * 0; + desc = &sc->vte_cdata.vte_rx_ring[i]; + desc->drnp = htole32(addr); + rxd->rx_desc = desc; + if (vte_newbuf(sc, rxd, 1) != 0) + return (ENOBUFS); + } + + bus_dmamap_sync(sc->sc_dmat, sc->vte_cdata.vte_rx_ring_map, 0, + sc->vte_cdata.vte_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE); + + return (0); +} + +void +vte_iff(struct vte_softc *sc) +{ + struct arpcom *ac = &sc->sc_arpcom; + struct ifnet *ifp = &ac->ac_if; + struct ether_multi *enm; + struct ether_multistep step; + uint8_t *eaddr; + uint32_t crc; + uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3]; + uint16_t mchash[4], mcr; + int i, nperf; + + bzero(mchash, sizeof(mchash)); + for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { + rxfilt_perf[i][0] = 0xFFFF; + rxfilt_perf[i][1] = 0xFFFF; + rxfilt_perf[i][2] = 0xFFFF; + } + + mcr = CSR_READ_2(sc, VTE_MCR0); + mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST | MCR0_MULTICAST); + ifp->if_flags &= ~IFF_ALLMULTI; + + /* Always accept broadcast frames. 
*/ + mcr |= MCR0_BROADCAST; + + if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { + ifp->if_flags |= IFF_ALLMULTI; + if (ifp->if_flags & IFF_PROMISC) + mcr |= MCR0_PROMISC; + else + mcr |= MCR0_MULTICAST; + mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xFFFF; + } else { + nperf = 0; + ETHER_FIRST_MULTI(step, ac, enm); + while (enm != NULL) { + /* + * Program the first 3 multicast groups into + * the perfect filter. For all others, use the + * hash table. + */ + if (nperf < VTE_RXFILT_PERFECT_CNT) { + eaddr = enm->enm_addrlo; + rxfilt_perf[nperf][0] = + eaddr[1] << 8 | eaddr[0]; + rxfilt_perf[nperf][1] = + eaddr[3] << 8 | eaddr[2]; + rxfilt_perf[nperf][2] = + eaddr[5] << 8 | eaddr[4]; + nperf++; + continue; + } + crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); + mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F); + ETHER_NEXT_MULTI(step, enm); + } + } + if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 || + mchash[3] != 0) + mcr |= MCR0_MULTICAST; + + /* Program multicast hash table. */ + CSR_WRITE_2(sc, VTE_MAR0, mchash[0]); + CSR_WRITE_2(sc, VTE_MAR1, mchash[1]); + CSR_WRITE_2(sc, VTE_MAR2, mchash[2]); + CSR_WRITE_2(sc, VTE_MAR3, mchash[3]); + /* Program perfect filter table. */ + for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { + CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0, + rxfilt_perf[i][0]); + CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2, + rxfilt_perf[i][1]); + CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4, + rxfilt_perf[i][2]); + } + CSR_WRITE_2(sc, VTE_MCR0, mcr); + CSR_READ_2(sc, VTE_MCR0); +} diff --git a/sys/dev/pci/if_vtereg.h b/sys/dev/pci/if_vtereg.h new file mode 100644 index 00000000000..13e8da83586 --- /dev/null +++ b/sys/dev/pci/if_vtereg.h @@ -0,0 +1,460 @@ +/* $OpenBSD: if_vtereg.h,v 1.1 2011/01/15 04:35:34 kevlo Exp $ */ +/*- + * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD: head/sys/dev/vte/if_vtereg.h 216829 2010-12-31 00:21:41Z yongari $ + */ + +#ifndef _IF_VTEREG_H +#define _IF_VTEREG_H + +/* PCI low memory base and low I/O base register */ +#define VTE_PCI_LOIO 0x10 +#define VTE_PCI_LOMEM 0x14 + +/* MAC control register 0 */ +#define VTE_MCR0 0x00 +#define MCR0_ACCPT_ERR 0x0001 +#define MCR0_RX_ENB 0x0002 +#define MCR0_ACCPT_RUNT 0x0004 +#define MCR0_ACCPT_LONG_PKT 0x0008 +#define MCR0_ACCPT_DRIBBLE 0x0010 +#define MCR0_PROMISC 0x0020 +#define MCR0_BROADCAST 0x0040 +#define MCR0_RX_EARLY_INTR 0x0080 +#define MCR0_MULTICAST 0x0100 +#define MCR0_FC_ENB 0x0200 +#define MCR0_TX_ENB 0x1000 +#define MCR0_TX_EARLY_INTR 0x4000 +#define MCR0_FULL_DUPLEX 0x8000 + +/* MAC control register 1 */ +#define VTE_MCR1 0x04 +#define MCR1_MAC_RESET 0x0001 +#define MCR1_MAC_LOOPBACK 0x0002 +#define MCR1_EXCESS_COL_RETRANS_DIS 0x0004 +#define MCR1_AUTO_CHG_DUPLEX 0x0008 +#define MCR1_PKT_LENGTH_1518 0x0010 +#define MCR1_PKT_LENGTH_1522 0x0020 +#define MCR1_PKT_LENGTH_1534 0x0030 +#define MCR1_PKT_LENGTH_1537 0x0000 +#define MCR1_EARLY_INTR_THRESH_1129 0x0000 +#define MCR1_EARLY_INTR_THRESH_1257 0x0040 +#define MCR1_EARLY_INTR_THRESH_1385 0x0080 +#define MCR1_EARLY_INTR_THRESH_1513 0x00C0 +#define MCR1_EXCESS_COL_RETRY_16 0x0000 +#define MCR1_EXCESS_COL_RETRY_32 0x0100 +#define MCR1_FC_ACTIVE 0x0200 +#define MCR1_RX_DESC_HASH_IDX 0x4000 +#define MCR1_RX_UNICAST_HASH 0x8000 + +#define MCR1_PKT_LENGTH_MASK 0x0030 +#define MCR1_EARLY_INTR_THRESH_MASK 0x00C0 + +/* MAC bus control register */ +#define VTE_MBCR 0x08 +#define MBCR_FIFO_XFER_LENGTH_4 0x0000 +#define MBCR_FIFO_XFER_LENGTH_8 0x0001 +#define MBCR_FIFO_XFER_LENGTH_16 0x0002 +#define MBCR_FIFO_XFER_LENGTH_32 0x0003 +#define MBCR_TX_FIFO_THRESH_16 0x0000 +#define MBCR_TX_FIFO_THRESH_32 0x0004 +#define MBCR_TX_FIFO_THRESH_64 0x0008 +#define MBCR_TX_FIFO_THRESH_96 0x000C +#define MBCR_RX_FIFO_THRESH_8 0x0000 +#define MBCR_RX_FIFO_THRESH_16 0x0010 +#define MBCR_RX_FIFO_THRESH_32 0x0020 +#define MBCR_RX_FIFO_THRESH_64 0x0030 +#define MBCR_SDRAM_BUS_REQ_TIMER_MASK 0x1F00 +#define MBCR_SDRAM_BUS_REQ_TIMER_SHIFT 8 +#define MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT 0x1F00 + +/* MAC TX interrupt control register */ +#define VTE_MTICR 0x0C +#define MTICR_TX_TIMER_MASK 0x001F +#define MTICR_TX_BUNDLE_MASK 0x0F00 +#define VTE_IM_TX_TIMER_DEFAULT 0x7F +#define VTE_IM_TX_BUNDLE_DEFAULT 15 + +#define VTE_IM_TIMER_MIN 0 +#define VTE_IM_TIMER_MAX 82 +#define VTE_IM_TIMER_MASK 0x001F +#define VTE_IM_TIMER_SHIFT 0 +#define VTE_IM_BUNDLE_MIN 1 +#define VTE_IM_BUNDLE_MAX 15 +#define VTE_IM_BUNDLE_SHIFT 8 + +/* MAC RX interrupt control register */ +#define VTE_MRICR 0x10 +#define MRICR_RX_TIMER_MASK 0x001F +#define MRICR_RX_BUNDLE_MASK 0x0F00 +#define VTE_IM_RX_TIMER_DEFAULT 0x7F +#define VTE_IM_RX_BUNDLE_DEFAULT 15 + +/* MAC TX poll command register */ +#define VTE_TX_POLL 0x14 +#define TX_POLL_START 0x0001 + +/* MAC RX buffer size register */ +#define VTE_MRBSR 0x18 +#define VTE_MRBSR_SIZE_MASK 0x03FF + +/* MAC RX descriptor control register */ +#define VTE_MRDCR 0x1A +#define VTE_MRDCR_RESIDUE_MASK 0x00FF +#define VTE_MRDCR_RX_PAUSE_THRESH_MASK 0xFF00 +#define VTE_MRDCR_RX_PAUSE_THRESH_SHIFT 8 + +/* MAC Last status register */ +#define VTE_MLSR 0x1C +#define MLSR_MULTICAST 0x0001 +#define MLSR_BROADCAST 0x0002 +#define MLSR_CRC_ERR 0x0004 +#define MLSR_RUNT 0x0008 +#define MLSR_LONG_PKT 0x0010 +#define MLSR_TRUNC 0x0020 +#define MLSR_DRIBBLE 0x0040 +#define MLSR_PHY_ERR 0x0080 +#define MLSR_TX_FIFO_UNDERRUN 0x0200 +#define 
MLSR_RX_DESC_UNAVAIL 0x0400 +#define MLSR_TX_EXCESS_COL 0x2000 +#define MLSR_TX_LATE_COL 0x4000 +#define MLSR_RX_FIFO_OVERRUN 0x8000 + +/* MAC MDIO control register */ +#define VTE_MMDIO 0x20 +#define MMDIO_REG_ADDR_MASK 0x001F +#define MMDIO_PHY_ADDR_MASK 0x1F00 +#define MMDIO_READ 0x2000 +#define MMDIO_WRITE 0x4000 +#define MMDIO_REG_ADDR_SHIFT 0 +#define MMDIO_PHY_ADDR_SHIFT 8 + +/* MAC MDIO read data register */ +#define VTE_MMRD 0x24 +#define MMRD_DATA_MASK 0xFFFF + +/* MAC MDIO write data register */ +#define VTE_MMWD 0x28 +#define MMWD_DATA_MASK 0xFFFF + +/* MAC TX descriptor start address 0 */ +#define VTE_MTDSA0 0x2C + +/* MAC TX descriptor start address 1 */ +#define VTE_MTDSA1 0x30 + +/* MAC RX descriptor start address 0 */ +#define VTE_MRDSA0 0x34 + +/* MAC RX descriptor start address 1 */ +#define VTE_MRDSA1 0x38 + +/* MAC Interrupt status register */ +#define VTE_MISR 0x3C +#define MISR_RX_DONE 0x0001 +#define MISR_RX_DESC_UNAVAIL 0x0002 +#define MISR_RX_FIFO_FULL 0x0004 +#define MISR_RX_EARLY_INTR 0x0008 +#define MISR_TX_DONE 0x0010 +#define MISR_TX_EARLY_INTR 0x0080 +#define MISR_EVENT_CNT_OFLOW 0x0100 +#define MISR_PHY_MEDIA_CHG 0x0200 + +/* MAC Interrupt enable register */ +#define VTE_MIER 0x40 + +#define VTE_INTRS \ + (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL | MISR_RX_FIFO_FULL | \ + MISR_TX_DONE | MISR_EVENT_CNT_OFLOW) + +/* MAC Event counter interrupt status register */ +#define VTE_MECISR 0x44 +#define MECISR_EC_RX_DONE 0x0001 +#define MECISR_EC_MULTICAST 0x0002 +#define MECISR_EC_BROADCAST 0x0004 +#define MECISR_EC_CRC_ERR 0x0008 +#define MECISR_EC_RUNT 0x0010 +#define MESCIR_EC_LONG_PKT 0x0020 +#define MESCIR_EC_RX_DESC_UNAVAIL 0x0080 +#define MESCIR_EC_RX_FIFO_FULL 0x0100 +#define MESCIR_EC_TX_DONE 0x0200 +#define MESCIR_EC_LATE_COL 0x0400 +#define MESCIR_EC_TX_UNDERRUN 0x0800 + +/* MAC Event counter interrupt enable register */ +#define VTE_MECIER 0x48 +#define VTE_MECIER_INTRS \ + (MECISR_EC_RX_DONE | MECISR_EC_MULTICAST | MECISR_EC_BROADCAST | \ + MECISR_EC_CRC_ERR | MECISR_EC_RUNT | MESCIR_EC_LONG_PKT | \ + MESCIR_EC_RX_DESC_UNAVAIL | MESCIR_EC_RX_FIFO_FULL | \ + MESCIR_EC_TX_DONE | MESCIR_EC_LATE_COL | MESCIR_EC_TX_UNDERRUN) + +#define VTE_CNT_RX_DONE 0x50 + +#define VTE_CNT_MECNT0 0x52 + +#define VTE_CNT_MECNT1 0x54 + +#define VTE_CNT_MECNT2 0x56 + +#define VTE_CNT_MECNT3 0x58 + +#define VTE_CNT_TX_DONE 0x5A + +#define VTE_CNT_MECNT4 0x5C + +#define VTE_CNT_PAUSE 0x5E + +/* MAC Hash table register */ +#define VTE_MAR0 0x60 +#define VTE_MAR1 0x62 +#define VTE_MAR2 0x64 +#define VTE_MAR3 0x66 + +/* MAC station address and multicast address register */ +#define VTE_MID0L 0x68 +#define VTE_MID0M 0x6A +#define VTE_MID0H 0x6C +#define VTE_MID1L 0x70 +#define VTE_MID1M 0x72 +#define VTE_MID1H 0x74 +#define VTE_MID2L 0x78 +#define VTE_MID2M 0x7A +#define VTE_MID2H 0x7C +#define VTE_MID3L 0x80 +#define VTE_MID3M 0x82 +#define VTE_MID3H 0x84 + +#define VTE_RXFILTER_PEEFECT_BASE VTE_MID1L +#define VTE_RXFILT_PERFECT_CNT 3 + +/* MAC PHY status change configuration register */ +#define VTE_MPSCCR 0x88 +#define MPSCCR_TIMER_DIVIDER_MASK 0x0007 +#define MPSCCR_PHY_ADDR_MASK 0x1F00 +#define MPSCCR_PHY_STS_CHG_ENB 0x8000 +#define MPSCCR_PHY_ADDR_SHIFT 8 + +/* MAC PHY status register2 */ +#define VTE_MPSR 0x8A +#define MPSR_LINK_UP 0x0001 +#define MPSR_SPEED_100 0x0002 +#define MPSR_FULL_DUPLEX 0x0004 + +/* MAC Status machine(undocumented). 
*/ +#define VTE_MACSM 0xAC + +/* MDC Speed control register */ +#define VTE_MDCSC 0xB6 +#define MDCSC_DEFAULT 0x0030 + +/* MAC Identifier and revision register */ +#define VTE_MACID_REV 0xBC +#define VTE_MACID_REV_MASK 0x00FF +#define VTE_MACID_MASK 0xFF00 +#define VTE_MACID_REV_SHIFT 0 +#define VTE_MACID_SHIFT 8 + +/* MAC Identifier register */ +#define VTE_MACID 0xBE + +/* + * RX descriptor + * - Added one more uint16_t member to align it 4 on bytes boundary. + * This does not affect operation of controller since it includes + * next pointer address. + */ +struct vte_rx_desc { + uint16_t drst; + uint16_t drlen; + uint32_t drbp; + uint32_t drnp; + uint16_t hidx; + uint16_t rsvd2; + uint16_t rsvd3; + uint16_t __pad; /* Not actual descriptor member. */ +}; + +#define VTE_DRST_MID_MASK 0x0003 +#define VTE_DRST_MID_HIT 0x0004 +#define VTE_DRST_MULTICAST_HIT 0x0008 +#define VTE_DRST_MULTICAST 0x0010 +#define VTE_DRST_BROADCAST 0x0020 +#define VTE_DRST_CRC_ERR 0x0040 +#define VTE_DRST_RUNT 0x0080 +#define VTE_DRST_LONG 0x0100 +#define VTE_DRST_TRUNC 0x0200 +#define VTE_DRST_DRIBBLE 0x0400 +#define VTE_DRST_PHY_ERR 0x0800 +#define VTE_DRST_RX_OK 0x4000 +#define VTE_DRST_RX_OWN 0x8000 + +#define VTE_RX_LEN(x) ((x) & 0x7FF) + +#define VTE_RX_HIDX(x) ((x) & 0x3F) + +/* + * TX descriptor + * - Added one more uint32_t member to align it on 16 bytes boundary. + */ +struct vte_tx_desc { + uint16_t dtst; + uint16_t dtlen; + uint32_t dtbp; + uint32_t dtnp; + uint32_t __pad; /* Not actual descriptor member. */ +}; + +#define VTE_DTST_EXCESS_COL 0x0010 +#define VTE_DTST_LATE_COL 0x0020 +#define VTE_DTST_UNDERRUN 0x0040 +#define VTE_DTST_NO_CRC 0x2000 +#define VTE_DTST_TX_OK 0x4000 +#define VTE_DTST_TX_OWN 0x8000 + +#define VTE_TX_LEN(x) ((x) & 0x7FF) + +#define VTE_TX_RING_CNT 64 +#define VTE_TX_RING_ALIGN 16 +/* + * The TX/RX descriptor format has no limitation for number of + * descriptors in TX/RX ring. However, the maximum number of + * descriptors that could be set as RX descriptor ring residue + * counter is 255. This effectively limits number of RX + * descriptors available to be less than or equal to 255. 
+ */ +#define VTE_RX_RING_CNT 128 +#define VTE_RX_RING_ALIGN 16 +#define VTE_RX_BUF_ALIGN 4 + +#define VTE_DESC_INC(x, y) ((x) = ((x) + 1) % (y)) + +#define VTE_TX_RING_SZ \ + (sizeof(struct vte_tx_desc) * VTE_TX_RING_CNT) +#define VTE_RX_RING_SZ \ + (sizeof(struct vte_rx_desc) * VTE_RX_RING_CNT) + +#define VTE_RX_BUF_SIZE_MAX (MCLBYTES - sizeof(uint32_t)) + +#define VTE_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN) + +struct vte_rxdesc { + struct mbuf *rx_m; + bus_dmamap_t rx_dmamap; + struct vte_rx_desc *rx_desc; +}; + +struct vte_txdesc { + struct mbuf *tx_m; + bus_dmamap_t tx_dmamap; + struct vte_tx_desc *tx_desc; + int tx_flags; +#define VTE_TXMBUF 0x0001 +}; + +struct vte_chain_data { + struct vte_txdesc vte_txdesc[VTE_TX_RING_CNT]; + struct mbuf *vte_txmbufs[VTE_TX_RING_CNT]; + struct vte_rxdesc vte_rxdesc[VTE_RX_RING_CNT]; + bus_dmamap_t vte_tx_ring_map; + bus_dma_segment_t vte_tx_ring_seg; + bus_dmamap_t vte_rx_ring_map; + bus_dma_segment_t vte_rx_ring_seg; + bus_dmamap_t vte_rr_ring_map; + bus_dma_segment_t vte_rr_ring_seg; + bus_dmamap_t vte_rx_sparemap; + bus_dmamap_t vte_cmb_map; + bus_dma_segment_t vte_cmb_seg; + bus_dmamap_t vte_smb_map; + bus_dma_segment_t vte_smb_seg; + struct vte_tx_desc *vte_tx_ring; + bus_addr_t vte_tx_ring_paddr; + struct vte_rx_desc *vte_rx_ring; + bus_addr_t vte_rx_ring_paddr; + + int vte_tx_prod; + int vte_tx_cons; + int vte_tx_cnt; + int vte_rx_cons; +}; + +struct vte_hw_stats { + /* RX stats. */ + uint32_t rx_frames; + uint32_t rx_bcast_frames; + uint32_t rx_mcast_frames; + uint32_t rx_runts; + uint32_t rx_crcerrs; + uint32_t rx_long_frames; + uint32_t rx_fifo_full; + uint32_t rx_desc_unavail; + uint32_t rx_pause_frames; + + /* TX stats. */ + uint32_t tx_frames; + uint32_t tx_underruns; + uint32_t tx_late_colls; + uint32_t tx_pause_frames; +}; + +/* + * Software state per device. + */ +struct vte_softc { + struct device sc_dev; + struct arpcom sc_arpcom; + bus_space_tag_t sc_mem_bt; + bus_space_handle_t sc_mem_bh; + bus_size_t sc_mem_size; + bus_dma_tag_t sc_dmat; + pci_chipset_tag_t sc_pct; + pcitag_t sc_pcitag; + void *sc_irq_handle; + struct mii_data sc_miibus; + uint8_t vte_eaddr[ETHER_ADDR_LEN]; + int vte_flags; +#define VTE_FLAG_LINK 0x8000 + + struct timeout vte_tick_ch; + struct vte_hw_stats vte_stats; + struct vte_chain_data vte_cdata; + int vte_int_rx_mod; + int vte_int_tx_mod; +}; + +/* Register access macros. */ +#define CSR_WRITE_2(_sc, reg, val) \ + bus_space_write_2((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg), (val)) +#define CSR_READ_2(_sc, reg) \ + bus_space_read_2((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg)) + +#define VTE_TX_TIMEOUT 5 +#define VTE_RESET_TIMEOUT 100 +#define VTE_TIMEOUT 1000 +#define VTE_PHY_TIMEOUT 1000 + +#endif /* _IF_VTEREG_H */ |
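The interrupt-moderation setup in vte_miibus_statchg() above encodes a timer
value whose wait time is (63 + TIMER * 64) MII clocks, with the MII clock at
25 MHz for 100 Mbps and 2.5 MHz for 10 Mbps. The latencies quoted in the
driver's comments (48.6us for 100 Mbps with TIMER=18, 50.8us for 10 Mbps with
TIMER=1) can be checked with a small standalone calculation; this sketch is
illustrative only and not part of the driver:

	#include <stdio.h>

	/*
	 * Interrupt moderation wait time, per the formula in
	 * vte_miibus_statchg(): (63 + timer * 64) MII clocks,
	 * with the MII clock given in MHz.
	 */
	static double
	vte_im_usecs(int timer, double mii_mhz)
	{
		return ((63.0 + timer * 64.0) / mii_mhz);
	}

	int
	main(void)
	{
		/* TIMER=18 at 100Mbps (25MHz MII clock): expect 48.6us */
		printf("100Mbps, TIMER=18: %.1f us\n", vte_im_usecs(18, 25.0));
		/* TIMER=1 at 10Mbps (2.5MHz MII clock): expect 50.8us */
		printf("10Mbps,  TIMER=1:  %.1f us\n", vte_im_usecs(1, 2.5));
		return (0);
	}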