Diffstat (limited to 'sys/dev')
-rw-r--r--	sys/dev/pci/if_vic.c	| 1104
-rw-r--r--	sys/dev/pci/if_vicreg.h	|  195
-rw-r--r--	sys/dev/pci/if_vicvar.h	|  107
3 files changed, 1406 insertions, 0 deletions
diff --git a/sys/dev/pci/if_vic.c b/sys/dev/pci/if_vic.c
new file mode 100644
index 00000000000..3425bdaa477
--- /dev/null
+++ b/sys/dev/pci/if_vic.c
@@ -0,0 +1,1104 @@
+/* $OpenBSD: if_vic.c,v 1.1 2006/02/25 23:49:04 reyk Exp $ */
+
+/*
+ * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Driver for the VMware Virtual NIC ("vmxnet")
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/limits.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/timeout.h>
+#include <sys/device.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/pci/if_vicreg.h>
+#include <dev/pci/if_vicvar.h>
+
+int vic_pci_match(struct device *, void *, void *);
+void vic_pci_attach(struct device *, struct device *, void *);
+void vic_pci_shutdown(void *);
+
+int vic_attach(struct vic_softc *);
+void vic_link_state(struct vic_softc *);
+void vic_shutdown(void *);
+int vic_intr(void *);
+void vic_rx_proc(struct vic_softc *);
+void vic_tx_proc(struct vic_softc *);
+void vic_iff(struct vic_softc *, u_int);
+void vic_getlladdr(struct vic_softc *);
+void vic_setlladdr(struct vic_softc *);
+int vic_media_change(struct ifnet *);
+void vic_media_status(struct ifnet *, struct ifmediareq *);
+void vic_start(struct ifnet *);
+int vic_tx_start(struct vic_softc *, struct mbuf *);
+void vic_watchdog(struct ifnet *);
+int vic_ioctl(struct ifnet *, u_long, caddr_t);
+void vic_init(struct ifnet *);
+void vic_stop(struct ifnet *);
+void vic_printb(unsigned short, char *);
+void vic_timer(void *);
+void vic_poll(void *); /* XXX poll */
+
+struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t);
+int vic_alloc_data(struct vic_softc *);
+void vic_reset_data(struct vic_softc *);
+void vic_free_data(struct vic_softc *);
+
+struct vic_pci_softc {
+ struct vic_softc sc_sc;
+ pci_chipset_tag_t sc_pc;
+ pcitag_t sc_pcitag;
+ pci_intr_handle_t sc_iht;
+ void *sc_ih;
+};
+
+struct cfdriver vic_cd = {
+ 0, "vic", DV_IFNET
+};
+
+struct cfattach vic_ca = {
+ sizeof(struct vic_pci_softc),
+ vic_pci_match,
+ vic_pci_attach
+};
+
+const struct pci_matchid vic_pci_devices[] = {
+ { PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
+};
+
+extern int ifqmaxlen;
+
+int
+vic_pci_match(struct device *parent, void *match, void *aux)
+{
+ return (pci_matchbyid((struct pci_attach_args *)aux,
+ vic_pci_devices, sizeof(vic_pci_devices) /
+ sizeof(vic_pci_devices[0])));
+}
+
+void
+vic_pci_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct vic_pci_softc *psc = (struct vic_pci_softc *)self;
+ struct pci_attach_args *pa = aux;
+ struct vic_softc *sc = &psc->sc_sc;
+ pci_chipset_tag_t pc = pa->pa_pc;
+ pcitag_t pt = pa->pa_tag;
+ const char *intrstr = NULL;
+ void *hook;
+ pcireg_t res;
+
+ psc->sc_pc = pc;
+ psc->sc_pcitag = pt;
+ sc->sc_psc = psc;
+
+	/* Validate PCI configuration */
+	res = pci_conf_read(pc, pt, PCI_COMMAND_STATUS_REG);
+	if ((res & PCI_COMMAND_IO_ENABLE) == 0) {
+		printf(": i/o space access is not enabled\n");
+		return;
+	}
+
+	/* Map the register window */
+	if (pci_mapreg_map(pa, VIC_BAR0, PCI_MAPREG_TYPE_IO, 0,
+	    &sc->sc_st, &sc->sc_sh, NULL, NULL, 0)) {
+		printf(": mapping of register space failed\n");
+		return;
+	}
+
+ /* Map and enable the interrupt line */
+ if (pci_intr_map(pa, &psc->sc_iht)) {
+ printf(": interrupt mapping failed\n");
+ return;
+ }
+ intrstr = pci_intr_string(pc, psc->sc_iht);
+ if ((psc->sc_ih = pci_intr_establish(pc,
+ psc->sc_iht, IPL_NET, vic_intr, sc, sc->sc_dev.dv_xname)) == NULL) {
+ printf(": failed to establish the interrupt\n");
+ return;
+ }
+
+ printf(": %s\n", intrstr);
+
+ sc->sc_dmat = pa->pa_dmat;
+
+ if ((hook = shutdownhook_establish(vic_pci_shutdown, psc)) == NULL) {
+ printf(": failed to establish the shutdown hook\n");
+ return;
+ }
+
+ /* Finally, attach the device */
+ if (vic_attach(sc) == 0)
+ return;
+
+ shutdownhook_disestablish(hook);
+
+ return;
+}
+
+void
+vic_pci_shutdown(void *self)
+{
+ struct vic_pci_softc *psc = (struct vic_pci_softc *)self;
+ vic_shutdown(&psc->sc_sc);
+}
+
+int
+vic_attach(struct vic_softc *sc)
+{
+ struct ifnet *ifp;
+
+ sc->sc_ver_major = VIC_READ(VIC_VERSION_MAJOR);
+ sc->sc_ver_minor = VIC_READ(VIC_VERSION_MINOR);
+
+ printf("%s: vmxnet %X", sc->sc_dev.dv_xname,
+ sc->sc_ver_major & ~VIC_VERSION_MAJOR_M);
+
+ /* Check for a supported version */
+ if (((sc->sc_ver_major & VIC_VERSION_MAJOR_M) !=
+ (VIC_MAGIC & VIC_VERSION_MAJOR_M)) ||
+ VIC_MAGIC > sc->sc_ver_major ||
+ VIC_MAGIC < sc->sc_ver_minor) {
+		printf(", unsupported device\n");
+ return (1);
+ }
+
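+	/*
+	 * The command register acts as a mailbox: write a command code
+	 * to VIC_CMD and read the result back from the same register.
+	 */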
+ VIC_WRITE(VIC_CMD, VIC_CMD_NUM_Rx_BUF);
+ sc->sc_nrxbuf = VIC_READ(VIC_CMD);
+ if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
+ sc->sc_nrxbuf = VIC_NBUF;
+ VIC_WRITE(VIC_CMD, VIC_CMD_NUM_Tx_BUF);
+ sc->sc_ntxbuf = VIC_READ(VIC_CMD);
+ if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
+ sc->sc_ntxbuf = VIC_NBUF;
+
+ VIC_WRITE(VIC_CMD, VIC_CMD_FEATURE);
+ sc->sc_feature = VIC_READ(VIC_CMD);
+
+ VIC_WRITE(VIC_CMD, VIC_CMD_HWCAP);
+ sc->sc_cap = VIC_READ(VIC_CMD);
+
+ if (sc->sc_cap) {
+ printf(", ");
+ vic_printb(sc->sc_cap, VIC_CMD_HWCAP_BITS);
+ }
+
+ vic_getlladdr(sc);
+
+ printf(", address: %s\n", ether_sprintf(sc->sc_lladdr));
+
+ /* Initialise pseudo media types */
+ ifmedia_init(&sc->sc_media, 0, vic_media_change,
+ vic_media_status);
+ ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
+
+ ifp = &sc->sc_ac.ac_if;
+ ifp->if_carp = NULL;
+ ifp->if_type = IFT_ETHER;
+ ifp->if_softc = sc;
+ ifp->if_start = vic_start;
+ ifp->if_watchdog = vic_watchdog;
+ ifp->if_ioctl = vic_ioctl;
+ ifp->if_output = ether_output;
+ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
+
+ ifp->if_capabilities = 0;
+#if 0
+ /* XXX interface capabilities */
+ if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+ if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
+ ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
+ IFCAP_CSUM_UDPv4;
+#endif
+
+ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
+ bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
+
+ /* Allocate Rx and Tx queues */
+ if (vic_alloc_data(sc) != 0) {
+ printf("%s: could not allocate queues\n", ifp->if_xname);
+ return (1);
+ }
+
+ /* Attach the device structures */
+ if_attach(ifp);
+ ether_ifattach(ifp);
+
+ /* Initialize timeout for link state update. */
+ timeout_set(&sc->sc_timer, vic_timer, sc);
+
+ /* XXX poll */
+ timeout_set(&sc->sc_poll, vic_poll, sc);
+
+ return (0);
+}
+
+void
+vic_link_state(struct vic_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ u_int32_t status;
+ int link_state = LINK_STATE_DOWN;
+
+ status = VIC_READ(VIC_STATUS);
+ if (status & VIC_STATUS_CONNECTED)
+ link_state = LINK_STATE_UP;
+ if (ifp->if_link_state != link_state) {
+ ifp->if_link_state = link_state;
+ if_link_state_change(ifp);
+ }
+}
+
+void
+vic_shutdown(void *arg)
+{
+ struct vic_softc *sc = (struct vic_softc *)arg;
+ vic_stop(&sc->sc_ac.ac_if);
+}
+
+int
+vic_intr(void *arg)
+{
+ struct vic_softc *sc = (struct vic_softc *)arg;
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+
+ VIC_WRITE(VIC_CMD, VIC_CMD_INTR_ACK);
+
+#ifdef VIC_DEBUG
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: %s\n", ifp->if_xname, __func__);
+#endif
+
+ vic_rx_proc(sc);
+ vic_tx_proc(sc);
+
+ return (1);
+}
+
+void
+vic_rx_proc(struct vic_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct vic_rxdesc *desc;
+ struct vic_rxbuf *rxb;
+ struct mbuf *m;
+ int len, idx;
+
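+	/*
+	 * Walk the Rx ring from vd_rx_nextidx and process every
+	 * descriptor the NIC has handed back to the driver
+	 * (rx_owner == VIC_OWNER_DRIVER); stop at the first descriptor
+	 * still owned by the NIC.
+	 */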
+ for (;;) {
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sizeof(struct vic_data),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ idx = letoh32(sc->sc_data->vd_rx_nextidx);
+ if (idx >= sc->sc_data->vd_rx_length) {
+ ifp->if_ierrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: receive index error\n",
+ ifp->if_xname);
+ break;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
+ VIC_OFF_RXDESC(idx), sizeof(struct vic_rxdesc),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ desc = &sc->sc_rxq[idx];
+
+ if (desc->rx_owner != VIC_OWNER_DRIVER)
+ break;
+
+ len = letoh32(desc->rx_length);
+ ifp->if_ibytes += len;
+ ifp->if_ipackets++;
+
+ if (len < ETHER_MIN_LEN) {
+ ifp->if_iqdrops++;
+ goto nextp;
+ }
+
+ if ((rxb = (struct vic_rxbuf *)desc->rx_priv) == NULL ||
+ rxb->rxb_m == NULL) {
+ ifp->if_ierrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: receive buffer error\n",
+ ifp->if_xname);
+ break;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, rxb->rxb_map, 0,
+ rxb->rxb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, rxb->rxb_map);
+ m = rxb->rxb_m;
+ rxb->rxb_m = NULL;
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = m->m_len = len;
+
+ /* Get new mbuf for the Rx queue */
+ if ((rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_map)) == NULL) {
+ ifp->if_ierrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: receive buffer failed\n",
+ ifp->if_xname);
+ break;
+ }
+ desc->rx_physaddr = rxb->rxb_map->dm_segs->ds_addr;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+
+ ether_input_mbuf(ifp, m);
+
+ nextp:
+ desc->rx_owner = VIC_OWNER_NIC;
+ VIC_INC(sc->sc_data->vd_rx_nextidx, sc->sc_data->vd_rx_length);
+ }
+}
+
+void
+vic_tx_proc(struct vic_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct vic_txdesc *desc;
+ struct vic_txbuf *txb;
+ int idx;
+
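+	/*
+	 * Reclaim Tx descriptors that the NIC has completed and handed
+	 * back to the driver, freeing their mbufs and making room for
+	 * further transmits.
+	 */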
+ for (; sc->sc_txpending;) {
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sizeof(struct vic_data),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ idx = letoh32(sc->sc_data->vd_tx_curidx);
+ if (idx >= sc->sc_data->vd_tx_length) {
+ ifp->if_oerrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: transmit index error\n",
+ ifp->if_xname);
+ break;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
+		    VIC_OFF_TXDESC(idx), sizeof(struct vic_txdesc),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ desc = &sc->sc_txq[idx];
+
+ if (desc->tx_owner != VIC_OWNER_DRIVER)
+ break;
+
+ if ((txb = (struct vic_txbuf *)desc->tx_priv) == NULL ||
+ txb->txb_m == NULL) {
+ ifp->if_oerrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: transmit buffer error\n",
+ ifp->if_xname);
+ break;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, txb->txb_map, 0,
+ txb->txb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, txb->txb_map);
+ if (txb->txb_m != NULL) {
+ m_freem(txb->txb_m);
+ txb->txb_m = NULL;
+ ifp->if_flags &= ~IFF_OACTIVE;
+ }
+
+ sc->sc_txpending--;
+ sc->sc_data->vd_tx_stopped = 0;
+
+ VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
+ }
+
+ sc->sc_txtimeout = 0;
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ if (!IFQ_IS_EMPTY(&ifp->if_snd))
+ vic_start(ifp);
+}
+
+void
+vic_iff(struct vic_softc *sc, u_int flags)
+{
+ /* XXX ALLMULTI */
+ memset(&sc->sc_data->vd_mcastfil, 0xff,
+ sizeof(sc->sc_data->vd_mcastfil));
+ sc->sc_data->vd_iff = flags;
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sizeof(struct vic_data), BUS_DMASYNC_POSTWRITE);
+
+ VIC_WRITE(VIC_CMD, VIC_CMD_MCASTFIL);
+ VIC_WRITE(VIC_CMD, VIC_CMD_IFF);
+}
+
+void
+vic_getlladdr(struct vic_softc *sc)
+{
+ u_int32_t reg;
+ int i;
+
+ /* Get MAC address */
+ reg = sc->sc_cap & VIC_CMD_HWCAP_VPROM ? VIC_VPROM : VIC_LLADDR;
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ sc->sc_lladdr[i] = VIC_READ8(reg + i);
+
+ /* Update the MAC address register */
+ if (reg == VIC_VPROM)
+ vic_setlladdr(sc);
+}
+
+void
+vic_setlladdr(struct vic_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ VIC_WRITE8(VIC_LLADDR + i, sc->sc_lladdr[i]);
+}
+
+int
+vic_media_change(struct ifnet *ifp)
+{
+ /* Ignore */
+ return (0);
+}
+
+void
+vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+
+ imr->ifm_active = IFM_ETHER | IFM_AUTO;
+ imr->ifm_status = IFM_AVALID;
+
+ vic_link_state(sc);
+
+ if (ifp->if_link_state == LINK_STATE_UP &&
+ ifp->if_flags & IFF_UP)
+ imr->ifm_status |= IFM_ACTIVE;
+}
+
+void
+vic_start(struct ifnet *ifp)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+ struct mbuf *m;
+ int error;
+
+ if (ifp->if_flags & IFF_OACTIVE)
+ return;
+
+ for (;; error = 0) {
+ IFQ_POLL(&ifp->if_snd, m);
+ if (m == NULL)
+ break;
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+
+		if ((error = vic_tx_start(sc, m)) != 0) {
+			ifp->if_oerrors++;
+			break;
+		}
+	}
+}
+
+int
+vic_tx_start(struct vic_softc *sc, struct mbuf *m)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct vic_txbuf *txb;
+ struct vic_txdesc *desc;
+ struct mbuf *m0 = NULL;
+ int idx;
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sizeof(struct vic_data),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ idx = letoh32(sc->sc_data->vd_tx_nextidx);
+ if (idx >= sc->sc_data->vd_tx_length) {
+ ifp->if_oerrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: transmit index error\n",
+ ifp->if_xname);
+ return (EINVAL);
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map,
+ VIC_OFF_TXDESC(idx), sizeof(struct vic_txdesc),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ desc = &sc->sc_txq[idx];
+ txb = (struct vic_txbuf *)desc->tx_priv;
+
+ if (VIC_TXURN(sc)) {
+ ifp->if_flags |= IFF_OACTIVE;
+ return (ENOBUFS);
+ } else if (txb == NULL) {
+ ifp->if_oerrors++;
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: transmit buffer error\n",
+ ifp->if_xname);
+ return (ENOBUFS);
+ } else if (txb->txb_m != NULL) {
+ sc->sc_data->vd_tx_stopped = 1;
+ ifp->if_oerrors++;
+ return (ENOMEM);
+ }
+
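+	/*
+	 * If the mbuf chain cannot be loaded into the (single-segment)
+	 * DMA map directly, copy it into a freshly allocated mbuf or
+	 * cluster and load that instead.
+	 */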
+ if (bus_dmamap_load_mbuf(sc->sc_dmat, txb->txb_map,
+ m, BUS_DMA_NOWAIT) != 0) {
+ MGETHDR(m0, M_DONTWAIT, MT_DATA);
+ if (m0 == NULL)
+ return (ENOBUFS);
+ if (m->m_pkthdr.len > MHLEN) {
+ MCLGET(m0, M_DONTWAIT);
+ if (!(m0->m_flags & M_EXT)) {
+ m_freem(m0);
+ return (E2BIG);
+ }
+ }
+ m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
+ m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
+ if (bus_dmamap_load_mbuf(sc->sc_dmat, txb->txb_map,
+ m0, BUS_DMA_NOWAIT) != 0) {
+ m_freem(m0);
+ return (ENOBUFS);
+ }
+ }
+
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+ if (m0 != NULL) {
+ m_freem(m);
+ m = m0;
+ m0 = NULL;
+ }
+
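+	/* Fill in the scatter-gather entry and hand the descriptor to the NIC */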
+ desc->tx_flags = VIC_TX_FLAGS_KEEP;
+ desc->tx_sa.sa_length = 1;
+ desc->tx_sa.sa_sg[0].sg_length = htole16(m->m_len);
+ desc->tx_sa.sa_sg[0].sg_addr_low = txb->txb_map->dm_segs->ds_addr;
+ desc->tx_owner = VIC_OWNER_NIC;
+
+ if (VIC_TXURN_WARN(sc)) {
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: running out of tx descriptors\n",
+ ifp->if_xname);
+ desc->tx_flags |= VIC_TX_FLAGS_TXURN;
+ }
+
+ ifp->if_opackets++;
+ ifp->if_obytes += m->m_len;
+
+ sc->sc_txpending++;
+ sc->sc_txtimeout = VIC_TX_TIMEOUT;
+ ifp->if_timer = 1;
+
+ VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);
+
+ return (0);
+}
+
+void
+vic_watchdog(struct ifnet *ifp)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+
+ ifp->if_timer = 0;
+
+ if (sc->sc_txpending && sc->sc_txtimeout > 0) {
+ if (--sc->sc_txtimeout == 0) {
+ printf("%s: device timeout\n", ifp->if_xname);
+ ifp->if_flags &= ~IFF_RUNNING;
+ vic_init(ifp);
+ ifp->if_oerrors++;
+ return;
+ }
+ ifp->if_timer = 1;
+ }
+
+ if (!IFQ_IS_EMPTY(&ifp->if_snd))
+ vic_start(ifp);
+}
+
+int
+vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+ struct ifaddr *ifa;
+	struct ifreq *ifr = (struct ifreq *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ if ((error = ether_ioctl(ifp, &sc->sc_ac, cmd, data)) > 0) {
+ splx(s);
+ return (error);
+ }
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifa = (struct ifaddr *)data;
+ ifp->if_flags |= IFF_UP;
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ arp_ifinit(&sc->sc_ac, ifa);
+#endif
+ /* FALLTHROUGH */
+ case SIOCSIFFLAGS:
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_flags & IFF_RUNNING) == 0)
+ vic_init(ifp);
+ } else {
+ if (ifp->if_flags & IFF_RUNNING)
+ vic_stop(ifp);
+ }
+ break;
+
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
+ error = EINVAL;
+ else if (ifp->if_mtu != ifr->ifr_mtu)
+ ifp->if_mtu = ifr->ifr_mtu;
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ ifr = (struct ifreq *)data;
+ error = (cmd == SIOCADDMULTI) ?
+ ether_addmulti(ifr, &sc->sc_ac) :
+ ether_delmulti(ifr, &sc->sc_ac);
+
+ if (error == ENETRESET)
+ error = 0;
+ break;
+
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
+ break;
+
+ default:
+ error = EINVAL;
+ }
+
+ if (error == ENETRESET) {
+ if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
+ (IFF_UP | IFF_RUNNING))
+ vic_init(ifp);
+ error = 0;
+ }
+
+ splx(s);
+
+ return (error);
+}
+
+void
+vic_init(struct ifnet *ifp)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+ int s;
+
+ s = splnet();
+
+ timeout_add(&sc->sc_timer, hz * VIC_TIMER_DELAY);
+
+ vic_reset_data(sc);
+
+#ifdef notyet
+ VIC_WRITE(VIC_CMD, VIC_CMD_INTR_ENABLE);
+#endif
+
+ VIC_WRITE(VIC_DATA_ADDR, sc->sc_physaddr);
+ VIC_WRITE(VIC_DATA_LENGTH, htole32(sc->sc_size));
+ if (ifp->if_flags & IFF_PROMISC)
+ vic_iff(sc, VIC_CMD_IFF_PROMISC);
+ else
+ vic_iff(sc, VIC_CMD_IFF_BROADCAST | VIC_CMD_IFF_MULTICAST);
+
+#ifdef VIC_DEBUG
+ if (ifp->if_flags & IFF_DEBUG)
+ printf("%s: physaddr 0x%08x length 0x%08x\n",
+ ifp->if_xname, sc->sc_physaddr, sc->sc_size);
+#endif
+
+ sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
+
+ ifp->if_flags |= IFF_RUNNING;
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ /* XXX poll */
+ if (ifp->if_flags & IFF_LINK0) {
+ sc->sc_polling = 1;
+ timeout_add(&sc->sc_poll, VIC_TIMER_MS(100));
+ } else
+ sc->sc_polling = 0;
+
+ splx(s);
+}
+
+void
+vic_stop(struct ifnet *ifp)
+{
+ struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
+ int s;
+
+ s = splnet();
+
+ sc->sc_txtimeout = 0;
+ ifp->if_timer = 0;
+ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+
+#ifdef notyet
+ VIC_WRITE(VIC_CMD, VIC_CMD_INTR_DISABLE);
+#endif
+
+ VIC_WRITE(VIC_DATA_ADDR, 0);
+ vic_iff(sc, 0);
+
+ sc->sc_data->vd_tx_stopped = 1;
+ timeout_del(&sc->sc_timer);
+
+ /* XXX poll */
+ if (sc->sc_polling) {
+ sc->sc_polling = 0;
+ timeout_del(&sc->sc_poll);
+ }
+
+ splx(s);
+}
+
+void
+vic_printb(unsigned short v, char *bits)
+{
+ int i, any = 0;
+ char c;
+
+ /*
+ * Used to print capability bits on startup
+ */
+	bits++;		/* skip the leading base byte */
+	while ((i = *bits++)) {
+		if (v & (1 << (i - 1))) {
+			if (any)
+				printf(" ");
+			any = 1;
+			for (; (c = *bits) > 32; bits++)
+				printf("%c", c);
+		} else
+			for (; *bits > 32; bits++)
+				;
+	}
+}
+
+struct mbuf *
+vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map)
+{
+ struct mbuf *m;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ printf("%s: could not allocate mbuf\n",
+ sc->sc_dev.dv_xname);
+ return (NULL);
+ }
+ MCLGET(m, M_DONTWAIT);
+	if ((m->m_flags & M_EXT) == 0) {
+		printf("%s: could not allocate mbuf cluster\n",
+		    sc->sc_dev.dv_xname);
+		m_freem(m);
+		return (NULL);
+	}
+	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
+		printf("%s: could not load mbuf DMA map\n",
+		    sc->sc_dev.dv_xname);
+		m_freem(m);
+		return (NULL);
+	}
+
+ return (m);
+}
+
+int
+vic_alloc_data(struct vic_softc *sc)
+{
+ struct vic_rxbuf *rxb;
+ struct vic_txbuf *txb;
+ u_int32_t offset;
+ int i, error;
+
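+	/*
+	 * All control structures share a single DMA-able region: the
+	 * vic_data header, the Rx descriptor ring, a single descriptor
+	 * for the second (unused) Rx ring, and the Tx descriptor ring.
+	 */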
+ sc->sc_size = sizeof(struct vic_data) +
+ (sc->sc_nrxbuf + VIC_QUEUE2_SIZE) * sizeof(struct vic_rxdesc) +
+ sc->sc_ntxbuf * sizeof(struct vic_txdesc);
+
+ if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_size, 1,
+ sc->sc_size, 0, BUS_DMA_NOWAIT, &sc->sc_map)) != 0) {
+		printf("%s: could not create DMA map\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+
+ if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_size, PAGE_SIZE, 0,
+ &sc->sc_seg, 1, &sc->sc_nsegs, BUS_DMA_NOWAIT)) != 0) {
+ printf("%s: could not allocate DMA memory\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+
+ if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_seg,
+ sc->sc_nsegs, sc->sc_size, (caddr_t *)&sc->sc_data,
+ BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
+ printf("%s: could not map DMA memory\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+
+ if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_map,
+ sc->sc_data, sc->sc_size, NULL, BUS_DMA_NOWAIT)) != 0) {
+ printf("%s: could not load DMA map\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+
+ bzero(sc->sc_data, sc->sc_size);
+ sc->sc_physaddr = sc->sc_map->dm_segs->ds_addr;
+ sc->sc_data->vd_magic = VIC_MAGIC;
+ sc->sc_data->vd_length = htole32(sc->sc_size);
+ offset = (u_int32_t)sc->sc_data + sizeof(struct vic_data);
+
+#ifdef VIC_DEBUG
+ printf("%s: (rxbuf %d * %d) (txbuf %d * %d) (size %d)\n",
+ sc->sc_dev.dv_xname,
+ sc->sc_nrxbuf, sizeof(struct vic_rxdesc),
+ sc->sc_ntxbuf, sizeof(struct vic_txdesc),
+ sc->sc_size);
+#endif
+
+ /* Setup the Rx queue */
+ sc->sc_rxq = (struct vic_rxdesc *)offset;
+ sc->sc_data->vd_rx_offset = htole32(offset);
+ sc->sc_data->vd_rx_length = htole32(sc->sc_nrxbuf);
+	offset += sc->sc_nrxbuf * sizeof(struct vic_rxdesc);
+ for (i = 0; i < sc->sc_nrxbuf; i++) {
+ rxb = &sc->sc_rxbuf[i];
+ if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
+ VIC_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
+ &rxb->rxb_map)) != 0) {
+ printf("%s: could not create rx DMA map\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+
+ /* Preallocate the Rx mbuf */
+ if ((rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_map)) == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+ sc->sc_rxq[i].rx_physaddr = rxb->rxb_map->dm_segs->ds_addr;
+ }
+
+ /* Setup Rx queue 2 (unused gap) */
+ sc->sc_rxq2 = (struct vic_rxdesc *)offset;
+	sc->sc_data->vd_rx_offset2 = htole32(offset);
+	sc->sc_data->vd_rx_length2 = htole32(1);
+ sc->sc_rxq2[0].rx_owner = VIC_OWNER_DRIVER;
+ offset += sizeof(struct vic_rxdesc);
+
+ /* Setup the Tx queue */
+ sc->sc_txq = (struct vic_txdesc *)offset;
+ sc->sc_data->vd_tx_offset = htole32(offset);
+	sc->sc_data->vd_tx_length = htole32(sc->sc_ntxbuf);
+	offset += sc->sc_ntxbuf * sizeof(struct vic_txdesc);
+ for (i = 0; i < sc->sc_ntxbuf; i++) {
+ txb = &sc->sc_txbuf[i];
+ if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
+ VIC_MAX_SCATTER, MCLBYTES, 0, BUS_DMA_NOWAIT,
+ &txb->txb_map)) != 0) {
+ printf("%s: could not create tx DMA map\n",
+ sc->sc_dev.dv_xname);
+ goto fail;
+ }
+ }
+
+ return (0);
+
+ fail:
+ vic_free_data(sc);
+
+ return (error);
+}
+
+void
+vic_reset_data(struct vic_softc *sc)
+{
+ struct vic_rxbuf *rxb;
+ struct vic_txbuf *txb;
+ int i;
+
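+	/*
+	 * Reinitialize both rings: hand all Rx descriptors (with their
+	 * preloaded mbufs) to the NIC and release any Tx mbufs left
+	 * over from a previous run.
+	 */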
+ for (i = 0; i < sc->sc_nrxbuf; i++) {
+ rxb = &sc->sc_rxbuf[i];
+
+ bzero(&sc->sc_rxq[i], sizeof(struct vic_rxdesc));
+ sc->sc_rxq[i].rx_physaddr =
+ rxb->rxb_map->dm_segs->ds_addr;
+ sc->sc_rxq[i].rx_buflength = htole32(MCLBYTES);
+ sc->sc_rxq[i].rx_owner = VIC_OWNER_NIC;
+ sc->sc_rxq[i].rx_priv = rxb;
+
+ bus_dmamap_sync(sc->sc_dmat, rxb->rxb_map, 0,
+ rxb->rxb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+ }
+
+ for (i = 0; i < sc->sc_ntxbuf; i++) {
+ txb = &sc->sc_txbuf[i];
+
+ bzero(&sc->sc_txq[i], sizeof(struct vic_txdesc));
+ sc->sc_txq[i].tx_owner = VIC_OWNER_DRIVER;
+ sc->sc_txq[i].tx_priv = txb;
+
+ if (sc->sc_txbuf[i].txb_m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, txb->txb_map, 0,
+ txb->txb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, txb->txb_map);
+ m_freem(sc->sc_txbuf[i].txb_m);
+ sc->sc_txbuf[i].txb_m = NULL;
+ }
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sc->sc_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+}
+
+void
+vic_free_data(struct vic_softc *sc)
+{
+ bus_dmamap_t map;
+ int i;
+
+ if (sc->sc_data == NULL)
+ return;
+
+ /* Free Rx queue */
+ for (i = 0; i < sc->sc_nrxbuf; i++) {
+ if ((map = sc->sc_rxbuf[i].rxb_map) == NULL)
+ continue;
+ if (sc->sc_rxbuf[i].rxb_m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, map, 0,
+ map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, map);
+ m_freem(sc->sc_rxbuf[i].rxb_m);
+ }
+ bus_dmamap_destroy(sc->sc_dmat, map);
+ }
+
+ /* Free Tx queue */
+ for (i = 0; i < sc->sc_ntxbuf; i++) {
+ if ((map = sc->sc_txbuf[i].txb_map) == NULL)
+ continue;
+ if (sc->sc_txbuf[i].txb_m != NULL) {
+ bus_dmamap_sync(sc->sc_dmat, map, 0,
+ map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, map);
+ m_freem(sc->sc_txbuf[i].txb_m);
+ }
+ bus_dmamap_destroy(sc->sc_dmat, map);
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_map, 0,
+ sc->sc_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
+ bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_data, sc->sc_size);
+ bus_dmamem_free(sc->sc_dmat, &sc->sc_seg, sc->sc_nsegs);
+}
+
+void
+vic_timer(void *arg)
+{
+ struct vic_softc *sc = (struct vic_softc *)arg;
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+
+#ifdef VIC_DEBUG
+ if (ifp->if_flags & IFF_DEBUG) {
+ if (sc->sc_polling)
+ printf("%s: %s (polling #%u)\n",
+ ifp->if_xname, __func__, sc->sc_polling);
+ else
+ printf("%s: %s\n",
+ ifp->if_xname, __func__);
+ }
+#endif
+
+ /* Update link state (if changed) */
+ vic_link_state(sc);
+
+ /* Re-schedule another timeout. */
+ timeout_add(&sc->sc_timer, hz * VIC_TIMER_DELAY);
+}
+
+ /* XXX poll */
+void
+vic_poll(void *arg)
+{
+ struct vic_softc *sc = (struct vic_softc *)arg;
+ int s;
+
+ s = splnet();
+
+ vic_rx_proc(sc);
+ vic_tx_proc(sc);
+
+ VIC_INC_POS(sc->sc_polling, UINT_MAX);
+
+ timeout_add(&sc->sc_poll, VIC_TIMER_MS(100));
+
+ splx(s);
+}
diff --git a/sys/dev/pci/if_vicreg.h b/sys/dev/pci/if_vicreg.h
new file mode 100644
index 00000000000..809aea43ac1
--- /dev/null
+++ b/sys/dev/pci/if_vicreg.h
@@ -0,0 +1,195 @@
+/* $OpenBSD: if_vicreg.h,v 1.1 2006/02/25 23:49:04 reyk Exp $ */
+
+/*
+ * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEV_IC_VICREG_H
+#define _DEV_IC_VICREG_H
+
+#define VIC_MAGIC 0xbabe864f
+
+/*
+ * Register address offsets
+ */
+
+#define VIC_DATA_ADDR 0x0000 /* Shared data address */
+#define VIC_DATA_LENGTH 0x0004 /* Shared data length */
+#define VIC_Tx_ADDR 0x0008 /* Tx pointer address */
+
+/* Command register */
+#define VIC_CMD 0x000c /* Command register */
+#define VIC_CMD_INTR_ACK 0x00000001 /* Acknowledge interrupt */
+#define VIC_CMD_MCASTFIL 0x00000002 /* Multicast address filter */
+#define VIC_CMD_MCASTFIL_LENGTH 2
+#define VIC_CMD_IFF 0x00000004 /* Interface flags */
+#define VIC_CMD_IFF_PROMISC	0x0001		/* Promiscuous enabled */
+#define VIC_CMD_IFF_BROADCAST 0x0002 /* Broadcast enabled */
+#define VIC_CMD_IFF_MULTICAST 0x0004 /* Multicast enabled */
+#define VIC_CMD_RESERVED_8 0x00000008
+#define VIC_CMD_RESERVED_10 0x00000010
+#define VIC_CMD_INTR_DISABLE	0x00000020	/* Disable interrupts */
+#define VIC_CMD_INTR_ENABLE	0x00000040	/* Enable interrupts */
+#define VIC_CMD_RESERVED_80 0x00000080
+#define VIC_CMD_Tx_DONE 0x00000100 /* Tx done register */
+#define VIC_CMD_NUM_Rx_BUF 0x00000200 /* Number of Rx buffers */
+#define VIC_CMD_NUM_Tx_BUF 0x00000400 /* Number of Tx buffers */
+#define VIC_CMD_NUM_PINNED_BUF 0x00000800 /* Number of pinned buffers */
+#define VIC_CMD_HWCAP 0x00001000 /* Capability register */
+#define VIC_CMD_HWCAP_SG 0x0001 /* Scatter-gather transmits */
+#define VIC_CMD_HWCAP_CSUM_IPv4 0x0002 /* Hardware checksum of TCP/UDP */
+#define VIC_CMD_HWCAP_CSUM_ALL 0x0004 /* Hardware checksum support */
+#define VIC_CMD_HWCAP_CSUM \
+ (VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
+#define VIC_CMD_HWCAP_DMA_HIGH 0x0008 /* High DMA mapping possible */
+#define VIC_CMD_HWCAP_TOE 0x0010 /* TCP offload engine available */
+#define VIC_CMD_HWCAP_TSO 0x0020 /* TCP segmentation offload */
+#define VIC_CMD_HWCAP_TSO_SW 0x0040 /* Software TCP segmentation */
+#define VIC_CMD_HWCAP_VPROM 0x0080 /* Virtual PROM available */
+#define VIC_CMD_HWCAP_VLAN_Tx	0x0100	/* Hardware VLAN MTU Tx */
+#define VIC_CMD_HWCAP_VLAN_Rx	0x0200	/* Hardware VLAN MTU Rx */
+#define VIC_CMD_HWCAP_VLAN_SW 0x0400 /* Software VLAN MTU */
+#define VIC_CMD_HWCAP_VLAN \
+ (VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | VIC_CMD_HWCAP_VLAN_SW)
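+/* Bit names for vic_printb(): base byte, then bit-number/name pairs */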
+#define VIC_CMD_HWCAP_BITS \
+ "\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE" \
+ "\06TSO\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
+#define VIC_CMD_FEATURE 0x00002000 /* Additional feature register */
+#define VIC_CMD_FEATURE_0_Tx 0x0001
+#define VIC_CMD_FEATURE_TSO 0x0002
+
+#define VIC_LLADDR 0x0010 /* MAC address register */
+#define VIC_VERSION_MINOR 0x0018 /* Minor version register */
+#define VIC_VERSION_MAJOR 0x001c /* Major version register */
+#define VIC_VERSION_MAJOR_M 0xffff0000
+
+/* Status register */
+#define VIC_STATUS 0x0020
+#define VIC_STATUS_CONNECTED 0x00000001
+#define VIC_STATUS_ENABLED 0x00000002
+
+#define VIC_TOE_ADDR 0x0024 /* TCP offload address */
+
+/* Virtual PROM address */
+#define VIC_VPROM 0x0028
+#define VIC_VPROM_LENGTH 6
+
+/*
+ * Shared DMA data structures
+ */
+
+struct vic_sg {
+ u_int32_t sg_addr_low;
+ u_int16_t sg_addr_high;
+ u_int16_t sg_length;
+} __packed;
+
+#define VIC_SG_MAX 6
+#define VIC_SG_ADDR_MACH 0
+#define VIC_SG_ADDR_PHYS 1
+#define VIC_SG_ADDR_VIRT 3
+
+struct vic_sgarray {
+ u_int16_t sa_addr_type;
+ u_int16_t sa_length;
+ struct vic_sg sa_sg[VIC_SG_MAX];
+};
+
+struct vic_rxdesc {
+ u_int64_t rx_physaddr;
+ u_int32_t rx_buflength;
+ u_int32_t rx_length;
+ u_int16_t rx_owner;
+ u_int16_t rx_flags;
+ void *rx_priv;
+} __packed;
+
+#define VIC_RX_FLAGS_CSUMHW_OK 0x0001
+
+struct vic_txdesc {
+ u_int16_t tx_flags;
+ u_int16_t tx_owner;
+ void *tx_priv;
+ u_int32_t tx_tsomss;
+ struct vic_sgarray tx_sa;
+} __packed;
+
+#define VIC_TX_FLAGS_KEEP 0x0001
+#define VIC_TX_FLAGS_TXURN 0x0002
+#define VIC_TX_FLAGS_CSUMHW 0x0004
+#define VIC_TX_FLAGS_TSO 0x0008
+#define VIC_TX_FLAGS_PINNED 0x0010
+#define VIC_TX_FLAGS_QRETRY 0x1000
+
+struct vic_stats {
+ u_int32_t vs_tx_count;
+ u_int32_t vs_tx_packets;
+ u_int32_t vs_tx_0copy;
+ u_int32_t vs_tx_copy;
+ u_int32_t vs_tx_maxpending;
+ u_int32_t vs_tx_stopped;
+ u_int32_t vs_tx_overrun;
+ u_int32_t vs_intr;
+ u_int32_t vs_rx_packets;
+ u_int32_t vs_rx_underrun;
+} __packed;
+
+struct vic_data {
+ u_int32_t vd_magic;
+
+ u_int32_t vd_rx_length;
+ u_int32_t vd_rx_nextidx;
+ u_int32_t vd_rx_length2;
+ u_int32_t vd_rx_nextidx2;
+
+ u_int32_t vd_irq;
+ u_int32_t vd_iff;
+
+ u_int32_t vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];
+
+ u_int32_t vd_reserved1[1];
+
+ u_int32_t vd_tx_length;
+ u_int32_t vd_tx_curidx;
+ u_int32_t vd_tx_nextidx;
+ u_int32_t vd_tx_stopped;
+ u_int32_t vd_tx_triggerlvl;
+ u_int32_t vd_tx_queued;
+ u_int32_t vd_tx_minlength;
+
+ u_int32_t vd_reserved2[6];
+
+ u_int32_t vd_rx_saved_nextidx;
+ u_int32_t vd_rx_saved_nextidx2;
+ u_int32_t vd_tx_saved_nextidx;
+
+ u_int32_t vd_length;
+ u_int32_t vd_rx_offset;
+ u_int32_t vd_rx_offset2;
+ u_int32_t vd_tx_offset;
+ u_int32_t vd_debug;
+ u_int32_t vd_tx_physaddr;
+ u_int32_t vd_tx_physaddr_length;
+ u_int32_t vd_tx_maxlength;
+
+ struct vic_stats vd_stats;
+} __packed;
+
+#define VIC_OWNER_DRIVER 0
+#define VIC_OWNER_DRIVER_PEND 1
+#define VIC_OWNER_NIC 2
+#define VIC_OWNER_NIC_PEND 3
+
+#endif /* _DEV_IC_VICREG_H */
diff --git a/sys/dev/pci/if_vicvar.h b/sys/dev/pci/if_vicvar.h
new file mode 100644
index 00000000000..1458acccac2
--- /dev/null
+++ b/sys/dev/pci/if_vicvar.h
@@ -0,0 +1,107 @@
+/* $OpenBSD: if_vicvar.h,v 1.1 2006/02/25 23:49:04 reyk Exp $ */
+
+/*
+ * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEV_IC_VICVAR_H
+#define _DEV_IC_VICVAR_H
+
+#define VIC_DEBUG 1
+
+#define VIC_BAR0 0x10 /* Base Address Register */
+
+#define VIC_NBUF 100
+#define VIC_NBUF_MAX 128
+#define VIC_MAX_SCATTER 1 /* 8? */
+#define VIC_QUEUE_SIZE VIC_NBUF_MAX
+#define VIC_QUEUE2_SIZE 1
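+/* Ring index helpers: VIC_INC wraps at the ring size, VIC_INC_POS skips zero */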
+#define VIC_INC(_x, _y) (_x) = ((_x) + 1) % (_y)
+#define VIC_INC_POS(_x, _y)	(_x) = ((_x) % (_y)) + 1
+#define VIC_TX_TIMEOUT 5
+#define VIC_TIMER_DELAY 2
+#define VIC_TIMER_MS(_ms)	((_ms) * hz / 1000)
+
+#define VIC_TXURN_WARN(_sc) ((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
+#define VIC_TXURN(_sc) ((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)
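+/*
+ * Byte offsets of the n-th Rx/Tx descriptor within the shared data
+ * region, matching the layout set up in vic_alloc_data().
+ */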
+#define VIC_OFF_RXDESC(_n) \
+ (sizeof(struct vic_data) + ((_n) * sizeof(struct vic_rxdesc)))
+#define VIC_OFF_TXDESC(_n) \
+ (sizeof(struct vic_data) + \
+ ((sc->sc_nrxbuf + VIC_QUEUE2_SIZE) * sizeof(struct vic_rxdesc)) + \
+ ((_n) * sizeof(struct vic_txdesc)))
+
+#define VIC_WRITE(_reg, _val) \
+ bus_space_write_4(sc->sc_st, sc->sc_sh, (_reg), (_val))
+#define VIC_READ(_reg) \
+ bus_space_read_4(sc->sc_st, sc->sc_sh, (_reg))
+#define VIC_WRITE8(_reg, _val) \
+ bus_space_write_1(sc->sc_st, sc->sc_sh, (_reg), (_val))
+#define VIC_READ8(_reg) \
+ bus_space_read_1(sc->sc_st, sc->sc_sh, (_reg))
+
+struct vic_rxbuf {
+ bus_dmamap_t rxb_map;
+ struct mbuf *rxb_m;
+};
+
+struct vic_txbuf {
+ bus_dmamap_t txb_map;
+ struct mbuf *txb_m;
+};
+
+struct vic_softc {
+ struct device sc_dev;
+
+ void *sc_psc;
+ struct arpcom sc_ac;
+ struct ifmedia sc_media;
+ struct timeout sc_timer;
+
+ struct timeout sc_poll; /* XXX poll */
+ u_int sc_polling;
+
+ int (*sc_enable)(struct vic_softc *);
+ void (*sc_disable)(struct vic_softc *);
+ void (*sc_power)(struct vic_softc *, int);
+
+ bus_space_tag_t sc_st;
+ bus_space_handle_t sc_sh;
+ bus_dma_tag_t sc_dmat;
+
+ u_int32_t sc_ver_major, sc_ver_minor;
+ u_int32_t sc_cap, sc_feature;
+
+ u_int8_t sc_lladdr[ETHER_ADDR_LEN];
+
+ u_int32_t sc_nrxbuf, sc_ntxbuf;
+
+ struct vic_rxdesc *sc_rxq, *sc_rxq2;
+ struct vic_rxbuf sc_rxbuf[VIC_QUEUE_SIZE];
+
+ struct vic_txdesc *sc_txq;
+ struct vic_txbuf sc_txbuf[VIC_QUEUE_SIZE];
+ int sc_txpending;
+ int sc_txtimeout;
+
+ struct vic_data *sc_data;
+ bus_addr_t sc_physaddr;
+ bus_dmamap_t sc_map;
+ bus_dma_segment_t sc_seg;
+ int sc_nsegs;
+ int sc_size;
+};
+
+#endif /* _DEV_IC_VICVAR_H */