summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--sys/dev/pci/files.pci7
-rw-r--r--sys/dev/pci/if_gx.c1904
-rw-r--r--sys/dev/pci/if_gxreg.h568
-rw-r--r--sys/dev/pci/if_gxvar.h178
4 files changed, 2656 insertions, 1 deletions
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index 91a84a85fc1..6680f5dd3e9 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -1,4 +1,4 @@
-# $OpenBSD: files.pci,v 1.128 2002/04/01 11:26:32 matthieu Exp $
+# $OpenBSD: files.pci,v 1.129 2002/04/02 13:03:31 nate Exp $
# $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
#
# Config file and device description for machine-independent PCI code.
@@ -430,3 +430,8 @@ file dev/pci/if_bge.c bge
device stge: ether, ifnet, mii, ifmedia, mii_phy, mii_bitbang
attach stge at pci
file dev/pci/if_stge.c stge
+
+# Intel gigabit ethernet
+device gx: ether, ifnet, mii, ifmedia, mii_phy, mii_bitbang
+attach gx at pci
+file dev/pci/if_gx.c gx
diff --git a/sys/dev/pci/if_gx.c b/sys/dev/pci/if_gx.c
new file mode 100644
index 00000000000..10564235147
--- /dev/null
+++ b/sys/dev/pci/if_gx.c
@@ -0,0 +1,1904 @@
+/* $OpenBSD: if_gx.c,v 1.1 2002/04/02 13:03:31 nate Exp $ */
+/*-
+ * Copyright (c) 1999,2000,2001 Jonathan Lemon
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "bpfilter.h"
+#include "vlan.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/device.h>
+#include <sys/queue.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#if NVLAN > 0
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/if_gxreg.h>
+#include <dev/pci/if_gxvar.h>
+
+#include <uvm/uvm_extern.h>
+#include <uvm/uvm_pmap.h> /* for vtophys */
+
/* Default interrupt-moderation delays written to the chip's RX/TX
 * delay timers at init time (units are chip ticks — see gx_init()). */
#define TUNABLE_TX_INTR_DELAY	100
#define TUNABLE_RX_INTR_DELAY	100

/* Checksum-offload feature set; CSUM_* look like FreeBSD flags and are
 * only referenced under "#ifdef notyet" — TODO confirm before enabling. */
#define GX_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
+
/*
 * Various supported device vendors/types and their names.
 * One entry per supported PCI ID; matched against pa_id in gx_match().
 */
struct gx_device {
	u_int16_t	vendor;		/* PCI vendor ID */
	u_int16_t	device;		/* PCI product ID */
	int		version_flags;	/* GXF_* chip-family/quirk flags */
	u_int32_t	version_ipg;	/* packed IPG timing (ipgt | r1<<10 | r2<<20) */
	char		*name;		/* printable device name; NULL ends the table */
};
+
+struct gx_device gx_devs[] = {
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
+ GXF_WISEMAN | GXF_FORCE_TBI | GXF_OLD_REGS,
+ 10 | 2 << 10 | 10 << 20,
+ "Intel Gigabit Ethernet (82542)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_SC,
+ GXF_LIVENGOOD | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 6 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82543GC-F)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_SC,
+ GXF_LIVENGOOD | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 6 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82543GC-F)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_CU,
+ GXF_LIVENGOOD | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 8 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82543GC-T)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_CU,
+ GXF_CORDOVA | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 8 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82544EI-T)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_SC,
+ GXF_CORDOVA | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 6 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82544EI-F)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC,
+ GXF_CORDOVA | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 8 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82544GC-T)" },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_64,
+ GXF_CORDOVA | GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
+ 8 | 8 << 10 | 6 << 20,
+ "Intel Gigabit Ethernet (82544GC-T)" },
+ { 0, 0, 0, NULL }
+};
+
/* Ring/DMA register offsets for 82543 (livengood) and newer parts. */
struct gx_regs new_regs = {
	GX_RX_RING_BASE, GX_RX_RING_LEN,
	GX_RX_RING_HEAD, GX_RX_RING_TAIL,
	GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

	GX_TX_RING_BASE, GX_TX_RING_LEN,
	GX_TX_RING_HEAD, GX_TX_RING_TAIL,
	GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
/* Ring/DMA register offsets for the older 82542 (GXF_OLD_REGS parts). */
struct gx_regs old_regs = {
	GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
	GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
	GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

	GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
	GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
	GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};
+
/* autoconf entry points */
int gx_probe(struct device *, void *, void *);
void gx_attach(struct device *, struct device *, void *);
int gx_detach(void *xsc);
void gx_shutdown(void *xsc);

/* interrupt and data path */
void gx_rxeof(struct gx_softc *gx);
void gx_txeof(struct gx_softc *gx);
int gx_encap(struct gx_softc *gx, struct mbuf *m_head);
int gx_intr(void *xsc);
void gx_init(void *xsc);

/* device identification, EEPROM, media selection and MII access */
struct gx_device *gx_match(void *aux);
void gx_eeprom_getword(struct gx_softc *gx, int addr,
    u_int16_t *dest);
int gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
    int cnt);
int gx_ifmedia_upd(struct ifnet *ifp);
void gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
int gx_miibus_livengood_readreg(struct device *dev, int phy, int reg);
void gx_miibus_livengood_writereg(struct device *dev, int phy, int reg, int value);
int gx_miibus_cordova_readreg(struct device *dev, int phy, int reg);
void gx_miibus_cordova_writereg(struct device *dev, int phy, int reg, int value);
void gx_miibus_statchg(struct device *dev);
void gx_mii_shiftin(struct gx_softc *gx, int data, int length);
u_int16_t gx_mii_shiftout(struct gx_softc *gx);
int gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
void gx_setmulti(struct gx_softc *gx);
void gx_reset(struct gx_softc *gx);
void gx_phy_reset(struct gx_softc *gx);
void gx_release(struct gx_softc *gx);
void gx_stop(struct gx_softc *gx);
void gx_watchdog(struct ifnet *ifp);
void gx_start(struct ifnet *ifp);

/* descriptor ring management */
int gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m);
int gx_init_rx_ring(struct gx_softc *gx);
void gx_free_rx_ring(struct gx_softc *gx);
int gx_init_tx_ring(struct gx_softc *gx);
void gx_free_tx_ring(struct gx_softc *gx);

/* debug printf helpers; compiled in only when GX_DEBUG is defined */
#ifdef GX_DEBUG
#define DPRINTF(x) if (gxdebug) printf x
#define DPRINTFN(n,x) if (gxdebug >= (n)) printf x
int gxdebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
+
+struct gx_device *
+gx_match(void *aux)
+{
+ struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+ int i;
+
+ for (i = 0; gx_devs[i].name != NULL; i++) {
+ if ((PCI_VENDOR(pa->pa_id) == gx_devs[i].vendor) &&
+ (PCI_PRODUCT(pa->pa_id) == gx_devs[i].device))
+ return (&gx_devs[i]);
+ }
+ return (NULL);
+}
+
+int
+gx_probe(struct device *parent, void *match, void *aux)
+{
+ if (gx_match(aux) != NULL)
+ return (1);
+
+ return (0);
+}
+
+void
+gx_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct gx_softc *gx = (struct gx_softc *)self;
+ struct pci_attach_args *pa = aux;
+ pci_chipset_tag_t pc = pa->pa_pc;
+ pci_intr_handle_t ih;
+ const char *intrstr = NULL;
+ bus_addr_t iobase;
+ bus_size_t iosize;
+ bus_dma_segment_t seg;
+ int i, rseg;
+ u_int32_t command;
+ struct gx_device *gx_dev;
+ struct ifnet *ifp;
+ int s, error = 0;
+ caddr_t kva;
+
+ s = splimp();
+
+ gx_dev = gx_match(aux);
+ gx->gx_vflags = gx_dev->version_flags;
+ gx->gx_ipg = gx_dev->version_ipg;
+
+ mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
+
+ GX_LOCK(gx);
+
+ /*
+ * Map control/status registers.
+ */
+ command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+ command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
+ if (gx->gx_vflags & GXF_ENABLE_MWI)
+ command |= PCIM_CMD_MWIEN;
+ pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
+ command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
+
+/* XXX check cache line size? */
+
+ if ((command & PCI_COMMAND_MEM_ENABLE) == 0) {
+ printf(": failed to enable memory mapping!\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ if (pci_mem_find(pc, pa->pa_tag, GX_PCI_LOMEM, &iobase, &iosize,
+ NULL)) {
+ printf(": can't find mem space\n");
+ goto fail;
+ }
+
+ DPRINTFN(5, ("%s: bus_space_map\n", gx->gx_dev.dv_xname));
+ if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &gx->gx_bhandle)) {
+ printf(": can't map mem space\n");
+ goto fail;
+ }
+
+ gx->gx_btag = pa->pa_memt;
+
+ /* Allocate interrupt */
+ DPRINTFN(5, ("%s: pci_intr_map\n", gx->gx_dev.dv_xname));
+ if (pci_intr_map(pa, &ih)) {
+ printf(": couldn't map interrupt\n");
+ goto fail;
+ }
+
+ DPRINTFN(5, ("%s: pci_intr_string\n", gx->gx_dev.dv_xname));
+ intrstr = pci_intr_string(pc, ih);
+
+ DPRINTFN(5, ("%s: pci_intr_establish\n", gx->gx_dev.dv_xname));
+ gx->gx_intrhand = pci_intr_establish(pc, ih, IPL_NET, gx_intr, gx,
+ gx->gx_dev.dv_xname);
+
+ if (gx->gx_intrhand == NULL) {
+ printf(": couldn't establish interrupt");
+ if (intrstr != NULL)
+ printf(" at %s", intrstr);
+ printf("\n");
+ goto fail;
+ }
+ printf(": %s", intrstr);
+
+ /* compensate for different register mappings */
+ if (gx->gx_vflags & GXF_OLD_REGS)
+ gx->gx_reg = old_regs;
+ else
+ gx->gx_reg = new_regs;
+
+ if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
+ GX_EEMAP_MAC, 3)) {
+ printf("failed to read station address\n");
+ error = ENXIO;
+ goto fail;
+ }
+
+ printf(": address: %s\n", ether_sprintf(gx->arpcom.ac_enaddr));
+
+ /* Allocate the ring buffers. */
+ gx->gx_dmatag = pa->pa_dmat;
+ DPRINTFN(5, ("%s: bus_dmamem_alloc\n", gx->gx_dev.dv_xname));
+ if (bus_dmamem_alloc(gx->gx_dmatag, sizeof(struct gx_ring_data),
+ PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
+ printf("%s: can't alloc rx buffers\n", gx->gx_dev.dv_xname);
+ goto fail;
+ }
+ DPRINTFN(5, ("%s: bus_dmamem_map\n", gx->gx_dev.dv_xname));
+ if (bus_dmamem_map(gx->gx_dmatag, &seg, rseg,
+ sizeof(struct gx_ring_data), &kva,
+ BUS_DMA_NOWAIT)) {
+ printf("%s: can't map dma buffers (%d bytes)\n",
+ gx->gx_dev.dv_xname, sizeof(struct gx_ring_data));
+ bus_dmamem_free(gx->gx_dmatag, &seg, rseg);
+ goto fail;
+ }
+ DPRINTFN(5, ("%s: bus_dmamem_create\n", gx->gx_dev.dv_xname));
+ if (bus_dmamap_create(gx->gx_dmatag, sizeof(struct gx_ring_data), 1,
+ sizeof(struct gx_ring_data), 0,
+ BUS_DMA_NOWAIT, &gx->gx_ring_map)) {
+ printf("%s: can't create dma map\n", gx->gx_dev.dv_xname);
+ bus_dmamem_unmap(gx->gx_dmatag, kva,
+ sizeof(struct gx_ring_data));
+ bus_dmamem_free(gx->gx_dmatag, &seg, rseg);
+ goto fail;
+ }
+ DPRINTFN(5, ("%s: bus_dmamem_load\n", gx->gx_dev.dv_xname));
+ if (bus_dmamap_load(gx->gx_dmatag, gx->gx_ring_map, kva,
+ sizeof(struct gx_ring_data), NULL,
+ BUS_DMA_NOWAIT)) {
+ bus_dmamap_destroy(gx->gx_dmatag, gx->gx_ring_map);
+ bus_dmamem_unmap(gx->gx_dmatag, kva,
+ sizeof(struct gx_ring_data));
+ bus_dmamem_free(gx->gx_dmatag, &seg, rseg);
+ goto fail;
+ }
+
+ gx->gx_rdata = (struct gx_ring_data *)kva;
+ bzero(gx->gx_rdata, sizeof(struct gx_ring_data));
+ bzero(&gx->gx_cdata, sizeof(struct gx_chain_data));
+
+ DPRINTFN(5, ("%s: gx->gx_rdata = 0x%x, size = %d\n",
+ gx->gx_dev.dv_xname, gx->gx_rdata,
+ sizeof(struct gx_ring_data)));
+
+ DPRINTFN(5, ("%s: gx = 0x%x, size = %d\n",
+ gx->gx_dev.dv_xname, gx, sizeof(struct gx_softc)));
+
+ for (i = 0; i < GX_RX_RING_CNT; i++) {
+ if (bus_dmamap_create(gx->gx_dmatag, MCLBYTES, 1, MCLBYTES,
+ 0, BUS_DMA_NOWAIT, &gx->gx_cdata.gx_rx_map[i]))
+ printf("%s: can't create dma map\n",
+ gx->gx_dev.dv_xname);
+ }
+
+ for (i = 0; i < GX_TX_RING_CNT; i++) {
+ if (bus_dmamap_create(gx->gx_dmatag, MCLBYTES, GX_NTXSEG,
+ MCLBYTES, 0, BUS_DMA_NOWAIT, &gx->gx_cdata.gx_tx_map[i]))
+ printf("%s: can't create dma map\n",
+ gx->gx_dev.dv_xname);
+ }
+
+ /* Set default tuneable values. */
+ gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
+ gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;
+
+ /* Set up ifnet structure */
+ ifp = &gx->arpcom.ac_if;
+ ifp->if_softc = gx;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = gx_ioctl;
+ ifp->if_output = ether_output;
+ ifp->if_start = gx_start;
+ ifp->if_watchdog = gx_watchdog;
+ ifp->if_baudrate = 1000000000;
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_snd.ifq_maxlen = GX_TX_RING_CNT - 1;
+ DPRINTFN(5, ("%s: bcopy\n", gx->gx_dev.dv_xname));
+ bcopy(gx->gx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
+
+ /* figure out transciever type */
+ if (gx->gx_vflags & GXF_FORCE_TBI ||
+ CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
+ gx->gx_tbimode = 1;
+
+ /*
+ * Do MII setup.
+ */
+ DPRINTFN(5, ("%s: mii setup\n", gx->gx_dev.dv_xname));
+ if (!gx->gx_tbimode && (gx->gx_vflags & GXF_LIVENGOOD)) {
+ gx->gx_mii.mii_ifp = ifp;
+ gx->gx_mii.mii_readreg = gx_miibus_livengood_readreg;
+ gx->gx_mii.mii_writereg = gx_miibus_livengood_writereg;
+ gx->gx_mii.mii_statchg = gx_miibus_statchg;
+ } else if (!gx->gx_tbimode && (gx->gx_vflags & GXF_CORDOVA)) {
+ gx->gx_mii.mii_ifp = ifp;
+ gx->gx_mii.mii_readreg = gx_miibus_cordova_readreg;
+ gx->gx_mii.mii_writereg = gx_miibus_cordova_writereg;
+ gx->gx_mii.mii_statchg = gx_miibus_statchg;
+ } else {
+ gx->gx_mii.mii_ifp = NULL;
+ gx->gx_mii.mii_readreg = NULL;
+ gx->gx_mii.mii_writereg = NULL;
+ gx->gx_mii.mii_statchg = NULL;
+ }
+
+ if (gx->gx_tbimode) {
+ /* SERDES transceiver */
+ ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
+ gx_ifmedia_sts);
+ ifmedia_add(&gx->gx_media,
+ IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
+ ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
+ ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
+ } else {
+ /*
+ * Do transceiver setup.
+ */
+ if (gx->gx_vflags & GXF_LIVENGOOD) {
+ u_int32_t tmp;
+
+ /* settings to talk to PHY */
+ tmp = CSR_READ_4(gx, GX_CTRL);
+ tmp |= GX_CTRL_FORCESPEED | GX_CTRL_FORCEDUPLEX |
+ GX_CTRL_SET_LINK_UP;
+ CSR_WRITE_4(gx, GX_CTRL, tmp);
+ }
+
+ /* GMII/MII transceiver */
+ gx_phy_reset(gx);
+ ifmedia_init(&gx->gx_mii.mii_media, 0, gx_ifmedia_upd,
+ gx_ifmedia_sts);
+ mii_attach(&gx->gx_dev, &gx->gx_mii, 0xffffffff,
+ MII_PHY_ANY, MII_OFFSET_ANY, 0);
+
+
+ if (LIST_FIRST(&gx->gx_mii.mii_phys) == NULL) {
+ printf("%s: no PHY found!\n", gx->gx_dev.dv_xname);
+ ifmedia_add(&gx->gx_mii.mii_media,
+ IFM_ETHER|IFM_MANUAL, 0, NULL);
+ ifmedia_set(&gx->gx_mii.mii_media,
+ IFM_ETHER|IFM_MANUAL);
+ } else
+ ifmedia_set(&gx->gx_mii.mii_media,
+ IFM_ETHER|IFM_AUTO);
+ }
+
+ /*
+ * Call MI attach routines.
+ */
+ DPRINTFN(5, ("%s: if_attach\n", gx->gx_dev.dv_xname));
+ if_attach(ifp);
+ DPRINTFN(5, ("%s: ether_ifattach\n", gx->gx_dev.dv_xname));
+ ether_ifattach(ifp);
+ DPRINTFN(5, ("%s: timeout_set\n", gx->gx_dev.dv_xname));
+
+ GX_UNLOCK(gx);
+ splx(s);
+ return;
+
+fail:
+ GX_UNLOCK(gx);
+ gx_release(gx);
+ splx(s);
+}
+
/*
 * Release per-descriptor DMA maps.  The bus/interrupt/ring-memory
 * teardown below is still FreeBSD-style code and is compiled out;
 * NOTE(review): it references `seg'/`rseg' which do not exist in this
 * scope — it will not compile if "notyet" is ever defined.
 */
void
gx_release(struct gx_softc *gx)
{
	int i;

#ifdef notyet
	bus_generic_detach(gx->gx_dev);
	if (gx->gx_miibus)
		device_delete_child(gx->gx_dev, gx->gx_miibus);

	if (gx->gx_intrhand)
		bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
	if (gx->gx_irq)
		bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
	if (gx->gx_res)
		bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
		    GX_PCI_LOMEM, gx->gx_res);

	bus_dmamap_destroy(gx->gx_dmatag, gx->gx_ring_map);
	bus_dmamem_unmap(gx->gx_dmatag, gx->gx_rdata,
	    sizeof(struct gx_ring_data));
	bus_dmamem_free(gx->gx_dmatag, &seg, rseg);

#endif

	/* destroy the per-buffer RX and TX DMA maps created at attach */
	for (i = 0; i < GX_RX_RING_CNT; i++)
		bus_dmamap_destroy(gx->gx_dmatag, gx->gx_cdata.gx_rx_map[i]);

	for (i = 0; i < GX_TX_RING_CNT; i++)
		bus_dmamap_destroy(gx->gx_dmatag, gx->gx_cdata.gx_tx_map[i]);
}
+
/*
 * (Re)initialize the chip into a running state: reset, program the
 * station address and multicast filter, set up the RX/TX rings,
 * DMA/IPG/flow-control parameters, enable interrupts and set the
 * current media.  Runs at splimp() under the softc lock.  The register
 * write ordering below follows the hardware bring-up sequence; do not
 * reorder casually.
 */
void
gx_init(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifnet *ifp;
	struct device *dev;	/* XXX assigned below but never used */
	u_int16_t *m;
	u_int32_t ctrl;
	int s, i;

	dev = &gx->gx_dev;
	ifp = &gx->arpcom.ac_if;

	s = splimp();
	GX_LOCK(gx);

	/* Disable host interrupts, halt chip. */
	gx_reset(gx);

	/* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
	gx_stop(gx);

	/* Load our MAC address, invalidate other 15 RX addresses. */
	/* address is written as two 32-bit halves, low word first */
	m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
	if (gx->gx_vflags & GXF_CORDOVA) {
		CSR_WRITE_4(gx, GX_RX_CORDOVA_ADDR_BASE, (m[1] << 16) | m[0]);
		CSR_WRITE_4(gx, GX_RX_CORDOVA_ADDR_BASE + 4, m[2] | GX_RA_VALID);
		for (i = 1; i < 16; i++)
			CSR_WRITE_8(gx, GX_RX_CORDOVA_ADDR_BASE + i * 8, (u_quad_t)0);
	} else {
		CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
		CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
		for (i = 1; i < 16; i++)
			CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);
	}

	/* Program multicast filter. */
	gx_setmulti(gx);

#if 1
	/* Init RX ring. */
	gx_init_rx_ring(gx);

	/* Init TX ring. */
	gx_init_tx_ring(gx);
#endif

	if (gx->gx_vflags & GXF_DMA) {
		/* set up DMA control */
		CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
		CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
	}

	/* enable receiver */
	ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_HALF | GX_RXC_RX_BSIZE_2K;
	ctrl |= GX_RXC_BCAST_ACCEPT;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		ctrl |= GX_RXC_UNI_PROMISC;

	/* This is required if we want to accept jumbo frames */
	if (ifp->if_mtu > ETHERMTU)
		ctrl |= GX_RXC_LONG_PKT_ENABLE;

#ifdef notyet
	/* setup receive checksum control */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
		    GX_CSUM_TCP/* | GX_CSUM_IP*/);

	/* setup transmit checksum control */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = GX_CSUM_FEATURES;

	ctrl |= GX_RXC_STRIP_ETHERCRC; /* not on 82542? */
#endif
	CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

	/* enable transmitter */
	ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

	/* XXX we should support half-duplex here too... */
	ctrl |= GX_TXC_COLL_TIME_FDX;

	CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

	/*
	 * set up recommended IPG times, which vary depending on chip type:
	 *	  IPG transmit time:  80ns
	 *	  IPG receive time 1: 20ns
	 *	  IPG receive time 2: 80ns
	 */
	CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

	/* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, GX_FLOW_CTRL_CONST);
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, GX_FLOW_CTRL_CONST_HIGH);

	/* set up 802.3x MAC flow control type -- 88:08 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, GX_FLOW_CTRL_TYPE_CONST);

	/* Set up tuneables */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

	ctrl = 0;

#if 0
	if (gx->gx_vflags & GXF_CORDOVA) {
		u_int16_t cfg1, cfg2, gpio;
		gx_read_eeprom(gx, (caddr_t)&cfg1, GX_EEMAP_INIT1, 1);
		gx_read_eeprom(gx, (caddr_t)&cfg2, GX_EEMAP_INIT2, 1);
		gx_read_eeprom(gx, (caddr_t)&gpio, GX_EEMAP_SWDPIN, 1);

		if (cfg1 & GX_EEMAP_INIT1_ILOS)
			ctrl |= GX_CTRL_INVERT_LOS;

		ctrl |= ((gpio >> GX_EEMAP_GPIO_DIR_SHIFT) & 0xf) <<
		    GX_CTRL_GPIO_DIR_SHIFT;

		ctrl |= ((gpio >> GX_EEMAP_GPIO_SHIFT) & 0xf) <<
		    GX_CTRL_GPIO_SHIFT;
	}
#endif

	/*
	 * Configure chip for correct operation.
	 */
	ctrl |= GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
	ctrl |= GX_CTRL_BIGENDIAN;
#endif
	ctrl |= GX_CTRL_VLAN_ENABLE;

	if (gx->gx_tbimode) {
		/*
		 * It seems that TXCW must be initialized from the EEPROM
		 * manually.
		 *
		 * XXX
		 * should probably read the eeprom and re-insert the
		 * values here.
		 */
#define TXCONFIG_WORD	0x000001A0
		CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

		/* turn on hardware autonegotiate */
		GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
	} else {
		/*
		 * Auto-detect speed from PHY, instead of using direct
		 * indication.  The SLU bit doesn't force the link, but
		 * must be present for ASDE to work.
		 */
		gx_phy_reset(gx);
		ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
	}

	/*
	 * Take chip out of reset and start it running.
	 */
	CSR_WRITE_4(gx, GX_CTRL, ctrl);

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Set the current media.
	 */
	if (gx->gx_mii.mii_ifp != NULL) {
		mii_mediachg(&gx->gx_mii);
	} else {
		/* TBI parts: force the configured media through upd() */
		struct ifmedia *ifm = &gx->gx_media;
		int tmp = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		gx_ifmedia_upd(ifp);
		ifm->ifm_media = tmp;
	}

	/*
	 * XXX
	 * Have the LINK0 flag force the link in TBI mode.
	 */
	if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
		GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
	}

#if 0
printf("66mhz: %s  64bit: %s\n",
    CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
    CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

	GX_UNLOCK(gx);
	splx(s);
}
+
/*
 * Quiesce the chip before reboot so in-flight DMA cannot confuse the
 * kernel's probe routines or scribble over memory.
 */
void
gx_shutdown(void *xsc)
{
	struct gx_softc *sc = (struct gx_softc *)xsc;

	gx_reset(sc);
	gx_stop(sc);
}
+
#ifdef notyet
/*
 * Detach (compiled out): still contains FreeBSD API calls —
 * two-argument ether_ifdetach(), contigfree(), mtx_destroy() —
 * that need porting before this can be enabled.
 */
int
gx_detach(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifnet *ifp;
	int s;

	s = splimp();

	ifp = &gx->arpcom.ac_if;
	GX_LOCK(gx);

	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	gx_reset(gx);
	gx_stop(gx);
	ifmedia_removeall(&gx->gx_media);
	gx_release(gx);

	contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);

	GX_UNLOCK(gx);
	mtx_destroy(&gx->gx_mtx);
	splx(s);

	return (0);
}
#endif
+
+void
+gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
+{
+ u_int16_t word = 0;
+ u_int32_t base, reg;
+ int x;
+
+ addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
+ (addr & ((1 << GX_EE_ADDR_SIZE) - 1));
+
+ base = CSR_READ_4(gx, GX_EEPROM_CTRL);
+ base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
+ base |= GX_EE_SELECT;
+
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
+
+ for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
+ reg = base | (addr & x ? GX_EE_DATA_IN : 0);
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
+ DELAY(10);
+ }
+
+ for (x = 1 << 15; x; x >>= 1) {
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
+ DELAY(10);
+ reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
+ if (reg & GX_EE_DATA_OUT)
+ word |= x;
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
+ DELAY(10);
+ }
+
+ CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
+ DELAY(10);
+
+ *dest = word;
+}
+
+int
+gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
+{
+ u_int16_t *word;
+ int i;
+
+ word = (u_int16_t *)dest;
+ for (i = 0; i < cnt; i ++) {
+ gx_eeprom_getword(gx, off + i, word);
+ word++;
+ }
+ return (0);
+}
+
+/*
+ * Set media options.
+ */
+int
+gx_ifmedia_upd(struct ifnet *ifp)
+{
+ struct gx_softc *gx;
+ struct ifmedia *ifm;
+ struct mii_data *mii;
+
+ gx = ifp->if_softc;
+
+ if (gx->gx_tbimode) {
+ ifm = &gx->gx_media;
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+ switch (IFM_SUBTYPE(ifm->ifm_media)) {
+ case IFM_AUTO:
+ GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
+ GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
+ GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
+ break;
+ case IFM_1000_SX:
+ printf("%s: manual config not supported yet.\n",
+ gx->gx_dev.dv_xname);
+#if 0
+ GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
+ config = /* bit symbols for 802.3z */0;
+ ctrl |= GX_CTRL_SET_LINK_UP;
+ if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
+ ctrl |= GX_CTRL_DUPLEX;
+#endif
+ break;
+ default:
+ return (EINVAL);
+ }
+ } else {
+ ifm = &gx->gx_media;
+
+ /*
+ * 1000TX half duplex does not work.
+ */
+ if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
+ IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_TX &&
+ (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
+ return (EINVAL);
+ mii = &gx->gx_mii;
+ mii_mediachg(mii);
+ }
+ return (0);
+}
+
+/*
+ * Report current media status.
+ */
+void
+gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct gx_softc *gx;
+ struct mii_data *mii;
+ u_int32_t status;
+
+ gx = ifp->if_softc;
+
+ if (gx->gx_tbimode) {
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ status = CSR_READ_4(gx, GX_STATUS);
+ if ((status & GX_STAT_LINKUP) == 0)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
+ } else {
+ mii = &gx->gx_mii;
+ mii_pollstat(mii);
+ if ((mii->mii_media_active & (IFM_1000_TX | IFM_HDX)) ==
+ (IFM_1000_TX | IFM_HDX))
+ mii->mii_media_active = IFM_ETHER | IFM_NONE;
+ ifmr->ifm_active = mii->mii_media_active;
+ ifmr->ifm_status = mii->mii_media_status;
+ }
+}
+
+void
+gx_mii_shiftin(struct gx_softc *gx, int data, int length)
+{
+ u_int32_t reg, x;
+
+ /*
+ * Set up default GPIO direction + PHY data out.
+ */
+ reg = CSR_READ_4(gx, GX_CTRL);
+ reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
+ reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;
+
+ /*
+ * Shift in data to PHY.
+ */
+ for (x = 1 << (length - 1); x; x >>= 1) {
+ if (data & x)
+ reg |= GX_CTRL_PHY_IO;
+ else
+ reg &= ~GX_CTRL_PHY_IO;
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+ }
+}
+
+u_int16_t
+gx_mii_shiftout(struct gx_softc *gx)
+{
+ u_int32_t reg;
+ u_int16_t data;
+ int x;
+
+ /*
+ * Set up default GPIO direction + PHY data in.
+ */
+ reg = CSR_READ_4(gx, GX_CTRL);
+ reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
+ reg |= GX_CTRL_GPIO_DIR;
+
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+ /*
+ * Shift out data from PHY.
+ */
+ data = 0;
+ for (x = 1 << 15; x; x >>= 1) {
+ CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
+ DELAY(10);
+ if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
+ data |= x;
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+ }
+ CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
+ DELAY(10);
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+ DELAY(10);
+
+ return (data);
+}
+
+int
+gx_miibus_livengood_readreg(struct device *dev, int phy, int reg)
+{
+ struct gx_softc *gx = (struct gx_softc *)dev;
+
+ if (gx->gx_tbimode)
+ return (0);
+
+ gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
+ gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
+ (phy << 5) | reg, GX_PHY_READ_LEN);
+ return (gx_mii_shiftout(gx));
+}
+
+void
+gx_miibus_livengood_writereg(struct device *dev, int phy, int reg, int value)
+{
+ struct gx_softc *gx = (struct gx_softc *)dev;
+
+ if (gx->gx_tbimode)
+ return;
+
+ gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
+ gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
+ (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
+ (value & 0xffff), GX_PHY_WRITE_LEN);
+}
+
+/*
+ * gx_miibus_cordova_readreg: [mii interface function]
+ *
+ * Read a PHY register on the GMII.
+ */
+int
+gx_miibus_cordova_readreg(struct device *self, int phy, int reg)
+{
+ struct gx_softc *sc = (void *) self;
+ uint32_t mdic;
+ int i, rv;
+
+ CSR_WRITE_4(sc, GX_MDIC, GX_MDIC_OP_READ | GX_MDIC_PHYADD(phy) |
+ GX_MDIC_REGADD(reg));
+
+ for (i = 0; i < 100; i++) {
+ mdic = CSR_READ_4(sc, GX_MDIC);
+ if (mdic & GX_MDIC_READY)
+ break;
+ delay(10);
+ }
+
+ if ((mdic & GX_MDIC_READY) == 0) {
+ printf("%s: GX_MDIC read timed out: phy %d reg %d\n",
+ sc->gx_dev.dv_xname, phy, reg);
+ rv = 0;
+ } else if (mdic & GX_MDIC_E) {
+ /* This is normal if no PHY is present. */
+ DPRINTFN(2, ("%s: GX_MDIC read error: phy %d reg %d\n",
+ sc->gx_dev.dv_xname, phy, reg));
+ rv = 0;
+ } else {
+ rv = GX_MDIC_DATA(mdic);
+ if (rv == 0xffff)
+ rv = 0;
+ }
+
+ return (rv);
+}
+
+/*
+ * gx_miibus_cordova_writereg: [mii interface function]
+ *
+ * Write a PHY register on the GMII.
+ */
+void
+gx_miibus_cordova_writereg(struct device *self, int phy, int reg, int val)
+{
+ struct gx_softc *sc = (void *) self;
+ uint32_t mdic;
+ int i;
+
+ CSR_WRITE_4(sc, GX_MDIC, GX_MDIC_OP_WRITE | GX_MDIC_PHYADD(phy) |
+ GX_MDIC_REGADD(reg) | GX_MDIC_DATA(val));
+
+ for (i = 0; i < 100; i++) {
+ mdic = CSR_READ_4(sc, GX_MDIC);
+ if (mdic & GX_MDIC_READY)
+ break;
+ delay(10);
+ }
+
+ if ((mdic & GX_MDIC_READY) == 0)
+ printf("%s: GX_MDIC write timed out: phy %d reg %d\n",
+ sc->gx_dev.dv_xname, phy, reg);
+ else if (mdic & GX_MDIC_E)
+ printf("%s: GX_MDIC write error: phy %d reg %d\n",
+ sc->gx_dev.dv_xname, phy, reg);
+}
+
+void
+gx_miibus_statchg(struct device *dev)
+{
+ struct gx_softc *gx = (struct gx_softc *)dev;
+ struct mii_data *mii;
+ int reg, s;
+
+ if (gx->gx_tbimode)
+ return;
+
+ /*
+ * Set flow control behavior to mirror what PHY negotiated.
+ */
+ mii = &gx->gx_mii;
+
+ s = splimp();
+ GX_LOCK(gx);
+
+ reg = CSR_READ_4(gx, GX_CTRL);
+ if (mii->mii_media_active & IFM_FLAG0)
+ reg |= GX_CTRL_RX_FLOWCTRL;
+ else
+ reg &= ~GX_CTRL_RX_FLOWCTRL;
+ if (mii->mii_media_active & IFM_FLAG1)
+ reg |= GX_CTRL_TX_FLOWCTRL;
+ else
+ reg &= ~GX_CTRL_TX_FLOWCTRL;
+ CSR_WRITE_4(gx, GX_CTRL, reg);
+
+ GX_UNLOCK(gx);
+ splx(s);
+}
+
+/*
+ * Process an interface ioctl request.  Generic ethernet ioctls are
+ * handled by ether_ioctl() first; address, MTU, flag, multicast and
+ * media changes are handled here.  Runs at splimp() with the softc
+ * lock held for the duration of the call.
+ */
+int
+gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+	struct gx_softc *gx = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)data;
+	struct ifaddr *ifa = (struct ifaddr *)data;
+	int s, error = 0;
+	struct mii_data *mii;
+
+	s = splimp();
+	GX_LOCK(gx);
+
+	if ((error = ether_ioctl(ifp, &gx->arpcom, command, data)) > 0) {
+		/* release the softc lock as well as the spl before bailing */
+		GX_UNLOCK(gx);
+		splx(s);
+		return (error);
+	}
+
+	switch (command) {
+	case SIOCSIFADDR:
+		/* setting an address implicitly brings the interface up */
+		ifp->if_flags |= IFF_UP;
+		switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+		case AF_INET:
+			gx_init(gx);
+			arp_ifinit(&gx->arpcom, ifa);
+			break;
+#endif /* INET */
+		default:
+			gx_init(gx);
+			break;
+		}
+		break;
+	case SIOCSIFMTU:
+		if (ifr->ifr_mtu > GX_MAX_MTU) {
+			error = EINVAL;
+		} else {
+			ifp->if_mtu = ifr->ifr_mtu;
+			gx_init(gx);
+		}
+		break;
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			/*
+			 * If only the PROMISC flag changed while running,
+			 * toggle it in hardware instead of doing a full
+			 * reinitialization.
+			 */
+			if (ifp->if_flags & IFF_RUNNING &&
+			    ((ifp->if_flags & IFF_PROMISC) !=
+			    (gx->gx_if_flags & IFF_PROMISC))) {
+				if (ifp->if_flags & IFF_PROMISC)
+					GX_SETBIT(gx, GX_RX_CONTROL,
+					    GX_RXC_UNI_PROMISC);
+				else
+					GX_CLRBIT(gx, GX_RX_CONTROL,
+					    GX_RXC_UNI_PROMISC);
+			} else
+				gx_init(gx);
+		} else {
+			if (ifp->if_flags & IFF_RUNNING) {
+				gx_stop(gx);
+			}
+		}
+
+		/* remember the flags so the next call can diff against them */
+		gx->gx_if_flags = ifp->if_flags;
+		break;
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		if (ifp->if_flags & IFF_RUNNING)
+			gx_setmulti(gx);
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		/* TBI (fiber) media is tracked separately from MII media */
+		if (gx->gx_tbimode) {
+			error = ifmedia_ioctl(ifp, ifr, &gx->gx_media,
+			    command);
+		} else {
+			mii = &gx->gx_mii;
+			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
+			    command);
+		}
+		break;
+	default:
+		error = EINVAL;
+		break;
+	}
+
+	GX_UNLOCK(gx);
+	splx(s);
+	return (error);
+}
+
+/*
+ * Reset the PHY.  Cordova-class chips have a dedicated PHY reset bit
+ * in GX_CTRL; older boards instead wire the PHY reset line to a
+ * software-defined GPIO pin controlled through GX_CTRL_EXT.
+ */
+void
+gx_phy_reset(struct gx_softc *gx)
+{
+	int reg;
+
+	GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
+
+	if (gx->gx_vflags & GXF_CORDOVA) {
+		/* post-livingood (cordova) only */
+		GX_SETBIT(gx, GX_CTRL, GX_CTRL_PHY_RESET);
+		DELAY(1000);
+		GX_CLRBIT(gx, GX_CTRL, GX_CTRL_PHY_RESET);
+	} else {
+		/*
+		 * PHY reset is active low.
+		 */
+		reg = CSR_READ_4(gx, GX_CTRL_EXT);
+		reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
+		reg |= GX_CTRLX_GPIO_DIR;
+
+		/* drive the (active-low) pin high, low, then high again */
+		CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
+		DELAY(10);
+		CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
+		DELAY(10);
+		CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
+		DELAY(10);
+	}
+}
+
+/*
+ * Perform a full device reset.  Host interrupts are masked first so
+ * the reset cannot raise spurious interrupts.
+ */
+void
+gx_reset(struct gx_softc *gx)
+{
+
+	/* Disable host interrupts. */
+	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
+
+	/* reset chip (THWAP!) */
+	GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
+	DELAY(10);
+}
+
+/*
+ * Stop the interface: reset and flush both DMA engines, release the
+ * RX and TX ring resources, and mark the interface as down/idle.
+ * (Removed a leftover "#if 1" debug conditional around the ring
+ * teardown; the code inside was always compiled.)
+ */
+void
+gx_stop(struct gx_softc *gx)
+{
+	struct ifnet *ifp;
+
+	ifp = &gx->arpcom.ac_if;
+
+	/* reset and flush transmitter */
+	CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);
+
+	/* reset and flush receiver */
+	CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);
+
+	/* reset link */
+	if (gx->gx_tbimode)
+		GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
+
+	/* Free the RX lists. */
+	gx_free_rx_ring(gx);
+
+	/* Free TX buffers. */
+	gx_free_tx_ring(gx);
+
+	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+}
+
+/*
+ * Device watchdog.  A transmit failed to complete within the timeout
+ * armed in gx_start(), so count an output error and reinitialize the
+ * chip from scratch.
+ */
+void
+gx_watchdog(struct ifnet *ifp)
+{
+	struct gx_softc *gx = ifp->if_softc;
+
+	printf("%s: watchdog timeout -- resetting\n", gx->gx_dev.dv_xname);
+
+	ifp->if_oerrors++;
+
+	gx_reset(gx);
+	gx_init(gx);
+}
+
+/*
+ * Initialize a receive ring descriptor: attach an mbuf cluster to
+ * ring slot 'idx' and point the descriptor at it.  If 'm' is NULL a
+ * fresh cluster mbuf is allocated, otherwise the caller's mbuf is
+ * recycled in place.  Returns 0 on success, ENOBUFS on allocation or
+ * DMA map failure.
+ */
+int
+gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
+{
+	struct mbuf *m_new = NULL;
+	struct gx_rx_desc *r;
+	bus_dmamap_t rxmap = gx->gx_cdata.gx_rx_map[idx];
+
+
+	if (m == NULL) {
+		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+		if (m_new == NULL) {
+			printf("%s: mbuf alloc failed -- packet dropped\n",
+			    gx->gx_dev.dv_xname);
+			return (ENOBUFS);
+		}
+		MCLGET(m_new, M_DONTWAIT);
+		if ((m_new->m_flags & M_EXT) == 0) {
+			printf("%s: cluster alloc failed -- packet dropped\n",
+			    gx->gx_dev.dv_xname);
+			m_freem(m_new);
+			return (ENOBUFS);
+		}
+		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
+	} else {
+		/* recycle the caller's mbuf: reset length and data pointer */
+		m->m_len = m->m_pkthdr.len = MCLBYTES;
+		m->m_data = m->m_ext.ext_buf;
+		m->m_next = NULL;
+		m_new = m;
+	}
+
+	if (bus_dmamap_load_mbuf(gx->gx_dmatag, rxmap, m_new,
+	    BUS_DMA_NOWAIT)) {
+		/* don't leak an mbuf we allocated ourselves above */
+		if (m == NULL)
+			m_freem(m_new);
+		return (ENOBUFS);
+	}
+
+	/*
+	 * XXX
+	 * this will _NOT_ work for large MTU's; it will overwrite
+	 * the end of the buffer.  E.g.: take this out for jumbograms,
+	 * but then that breaks alignment.
+	 */
+	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
+		m_adj(m_new, ETHER_ALIGN);
+
+	gx->gx_cdata.gx_rx_chain[idx] = m_new;
+	r = &gx->gx_rdata->gx_rx_ring[idx];
+	r->rx_addr = rxmap->dm_segs[0].ds_addr;
+	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
+		r->rx_addr += ETHER_ALIGN;
+	r->rx_staterr = 0;
+
+	return (0);
+}
+
+/*
+ * The receive ring can have up to 64K descriptors, which at 2K per mbuf
+ * cluster, could add up to 128M of memory. Due to alignment constraints,
+ * the number of descriptors must be a multiple of 8. For now, we
+ * allocate 256 entries and hope that our CPU is fast enough to keep up
+ * with the NIC.
+ */
+int
+gx_init_rx_ring(struct gx_softc *gx)
+{
+	int i, error;
+
+	/* attach a fresh mbuf cluster to every descriptor slot */
+	for (i = 0; i < GX_RX_RING_CNT; i++) {
+		error = gx_newbuf(gx, i, NULL);
+		if (error)
+			/*
+			 * NOTE(review): slots filled so far are left for
+			 * the caller to release via gx_free_rx_ring() --
+			 * confirm all callers do so on failure.
+			 */
+			return (error);
+	}
+
+	/* bring receiver out of reset state, leave disabled */
+	CSR_WRITE_4(gx, GX_RX_CONTROL, 0);
+
+	/* set up ring registers */
+	CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
+	    (u_quad_t)(gx->gx_ring_map->dm_segs[0].ds_addr +
+	    offsetof(struct gx_ring_data, gx_rx_ring)));
+
+	CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
+	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
+	CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
+	/* tail = last descriptor: the entire ring is available to the chip */
+	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
+	gx->gx_rx_tail_idx = 0;
+
+	return (0);
+}
+
+/*
+ * Release all receive-side resources: the DMA map and mbuf of every
+ * occupied ring slot, the descriptor ring contents, and any packet
+ * chain that was only partially reassembled at teardown time.
+ */
+void
+gx_free_rx_ring(struct gx_softc *gx)
+{
+	struct mbuf *m;
+	int slot;
+
+	for (slot = 0; slot < GX_RX_RING_CNT; slot++) {
+		m = gx->gx_cdata.gx_rx_chain[slot];
+		if (m == NULL)
+			continue;
+		bus_dmamap_unload(gx->gx_dmatag,
+		    gx->gx_cdata.gx_rx_map[slot]);
+		m_freem(m);
+		gx->gx_cdata.gx_rx_chain[slot] = NULL;
+	}
+
+	/* release any partially-received packet chain */
+	if (gx->gx_pkthdr != NULL) {
+		m_freem(gx->gx_pkthdr);
+		gx->gx_pkthdr = NULL;
+	}
+
+	bzero((void *)gx->gx_rdata->gx_rx_ring,
+	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
+}
+
+/*
+ * Initialize the transmit ring registers and software state.  No
+ * buffers are allocated up front; descriptors are filled on demand
+ * by gx_encap().  Always returns 0.
+ */
+int
+gx_init_tx_ring(struct gx_softc *gx)
+{
+	/* bring transmitter out of reset state, leave disabled */
+	CSR_WRITE_4(gx, GX_TX_CONTROL, 0);
+
+	/* set up ring registers */
+	CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
+	    (u_quad_t)(gx->gx_ring_map->dm_segs[0].ds_addr +
+	    offsetof(struct gx_ring_data, gx_tx_ring)));
+	CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
+	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
+	/* head == tail means the ring is empty */
+	CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
+	CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
+	gx->gx_tx_head_idx = 0;
+	gx->gx_tx_tail_idx = 0;
+	gx->gx_txcnt = 0;
+
+	/* set up initial TX context */
+	gx->gx_txcontext = GX_TXCONTEXT_NONE;
+
+	return (0);
+}
+
+/*
+ * Release all transmit-side resources: the DMA map and mbuf chain of
+ * every in-flight ring slot, then clear the descriptor ring itself.
+ */
+void
+gx_free_tx_ring(struct gx_softc *gx)
+{
+	struct mbuf *m;
+	int slot;
+
+	for (slot = 0; slot < GX_TX_RING_CNT; slot++) {
+		m = gx->gx_cdata.gx_tx_chain[slot];
+		if (m == NULL)
+			continue;
+		bus_dmamap_unload(gx->gx_dmatag,
+		    gx->gx_cdata.gx_tx_map[slot]);
+		m_freem(m);
+		gx->gx_cdata.gx_tx_chain[slot] = NULL;
+	}
+
+	bzero((void *)gx->gx_rdata->gx_tx_ring,
+	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
+}
+
+/*
+ * Program the multicast filter.  Currently this only clears the
+ * hardware multicast table; no hash entries are installed yet, so
+ * multicast reception relies on GX_RXSTAT_INEXACT_MATCH filtering
+ * in gx_rxeof().
+ */
+void
+gx_setmulti(struct gx_softc *gx)
+{
+	u_int32_t base;
+	int i;
+
+	/* Cordova chips moved the multicast table to a new location */
+	base = (gx->gx_vflags & GXF_CORDOVA) ?
+	    GX_CORDOVA_MULTICAST_BASE : GX_MULTICAST_BASE;
+
+	/*
+	 * Wipe out the multicast table.
+	 * NOTE(review): the loop starts at entry 1, leaving entry 0
+	 * untouched -- presumably intentional, but verify against the
+	 * 8254x documentation.
+	 */
+	for (i = 1; i < 128; i++)
+		CSR_WRITE_4(gx, base + i * 4, 0);
+}
+
+/*
+ * Handle received frames.  Walk the RX descriptor ring from the last
+ * processed slot; frames larger than one cluster arrive in several
+ * descriptors and are reassembled through gx_pkthdr/gx_pktnextp until
+ * a descriptor with END_OF_PACKET is seen.  Completed frames go up
+ * the stack via ether_input_mbuf(), and the consumed descriptors are
+ * returned to the chip by rewriting the ring tail pointer.
+ */
+void
+gx_rxeof(struct gx_softc *gx)
+{
+	struct gx_rx_desc *rx;
+	struct ifnet *ifp;
+	int idx, staterr, len;
+	struct mbuf *m;
+
+	gx->gx_rx_interrupts++;
+
+	ifp = &gx->arpcom.ac_if;
+	idx = gx->gx_rx_tail_idx;
+
+	while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr &
+	    GX_RXSTAT_COMPLETED) {
+
+		rx = &gx->gx_rdata->gx_rx_ring[idx];
+		m = gx->gx_cdata.gx_rx_chain[idx];
+		/*
+		 * gx_newbuf overwrites status and length bits, so we
+		 * make a copy of them here.
+		 */
+		len = rx->rx_len;
+		staterr = rx->rx_staterr;
+
+		if (staterr & GX_INPUT_ERROR)
+			goto ierror;
+
+		/* replace this slot's mbuf; if that fails, recycle 'm' */
+		if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
+			goto ierror;
+
+		GX_INC(idx, GX_RX_RING_CNT);
+
+		if (staterr & GX_RXSTAT_INEXACT_MATCH) {
+			/*
+			 * multicast packet, must verify against
+			 * multicast address.
+			 */
+		}
+
+		/* not the final buffer: append to the pending chain */
+		if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
+			if (gx->gx_pkthdr == NULL) {
+				m->m_len = len;
+				m->m_pkthdr.len = len;
+				gx->gx_pkthdr = m;
+				gx->gx_pktnextp = &m->m_next;
+			} else {
+				m->m_len = len;
+				m->m_flags &= ~M_PKTHDR;
+				gx->gx_pkthdr->m_pkthdr.len += len;
+				*(gx->gx_pktnextp) = m;
+				gx->gx_pktnextp = &m->m_next;
+			}
+			continue;
+		}
+
+		/* final buffer: complete any pending chain and hand it up */
+		if (gx->gx_pkthdr == NULL) {
+			m->m_len = len;
+			m->m_pkthdr.len = len;
+		} else {
+			m->m_len = len;
+			m->m_flags &= ~M_PKTHDR;
+			gx->gx_pkthdr->m_pkthdr.len += len;
+			*(gx->gx_pktnextp) = m;
+			m = gx->gx_pkthdr;
+			gx->gx_pkthdr = NULL;
+		}
+
+		ifp->if_ipackets++;
+		m->m_pkthdr.rcvif = ifp;
+
+#ifdef notyet
+#define IP_CSMASK 	(GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
+#define TCP_CSMASK \
+    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
+		if (ifp->if_capenable & IFCAP_RXCSUM) {
+#if 0
+			/*
+			 * Intel Erratum #23 indicates that the Receive IP
+			 * Checksum offload feature has been completely
+			 * disabled.
+			 */
+			if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
+				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+				if ((staterr & GX_RXERR_IP_CSUM) == 0)
+					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+			}
+#endif
+			if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
+				m->m_pkthdr.csum_flags |=
+				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+				m->m_pkthdr.csum_data = 0xffff;
+			}
+		}
+
+#if NVLAN > 0
+		/*
+		 * If we received a packet with a vlan tag, pass it
+		 * to vlan_input() instead of ether_input().
+		 */
+		if (staterr & GX_RXSTAT_VLAN_PKT) {
+			VLAN_INPUT_TAG(eh, m, rx->rx_special);
+			continue;
+		}
+#endif
+#endif
+
+#if NBPFILTER > 0
+		/*
+		 * Handle BPF listeners. Let the BPF user see the packet.
+		 */
+		if (ifp->if_bpf)
+			bpf_mtap(ifp->if_bpf, m);
+#endif
+
+		ether_input_mbuf(ifp, m);
+		continue;
+
+  ierror:
+		ifp->if_ierrors++;
+		gx_newbuf(gx, idx, m);
+
+		/* 
+		 * XXX
+		 * this isn't quite right.  Suppose we have a packet that
+		 * spans 5 descriptors (9K split into 2K buffers).  If
+		 * the 3rd descriptor sets an error, we need to ignore
+		 * the last two.  The way things stand now, the last two
+		 * will be accepted as a single packet.
+		 *
+		 * we don't worry about this -- the chip may not set an
+		 * error in this case, and the checksum of the upper layers
+		 * will catch the error.
+		 */
+		if (gx->gx_pkthdr != NULL) {
+			m_freem(gx->gx_pkthdr);
+			gx->gx_pkthdr = NULL;
+		}
+		GX_INC(idx, GX_RX_RING_CNT);
+	}
+
+	gx->gx_rx_tail_idx = idx;
+	/* the hardware tail must trail the next slot to be processed */
+	if (--idx < 0)
+		idx = GX_RX_RING_CNT - 1;
+	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
+}
+
+/*
+ * Handle transmit completions.  Walk the in-flight descriptors from
+ * the head of the TX ring; for each packet whose end-of-packet
+ * descriptor the chip has marked done, free the mbuf chain and
+ * unload its DMA map.  Stops at the first incomplete packet.
+ */
+void
+gx_txeof(struct gx_softc *gx)
+{
+	struct ifnet *ifp;
+	int idx, cnt;
+
+	gx->gx_tx_interrupts++;
+
+	ifp = &gx->arpcom.ac_if;
+	idx = gx->gx_tx_head_idx;
+	cnt = gx->gx_txcnt;
+
+	/*
+	 * If the system chipset performs I/O write buffering, it is 
+	 * possible for the PIO read of the head descriptor to bypass the
+	 * memory write of the descriptor, resulting in reading a descriptor
+	 * which has not been updated yet.
+	 */
+	while (cnt) {
+		struct gx_tx_desc_old *tx;
+
+		tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
+		cnt--;
+
+		/* only end-of-packet descriptors carry completion status */
+		if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
+			GX_INC(idx, GX_TX_RING_CNT);
+			continue;
+		}
+
+		if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
+			break;
+
+		ifp->if_opackets++;
+
+		m_freem(gx->gx_cdata.gx_tx_chain[idx]);
+		gx->gx_cdata.gx_tx_chain[idx] = NULL;
+		bus_dmamap_unload(gx->gx_dmatag, gx->gx_cdata.gx_tx_map[idx]);
+
+		/* commit progress only once a whole packet has completed */
+		gx->gx_txcnt = cnt;
+		ifp->if_timer = 0;
+
+		GX_INC(idx, GX_TX_RING_CNT);
+		gx->gx_tx_head_idx = idx;
+	}
+
+	if (gx->gx_txcnt == 0)
+		ifp->if_flags &= ~IFF_OACTIVE;
+}
+
+/*
+ * Interrupt service routine.  Reading GX_INT_READ acknowledges and
+ * clears all pending causes; RX and TX completions are dispatched to
+ * gx_rxeof()/gx_txeof(), then interrupts are re-enabled and any
+ * queued transmissions are kicked off.
+ *
+ * NOTE(review): always returns 1 (claimed) even when no cause bits
+ * were set; on a shared interrupt line returning (intr != 0) would be
+ * more accurate -- verify before changing.
+ */
+int
+gx_intr(void *xsc)
+{
+	struct gx_softc	*gx;
+	struct ifnet *ifp;
+	u_int32_t intr;
+	int s;
+
+	gx = xsc;
+	ifp = &gx->arpcom.ac_if;
+
+	s = splimp();
+
+	gx->gx_interrupts++;
+
+	/* Disable host interrupts. */
+	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
+
+	/*
+	 * find out why we're being bothered.
+	 * reading this register automatically clears all bits.
+	 */
+	intr = CSR_READ_4(gx, GX_INT_READ);
+
+	if (intr) {
+		DPRINTFN(8, ("%s: gx_intr. intr = 0x%x\n",
+			     gx->gx_dev.dv_xname, intr));
+	}
+
+	/* Check RX return ring producer/consumer */
+	if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
+		gx_rxeof(gx);
+
+	/* Check TX ring producer/consumer */
+	if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
+		gx_txeof(gx);
+
+	/*
+	 * handle other interrupts here.
+	 */
+
+	/*
+	 * Link change interrupts are not reliable; the interrupt may
+	 * not be generated if the link is lost.  However, the register
+	 * read is reliable, so check that.  Use SEQ errors to possibly
+	 * indicate that the link has changed.
+	 */
+#ifdef GX_DEBUG
+	if (gxdebug >= 1) {
+		if (intr & GX_INT_LINK_CHANGE) {
+			int status = CSR_READ_4(gx, GX_STATUS);
+			printf("%s: link %s\n", gx->gx_dev.dv_xname,
+			       (status & GX_STAT_LINKUP) ? "up" :
+			       "down");
+		}
+	}
+#endif
+
+	/* Turn interrupts on. */
+	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);
+
+	/* kick the transmitter if anything is waiting to go out */
+	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
+		gx_start(ifp);
+
+	splx(s);
+
+	return (1);
+}
+
+/*
+ * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
+ * pointers to descriptors.
+ */
+/*
+ * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
+ * pointers to descriptors.  On success the mbuf is owned by the ring
+ * (freed later by gx_txeof()); on ENOBUFS the caller keeps ownership
+ * and typically requeues the packet.
+ */
+int
+gx_encap(struct gx_softc *gx, struct mbuf *m_head)
+{
+	struct gx_tx_desc_data *tx = NULL;
+#ifdef notyet
+	struct gx_tx_desc_ctx *tctx;
+#endif
+	struct mbuf *m;
+	bus_dmamap_t txmap;
+	int i = 0;
+	int idx, cnt, /*csumopts, */ txcontext;
+
+#if NVLAN > 0
+	struct ifvlan *ifv = NULL;
+
+	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
+	    m_head->m_pkthdr.rcvif != NULL &&
+	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
+		ifv = m_head->m_pkthdr.rcvif->if_softc;
+#endif
+
+	cnt = gx->gx_txcnt;
+	idx = gx->gx_tx_tail_idx;
+	txcontext = gx->gx_txcontext;
+
+	/*
+	 * Ensure we have at least 4 descriptors pre-allocated.
+	 */
+	if (cnt >= GX_TX_RING_CNT - 4)
+		return (ENOBUFS);
+
+#ifdef notyet
+	/*
+	 * Set up the appropriate offload context if necessary.
+	 */
+	csumopts = 0;
+	if (m_head->m_pkthdr.csum_flags) {
+		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
+			csumopts |= GX_TXTCP_OPT_IP_CSUM;
+		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
+			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
+			txcontext = GX_TXCONTEXT_TCPIP;
+		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
+			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
+			txcontext = GX_TXCONTEXT_UDPIP;
+		} else if (txcontext == GX_TXCONTEXT_NONE)
+			txcontext = GX_TXCONTEXT_TCPIP;
+		if (txcontext == gx->gx_txcontext)
+			goto context_done;
+
+		tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
+		tctx->tx_ip_csum_start = ETHER_HDR_LEN;
+		tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
+		tctx->tx_ip_csum_offset = 
+		    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
+		tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
+		tctx->tx_tcp_csum_end = 0;
+		if (txcontext == GX_TXCONTEXT_TCPIP)
+			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
+			    sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
+		else
+			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
+			    sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
+		tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
+		tctx->tx_type = 0;
+		tctx->tx_status = 0;
+		GX_INC(idx, GX_TX_RING_CNT);
+		cnt++;
+	}
+context_done:
+#endif
+
+	/*
+	 * Start packing the mbufs in this chain into the transmit
+	 * descriptors.  Stop when we run out of descriptors or hit
+	 * the end of the mbuf chain.
+	 */
+	txmap = gx->gx_cdata.gx_tx_map[idx];
+	if (bus_dmamap_load_mbuf(gx->gx_dmatag, txmap, m_head, BUS_DMA_NOWAIT))
+		return(ENOBUFS);
+
+	for (m = m_head; m != NULL; m = m->m_next) {
+		if (m->m_len == 0)
+			continue;
+
+		if (cnt == GX_TX_RING_CNT) {
+			/* ring full: undo the map load and bail */
+			printf("%s: overflow(2): %d, %d\n",
+			    gx->gx_dev.dv_xname, cnt, GX_TX_RING_CNT);
+			bus_dmamap_unload(gx->gx_dmatag, txmap);
+			return (ENOBUFS);
+		}
+
+		tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
+		tx->tx_addr = txmap->dm_segs[i++].ds_addr;
+		tx->tx_status = 0;
+		tx->tx_len = m->m_len;
+#ifdef notyet
+		if (gx->arpcom.ac_if.if_hwassist) {
+			tx->tx_type = 1;
+			tx->tx_command = GX_TXTCP_EXTENSION;
+			tx->tx_options = csumopts;
+		} else {
+#endif
+			/*
+			 * This is really a struct gx_tx_desc_old.
+			 */
+			tx->tx_command = 0;
+#ifdef notyet
+		}
+#endif
+		GX_INC(idx, GX_TX_RING_CNT);
+		cnt++;
+	}
+
+	if (tx != NULL) {
+		/* mark the last descriptor as the end of the packet */
+		tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
+		    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
+#if NVLAN > 0
+		if (ifv != NULL) {
+			tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
+			tx->tx_vlan = ifv->ifv_tag;
+		}
+#endif
+		gx->gx_txcnt = cnt;
+		gx->gx_tx_tail_idx = idx;
+		gx->gx_txcontext = txcontext;
+		idx = GX_PREV(idx, GX_TX_RING_CNT);
+		gx->gx_cdata.gx_tx_chain[idx] = m_head;
+
+		/* tell the chip about the new tail */
+		CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
+	}
+
+	return (0);
+}
+
+/*
+ * Main transmit routine. To avoid having to do mbuf copies, we put pointers
+ * to the mbuf data regions directly in the transmit descriptors.
+ */
+/*
+ * Main transmit routine.  To avoid having to do mbuf copies, we put
+ * pointers to the mbuf data regions directly in the transmit
+ * descriptors.  Packets are pulled from the send queue until it is
+ * empty or the descriptor ring fills up.
+ */
+void
+gx_start(struct ifnet *ifp)
+{
+	struct gx_softc *gx = ifp->if_softc;
+	struct mbuf *m_head;
+	int s;
+
+	s = splimp();
+
+	while (1) {
+		IF_DEQUEUE(&ifp->if_snd, m_head);
+		if (m_head == NULL)
+			break;
+
+		/*
+		 * Pack the data into the transmit ring.  If we
+		 * don't have room, set the OACTIVE flag and wait
+		 * for the NIC to drain the ring.
+		 */
+		if (gx_encap(gx, m_head) != 0) {
+			IF_PREPEND(&ifp->if_snd, m_head);
+			ifp->if_flags |= IFF_OACTIVE;
+			break;
+		}
+
+#if NBPFILTER > 0
+		/* Let any BPF listeners see a copy of this frame. */
+		if (ifp->if_bpf)
+			bpf_mtap(ifp->if_bpf, m_head);
+#endif
+
+		/* Set a timeout in case the chip goes out to lunch. */
+		ifp->if_timer = 5;
+	}
+
+	splx(s);
+}
+
+/* autoconf glue: softc size plus the match/attach entry points */
+struct cfattach gx_ca = {
+	sizeof(struct gx_softc), gx_probe, gx_attach
+};
+
+/* driver class: no preallocated instances, "gx" name, network device */
+struct cfdriver gx_cd = {
+	0, "gx", DV_IFNET
+};
diff --git a/sys/dev/pci/if_gxreg.h b/sys/dev/pci/if_gxreg.h
new file mode 100644
index 00000000000..8f238acda04
--- /dev/null
+++ b/sys/dev/pci/if_gxreg.h
@@ -0,0 +1,568 @@
+/* $OpenBSD: if_gxreg.h,v 1.1 2002/04/02 13:03:31 nate Exp $ */
+/*-
+ * Copyright (c) 1999,2000,2001 Jonathan Lemon
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef PCIM_CMD_MWIEN
+#define PCIM_CMD_MWIEN 0x0010
+#endif
+
+/*
+ * chip register offsets. These are memory mapped registers
+ * which can be accessed with the CSR_READ_4()/CSR_WRITE_4() macros.
+ * Each register must be accessed using 32 bit operations.
+ */
+
+#define GX_CTRL 0x0000 /* control register */
+#define GX_STATUS 0x0008 /* status register */
+#define GX_EEPROM_CTRL 0x0010 /* EEPROM/Flash control/data */
+#define GX_CTRL_EXT 0x0018 /* extended device control */
+#define GX_MDIC 0x0020 /* MDI control */
+#define GX_FLOW_CTRL_BASE 0x0028 /* flow control address low/high */
+#define GX_FLOW_CTRL_TYPE 0x0030 /* flow control type */
+#define GX_VET 0x0038 /* VLAN ethertype */
+#define GX_RX_ADDR_BASE 0x0040 /* 16 pairs of receive address low/high */
+
+#define GX_INT_READ 0x00C0 /* read interrupts */
+#define GX_INT_FORCE 0x00C8 /* force an interrupt */
+#define GX_INT_MASK_SET 0x00D0 /* interrupt mask set/read */
+#define GX_INT_MASK_CLR 0x00D8 /* interrupt mask clear */
+
+#define GX_RX_CONTROL 0x0100 /* RX control */
+
+/* 82542 and older 82543 chips */
+#define GX_RX_OLD_INTR_DELAY 0x0108 /* RX delay timer */
+#define GX_RX_OLD_RING_BASE 0x0110 /* RX descriptor base address */
+#define GX_RX_OLD_RING_LEN 0x0118 /* RX descriptor length */
+#define GX_RX_OLD_RING_HEAD 0x0120 /* RX descriptor head */
+#define GX_RX_OLD_RING_TAIL 0x0128 /* RX descriptor tail */
+
+/* 82542 and older 82543 chips */
+#define GX_OLD_FCRTH		0x0160	/* flow control rcv threshold high */
+#define GX_OLD_FCRTL		0x0168	/* flow control rcv threshold low */
+
+#define GX_FCTTV 0x0170 /* flow control xmit timer value */
+#define GX_TX_CONFIG 0x0178 /* xmit configuration (tbi mode) */
+#define GX_RX_CONFIG 0x0180 /* recv configuration word */
+
+#define GX_MULTICAST_BASE 0x0200 /* multicast table array base */
+
+#define GX_TX_CONTROL 0x0400 /* TX control */
+#define GX_TX_IPG 0x0410 /* TX interpacket gap */
+
+/* 82542 and older 82543 chips */
+#define GX_TX_OLD_RING_BASE 0x0420 /* TX descriptor base address */
+#define GX_TX_OLD_RING_LEN 0x0428 /* TX descriptor length */
+#define GX_TX_OLD_RING_HEAD 0x0430 /* TX descriptor head */
+#define GX_TX_OLD_RING_TAIL 0x0438 /* TX descriptor tail */
+#define GX_TX_OLD_INTR_DELAY 0x0440 /* TX interrupt delay value */
+
+#define GX_TBT 0x0448 /* TX burst timer */
+#define GX_AIT 0x0458 /* adaptive IFS throttle */
+
+#define GX_VFTA_BASE 0x0600 /* VLAN filter table array base */
+
+#define GX_PKT_BUFFER_ALLOC 0x1000 /* Packet buffer allocation */
+#define GX_ERT 0x2000 /* Early receive threshold */
+#define GX_RX_OLD_DMA_CTRL 0x2028 /* RX descriptor control */
+
+/* newer 82543 chips */
+#define GX_FCRTH		0x2160	/* flow control rcv threshold high */
+#define GX_FCRTL		0x2168	/* flow control rcv threshold low */
+
+/* newer 82543 chips */
+#define GX_RX_RING_BASE 0x2800 /* RX descriptor base address */
+#define GX_RX_RING_LEN 0x2808 /* RX descriptor length */
+#define GX_RX_RING_HEAD 0x2810 /* RX descriptor head */
+#define GX_RX_RING_TAIL 0x2818 /* RX descriptor tail */
+#define GX_RX_INTR_DELAY 0x2820 /* RX delay timer */
+#define GX_RX_DMA_CTRL 0x2828 /* RX descriptor control */
+
+#define GX_EARLY_TX_THRESH 0x3000 /* early transmit threshold */
+#define GX_TX_OLD_DMA_CTRL 0x3028 /* TX descriptor control */
+
+/* newer 82543 chips */
+#define GX_TX_RING_BASE 0x3800 /* TX descriptor base address */
+#define GX_TX_RING_LEN 0x3808 /* TX descriptor length */
+#define GX_TX_RING_HEAD 0x3810 /* TX descriptor head */
+#define GX_TX_RING_TAIL 0x3818 /* TX descriptor tail */
+#define GX_TX_INTR_DELAY 0x3820 /* TX interrupt delay value */
+#define GX_TX_DMA_CTRL 0x3828 /* TX descriptor control */
+
+#define GX_CRCERRS 0x4000 /* CRC error count */
+#define GX_ALGNERRC 0x4004 /* alignment error count */
+#define GX_SYMERRS 0x4008 /* symbol error count */
+#define GX_RXERRC 0x400C /* RX error count */
+#define GX_MPC 0x4010 /* missed packets count */
+#define GX_SCC 0x4014 /* single collision count */
+#define GX_ECOL 0x4018 /* excessive collision count */
+#define GX_MCC 0x401C /* multiple collision count */
+#define GX_LATECOL 0x4020 /* late collision count */
+#define GX_COLC 0x4020 /* collision count */
+#define GX_TUC 0x402C /* transmit underrun count */
+#define GX_DC 0x4030 /* defer count */
+#define GX_TNCRS 0x4034 /* transmit - no CRS */
+#define GX_SEC 0x4038 /* sequence error count */
+#define GX_CEXTERR 0x403C /* carrier extension error count */
+#define GX_RLEC 0x4040 /* receive length error count */
+#define GX_RDMAUC 0x4044 /* receive DMA underrun count */
+#define GX_XONRXC 0x4048 /* XON received count */
+#define GX_XONTXC 0x404C /* XON transmitted count */
+#define GX_XOFFRXC 0x4050 /* XOFF received count */
+#define GX_XOFFTXC 0x4054 /* XOFF transmitted count */
+#define GX_FCRUC 0x4058 /* FC received unsupported count */
+#define GX_PRC64 0x405C /* packets rcvd (64 bytes) */
+#define GX_PRC127 0x4060 /* packets rcvd (65 - 127 bytes) */
+#define GX_PRC255 0x4064 /* packets rcvd (128 - 255 bytes) */
+#define GX_PRC511 0x4068 /* packets rcvd (256 - 511 bytes) */
+#define GX_PRC1023 0x406C /* packets rcvd (512 - 1023 bytes) */
+#define GX_PRC1522 0x4070 /* packets rcvd (1023 - 1522 bytes) */
+#define GX_GPRC 0x4074 /* good packets received */
+#define GX_BPRC 0x4078 /* broadcast packets received */
+#define GX_MPRC 0x407C /* multicast packets received */
+#define GX_GPTC 0x4080 /* good packets transmitted */
+#define GX_GORC 0x4088 /* good octets received (low/high) */
+#define GX_GOTC 0x4090 /* good octets transmitted (low/high) */
+#define GX_RNBC 0x40A0 /* receive no buffers count */
+#define GX_RUC 0x40A4 /* receive undersize count */
+#define GX_RFC 0x40A8 /* receive fragment count */
+#define GX_ROC 0x40AC /* receive oversize count */
+#define GX_RJC 0x40B0 /* receive jabber count */
+#define GX_TOR 0x40C0 /* total octets received (low/high) */
+#define GX_TOT 0x40C8 /* total octets transmitted (low/high) */
+#define GX_TPR 0x40D0 /* total packets received */
+#define GX_TPT 0x40D4 /* total packets transmitted */
+#define GX_PTC64 0x40D8 /* packets transmitted (64 B) */
+#define GX_PTC127 0x40DC /* packets xmitted (65 - 127 B) */
+#define GX_PTC255 0x40E0 /* packets xmitted (128 - 255 B) */
+#define GX_PTC511 0x40E4 /* packets xmitted (256 - 511 B) */
+#define GX_PTC1023 0x40E8 /* packets xmitted (512 - 1023 B) */
+#define GX_PTC1522 0x40EC /* packets xmitted (1023 - 1522 B) */
+#define GX_MPTC 0x40F0 /* multicast packets transmitted */
+#define GX_BPTC 0x40F4 /* broadcast packets transmitted */
+#define GX_TSCTC 0x40F8 /* TCP segmentation context xmitted */
+#define GX_TSCTFC 0x40FC /* TCP segmentation context fail */
+
+#define GX_RX_CSUM_CONTROL 0x5000 /* receive checksum control */
+#define GX_CORDOVA_MULTICAST_BASE 0x5200
+#define GX_RX_CORDOVA_ADDR_BASE 0x5400 /* receive address low/high */
+
+#define GX_RDFH 0x8000 /* RX data fifo head */
+#define GX_RDFT 0x8008 /* RX data fifo tail */
+#define GX_TDFH 0x8010 /* TX data fifo head */
+#define GX_TDFT 0x8018 /* TX data fifo tail */
+
+#define GX_RBM_BASE 0x10000 /* packet buffer memory */
+
+/* GX_RX_CSUM_CONTROL */
+#define GX_CSUM_START_MASK 0x000ff
+#define GX_CSUM_IP 0x00100
+#define GX_CSUM_TCP 0x00200
+
+/* GX_CTRL register */
+#define GX_CTRL_DUPLEX 0x00000001 /* full duplex */
+#define GX_CTRL_BIGENDIAN 0x00000002 /* 1 == big endian */
+#define GX_CTRL_PCI_PRIORITY 0x00000004 /* 1 == fairness */
+#define GX_CTRL_LINK_RESET 0x00000008
+#define GX_CTRL_TEST_MODE 0x00000010
+#define GX_CTRL_AUTOSPEED 0x00000020
+#define GX_CTRL_SET_LINK_UP 0x00000040
+#define GX_CTRL_INVERT_LOS 0x00000080 /* invert loss of signal */
+#define GX_CTRL_SPEEDMASK 0x00000300 /* 2 bits */
+#define GX_CTRL_FORCESPEED 0x00000800 /* (Livengood) */
+#define GX_CTRL_FORCEDUPLEX 0x00001000 /* (Livengood) */
+#define GX_CTRL_GPIO_0 0x00040000 /* Software defined pin #0 */
+#define GX_CTRL_GPIO_1 0x00080000 /* Software defined pin #1 */
+#define GX_CTRL_GPIO_2 0x00100000 /* Software defined pin #2 */
+#define GX_CTRL_GPIO_3 0x00200000 /* Software defined pin #3 */
+#define GX_CTRL_GPIO_DIR_0 0x00400000 /* Pin is Input(0)/Output(1) */
+#define GX_CTRL_GPIO_DIR_1 0x00800000 /* Pin is Input(0)/Output(1) */
+#define GX_CTRL_GPIO_DIR_2 0x01000000 /* Pin is Input(0)/Output(1) */
+#define GX_CTRL_GPIO_DIR_3 0x02000000 /* Pin is Input(0)/Output(1) */
+#define GX_CTRL_DEVICE_RESET 0x04000000 /* Device Reset */
+#define GX_CTRL_RX_FLOWCTRL 0x08000000 /* RX flowcontrol enable */
+#define GX_CTRL_TX_FLOWCTRL 0x10000000 /* TX flowcontrol enable */
+#define GX_CTRL_VLAN_ENABLE 0x40000000 /* VLAN Mode Enable */
+#define GX_CTRL_PHY_RESET 0x80000000 /* PHY reset (Cordova) */
+
+/* GX_STATUS register */
+#define GX_STAT_DUPLEX 0x00000001
+#define GX_STAT_LINKUP 0x00000002
+#define GX_STAT_XMITCLK_OK 0x00000004
+#define GX_STAT_RECVCLK_OK 0x00000008
+#define GX_STAT_XMIT_OFF 0x00000010
+#define GX_STAT_TBIMODE 0x00000020
+#define GX_STAT_SPEED_MASK 0x000000C0 /* 2 bits, not valid w/TBI */
+#define GX_STAT_AUTOSPEED_MASK 0x00000300 /* 2 bits, not valid w/TBI */
+#define GX_STAT_MTXCLK_OK 0x00000400
+#define GX_STAT_PCI66 0x00000800
+#define GX_STAT_BUS64 0x00001000
+
+#define GX_SPEED_10MB 0x00000000
+#define GX_SPEED_100MB 0x00000040
+#define GX_SPEED_1000MB 0x00000080
+
+/* GX_EEPROM_CTRL register */
+#define GX_EE_CLOCK 0x0001 /* software clock */
+#define GX_EE_SELECT 0x0002 /* chip select */
+#define GX_EE_DATA_IN 0x0004
+#define GX_EE_DATA_OUT 0x0008
+#define GX_EE_FLASH_CTRL 0x0030 /* 0x02 == enable writes */
+
+/* GX_MDIC register */
+#define GX_MDIC_OP_WRITE 0x04000000
+#define GX_MDIC_OP_READ 0x08000000
+#define GX_MDIC_READY 0x10000000
+#define GX_MDIC_I 0x20000000 /* interrupt on MDI complete */
+#define GX_MDIC_E 0x40000000 /* MDI error */
+#define GX_MDIC_DATA(x) ((x) & 0xffff)
+#define GX_MDIC_REGADD(x) ((x) << 16)
+#define GX_MDIC_PHYADD(x) ((x) << 21)
+
+/* GX_FLOW_CTRL register */
+/* */
+#define GX_FLOW_CTRL_CONST 0x00C28001 /* flow control constant low */
+#define GX_FLOW_CTRL_CONST_HIGH 0x00000100 /* flow control constant high*/
+
+/* GX_FLOW_CTRL_TYPE register */
+#define GX_FLOW_CTRL_TYPE_CONST 0x8808 /* flow control type constant*/
+
+/* serial EEPROM opcodes */
+#define GX_EE_OPC_WRITE 0x5
+#define GX_EE_OPC_READ 0x6
+#define GX_EE_OPC_ERASE 0x7
+
+#define GX_EE_OPC_SIZE 3 /* bits of opcode */
+#define GX_EE_ADDR_SIZE 6 /* bits of address */
+
+/* EEPROM map offsets */
+#define GX_EEMAP_MAC 0x00 /* station address (6 bytes) */
+#define GX_EEMAP_INIT1 0x0A /* init control 1 (2 bytes) */
+#define GX_EEMAP_INIT2 0x0F /* init control 2 */
+#define GX_EEMAP_SWDPIN 0x20 /* GPIO Pins (Cordova) */
+
+#define GX_EEMAP_INIT1_LVDID 0x0001
+#define GX_EEMAP_INIT1_LSSID 0x0002
+#define GX_EEMAP_INIT1_PME_CLOCK 0x0004
+#define GX_EEMAP_INIT1_PM 0x0008
+#define GX_EEMAP_INIT1_ILOS 0x0010
+#define GX_EEMAP_INIT1_SWDPIO_SHIFT 5
+#define GX_EEMAP_INIT1_SWDPIO_MASK (0xf << GX_EEMAP_INIT1_SWDPIO_SHIFT)
+#define GX_EEMAP_INIT1_IPS1 0x0100
+#define GX_EEMAP_INIT1_LRST 0x0200
+#define GX_EEMAP_INIT1_FD 0x0400
+#define GX_EEMAP_INIT1_FRCSPD 0x0800
+#define GX_EEMAP_INIT1_IPS0 0x1000
+#define GX_EEMAP_INIT1_64_32_BAR 0x2000
+
+#define GX_EEMAP_INIT2_CSR_RD_SPLIT	0x0002
+#define GX_EEMAP_INIT2_APM_EN		0x0004
+#define GX_EEMAP_INIT2_64_BIT		0x0008
+#define GX_EEMAP_INIT2_MAX_READ		0x0010
+#define GX_EEMAP_INIT2_DMCR_MAP		0x0020
+#define GX_EEMAP_INIT2_133_CAP		0x0040
+#define GX_EEMAP_INIT2_MSI_DIS		0x0080
+#define GX_EEMAP_INIT2_FLASH_DIS	0x0100
+/* 2-bit flash size field at bits 9-10: shift down first, then mask */
+#define GX_EEMAP_INIT2_FLASH_SIZE(x)	(((x) >> 9) & 3)
+#define GX_EEMAP_INIT2_ANE		0x0800
+/* 2-bit pause field at bits 12-13: shift down first, then mask */
+#define GX_EEMAP_INIT2_PAUSE(x)		(((x) >> 12) & 3)
+#define GX_EEMAP_INIT2_ASDE		0x4000
+#define GX_EEMAP_INIT2_APM_PME		0x8000
+#define GX_EEMAP_INIT2_SWDPIO_SHIFT	4
+#define GX_EEMAP_INIT2_SWDPIO_MASK	(0xf << GX_EEMAP_INIT2_SWDPIO_SHIFT)
+
+#define GX_EEMAP_GPIO_MASK 0xdf
+#define GX_EEMAP_GPIO_SHIFT 0
+#define GX_EEMAP_GPIO_DIR_SHIFT 8
+
+
+/* GX_CTRL_EXT register */
+#define GX_CTRLX_GPIO_4 0x00000010 /* Software defined pin #4 */
+#define GX_CTRLX_GPIO_5 0x00000020 /* Software defined pin #5 */
+#define GX_CTRLX_GPIO_6 0x00000040 /* Software defined pin #6 */
+#define GX_CTRLX_GPIO_7 0x00000080 /* Software defined pin #7 */
+#define GX_CTRLX_GPIO_DIR_4 0x00000100 /* Pin is Input(0)/Output(1) */
+#define GX_CTRLX_GPIO_DIR_5 0x00000200 /* Pin is Input(0)/Output(1) */
+#define GX_CTRLX_GPIO_DIR_6 0x00000400 /* Pin is Input(0)/Output(1) */
+#define GX_CTRLX_GPIO_DIR_7 0x00000800 /* Pin is Input(0)/Output(1) */
+#define GX_CTRLX_EEPROM_RESET 0x00002000 /* PCI_RST type EEPROM reset */
+#define GX_CTRLX_SPEED_BYPASS 0x00008000 /* use CTRL.SPEED setting */
+
+/*
+ * Defines for MII/GMII PHY.
+ *
+ * GPIO bits 0-3 are controlled by GX_CTRL, 4-7 by GX_CTRL_EXT.
+ */
+#define GX_CTRL_GPIO_DIR_SHIFT 22
+#define GX_CTRL_GPIO_SHIFT 18
+#define GX_CTRL_GPIO_DIR (GX_CTRL_GPIO_DIR_3)
+#define GX_CTRL_GPIO_DIR_MASK (GX_CTRL_GPIO_DIR_0 | GX_CTRL_GPIO_DIR_1 | \
+ GX_CTRL_GPIO_DIR_2 | GX_CTRL_GPIO_DIR_3)
+#define GX_CTRL_PHY_IO GX_CTRL_GPIO_2 /* bit-banged MDIO data pin */
+#define GX_CTRL_PHY_IO_DIR GX_CTRL_GPIO_DIR_2
+#define GX_CTRL_PHY_CLK GX_CTRL_GPIO_3 /* bit-banged MDC clock pin */
+
+#define GX_CTRLX_GPIO_DIR (GX_CTRLX_GPIO_DIR_4)
+#define GX_CTRLX_GPIO_DIR_MASK (GX_CTRLX_GPIO_DIR_4 | GX_CTRLX_GPIO_DIR_5 | \
+ GX_CTRLX_GPIO_DIR_6 | GX_CTRLX_GPIO_DIR_7)
+#define GX_CTRLX_PHY_RESET GX_CTRLX_GPIO_4
+
+/* MDIO bit-bang framing constants (IEEE 802.3 clause 22 style frames) */
+#define GX_PHY_PREAMBLE 0xffffffff
+#define GX_PHY_PREAMBLE_LEN 32
+#define GX_PHY_SOF 0x01
+#define GX_PHY_TURNAROUND 0x02
+#define GX_PHY_OP_WRITE 0x01
+#define GX_PHY_OP_READ 0x02
+#define GX_PHY_READ_LEN 14
+#define GX_PHY_WRITE_LEN 32
+
+/* GX_RX_ADDR registers */
+#define GX_RA_VALID 0x80000000 /* "address valid" flag bit */
+
+/* GX_TX_CONFIG register */
+#define GX_TXCFG_AUTONEG 0x80000000
+/*
+ * XXX(review): GX_TXCFG_SWCONFIG has the same value as GX_TXCFG_AUTONEG;
+ * one of the two is probably a typo -- verify against the chip manual.
+ */
+#define GX_TXCFG_SWCONFIG 0x80000000
+
+/* GX_RX_CONFIG register */
+#define GX_RXCFG_INVALID 0x08000000
+
+/* GX_RX_CONTROL register */
+#define GX_RXC_RESET 0x00000001
+#define GX_RXC_ENABLE 0x00000002
+#define GX_RXC_STORE_BAD_PKT 0x00000004
+#define GX_RXC_UNI_PROMISC 0x00000008
+#define GX_RXC_MULTI_PROMISC 0x00000010
+#define GX_RXC_LONG_PKT_ENABLE 0x00000020
+#define GX_RXC_LOOPBACK 0x000000C0 /* 2-bit loopback mode field */
+#define GX_RXC_RX_THOLD_MASK 0x00000300 /* values enumerated below */
+#define GX_RXC_MCAST_OFF_MASK 0x00003000
+#define GX_RXC_BCAST_ACCEPT 0x00008000
+#define GX_RXC_RX_BSIZE_MASK 0x00030000 /* values enumerated below */
+#define GX_RXC_VLAN_ENABLE 0x00040000
+#define GX_RXC_CFI_ENABLE 0x00080000 /* canonical form enable */
+#define GX_RXC_CFI 0x00100000
+#define GX_RXC_DISCARD_PAUSE 0x00400000
+#define GX_RXC_PASS_MAC 0x00800000
+#define GX_RXC_RX_BSIZE_SCALE 0x02000000 /* multiply BSIZE by 16 */
+#define GX_RXC_STRIP_ETHERCRC 0x04000000
+
+/* bits for GX_RXC_RX_THOLD (free-descriptor interrupt threshold) */
+#define GX_RXC_RX_THOLD_HALF 0x00000000
+#define GX_RXC_RX_THOLD_QUARTER 0x00000100
+#define GX_RXC_RX_THOLD_EIGHTH 0x00000200
+
+/* bits for GX_RXC_RX_BSIZE_MASK (receive buffer size selection) */
+#define GX_RXC_RX_BSIZE_2K 0x00000000
+#define GX_RXC_RX_BSIZE_1K 0x00010000
+#define GX_RXC_RX_BSIZE_512 0x00020000
+#define GX_RXC_RX_BSIZE_256 0x00030000
+
+/* GX_TX_CONTROL register */
+#define GX_TXC_RESET 0x00000001
+#define GX_TXC_ENABLE 0x00000002
+#define GX_TXC_PAD_SHORT_PKTS 0x00000008
+#define GX_TXC_COLL_RETRY_MASK 0x00000FF0
+#define GX_TXC_COLL_TIME_MASK 0x003FF000
+#define GX_TXC_XMIT_XOFF 0x00400000
+#define GX_TXC_PKT_BURST_ENABLE 0x00800000
+#define GX_TXC_REXMT_LATE_COLL 0x01000000
+#define GX_TXC_NO_REXMT_UNDERRN 0x02000000
+
+/* bits for GX_TXC_COLL_RETRY_MASK */
+#define GX_TXC_COLL_RETRY_16 0x000000F0 /* 16 attempts at retransmit */
+
+/* bits for GX_TXC_COLL_TIME_MASK (collision distance, half/full duplex) */
+#define GX_TXC_COLL_TIME_HDX 0x00200000
+#define GX_TXC_COLL_TIME_FDX 0x00040000
+
+/* GX_INT bits (interrupt cause/mask registers) */
+#define GX_INT_XMIT_DONE 0x00000001
+#define GX_INT_XMIT_EMPTY 0x00000002
+#define GX_INT_LINK_CHANGE 0x00000004
+#define GX_INT_RCV_SEQ_ERR 0x00000008
+#define GX_INT_RCV_THOLD 0x00000010
+#define GX_INT_RCV_OVERRUN 0x00000040
+#define GX_INT_RCV_TIMER 0x00000080
+#define GX_INT_MDIO_DONE 0x00000200
+#define GX_INT_C_SETS 0x00000400
+#define GX_INT_GPI_MASK 0x00007800 /* general purpose interrupts, 4 bits */
+
+#define GX_INT_ALL \
+ (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY | GX_INT_LINK_CHANGE | \
+ GX_INT_RCV_SEQ_ERR | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN | \
+ GX_INT_RCV_TIMER | GX_INT_MDIO_DONE | GX_INT_C_SETS | GX_INT_GPI_MASK)
+
+/* the "#if 0" variant below is the fuller interrupt set, kept for reference */
+#if 0
+#define GX_INT_WANTED \
+ (GX_INT_XMIT_DONE | /*GX_INT_XMIT_EMPTY |*/ GX_INT_LINK_CHANGE | \
+ GX_INT_RCV_SEQ_ERR | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN | \
+ GX_INT_RCV_TIMER | GX_INT_C_SETS)
+#else
+#define GX_INT_WANTED \
+ (GX_INT_XMIT_DONE | GX_INT_RCV_THOLD | GX_INT_RCV_TIMER | \
+ GX_INT_LINK_CHANGE)
+#endif
+
+/* PCI space (standard PCI configuration-space offsets) */
+#define GX_PCI_VENDOR_ID 0x0000
+#define GX_PCI_DEVICE_ID 0x0002
+#define GX_PCI_COMMAND 0x0004
+#define GX_PCI_STATUS 0x0006
+#define GX_PCI_REVID 0x0008
+#define GX_PCI_CLASSCODE 0x0009
+#define GX_PCI_CACHELEN 0x000C
+#define GX_PCI_LATENCY_TIMER 0x000D
+#define GX_PCI_HEADER_TYPE 0x000E
+#define GX_PCI_LOMEM 0x0010
+#define GX_PCI_SUBVEN_ID 0x002C
+#define GX_PCI_SYBSYS_ID 0x002E /* sic: "SUBSYS"; name kept for callers */
+#define GX_PCI_BIOSROM 0x0030
+#define GX_PCI_CAPABILITY_PTR 0x0034
+#define GX_PCI_INTLINE 0x003C
+#define GX_PCI_INTPIN 0x003D
+#define GX_PCI_MINGNT 0x003E
+#define GX_PCI_MINLAT 0x003F
+
+/*
+ * Generic TX descriptor.  Only the discriminator fields (tx_type,
+ * tx_extended) are named; the concrete layouts are gx_tx_desc_old,
+ * gx_tx_desc_ctx and gx_tx_desc_data below.
+ *
+ * The original declaration ran both bytes of bit-fields together in a
+ * single declarator list ("tx_type:4, u_int8_t :5, ..."), which is a
+ * syntax error; they must be two separate u_int8_t declarations.
+ */
+struct gx_tx_desc {
+	u_int64_t :64;
+	u_int16_t :16;
+	u_int8_t :4,
+	    tx_type:4;
+	u_int8_t :5,
+	    tx_extended:1,
+	    :2;
+	u_int32_t :32;
+};
+
+/* legacy TX descriptor */
+struct gx_tx_desc_old {
+ u_int64_t tx_addr; /* 64-bit DMA address of the data buffer */
+ u_int16_t tx_len; /* length of the buffer */
+ u_int8_t tx_csum_offset; /* where to insert csum (w/ INSERT_CSUM) */
+ u_int8_t tx_command; /* GX_TXOLD_* command bits below */
+ u_int8_t tx_status; /* GX_TXSTAT_* bits, written back by chip */
+ u_int8_t tx_csum_start; /* offset at which csum computation starts */
+ u_int16_t tx_vlan; /* vlan tag (w/ VLAN_ENABLE) */
+};
+
+/* bits for tx_command */
+#define GX_TXOLD_END_OF_PKT 0x01 /* end of packet */
+#define GX_TXOLD_ETHER_CRC 0x02 /* insert ethernet CRC */
+#define GX_TXOLD_INSERT_CSUM 0x04 /* insert checksum */
+#define GX_TXOLD_REPORT_STATUS 0x08 /* report packet status */
+#define GX_TXOLD_REPORT_SENT 0x10 /* report packet sent */
+#define GX_TXOLD_EXTENSION 0x20 /* extended format */
+#define GX_TXOLD_VLAN_ENABLE 0x40 /* use vlan */
+#define GX_TXOLD_INT_DELAY 0x80 /* delay interrupt */
+
+/* bits for tx_status */
+#define GX_TXSTAT_DONE 0x01 /* descriptor done */
+#define GX_TXSTAT_EXCESS_COLL 0x02 /* excess collisions */
+#define GX_TXSTAT_LATE_COLL 0x04 /* late collision */
+#define GX_TXSTAT_UNDERRUN 0x08 /* transmit underrun */
+
+/* TX descriptor for checksum offloading context */
+struct gx_tx_desc_ctx {
+ u_int8_t tx_ip_csum_start; /* IP csum: start/insert/end offsets */
+ u_int8_t tx_ip_csum_offset;
+ u_int16_t tx_ip_csum_end;
+ u_int8_t tx_tcp_csum_start; /* TCP/UDP csum: start/insert/end offsets */
+ u_int8_t tx_tcp_csum_offset;
+ u_int16_t tx_tcp_csum_end;
+ u_int32_t tx_len:20,
+ tx_type:4,
+ tx_command:8; /* GX_TXCTX_* bits below */
+ u_int8_t tx_status;
+ u_int8_t tx_hdrlen;
+ u_int16_t tx_mss;
+};
+
+/* bits for tx_command */
+#define GX_TXCTX_TCP_PKT 0x01 /* its a TCP packet */
+#define GX_TXCTX_IP_PKT 0x02 /* its an IP packet */
+#define GX_TXCTX_TCP_SEG_EN 0x04 /* TCP segmentation enable */
+#define GX_TXCTX_REPORT_STATUS 0x08 /* report packet status */
+#define GX_TXCTX_EXTENSION 0x20 /* extended format */
+#define GX_TXCTX_INT_DELAY 0x80 /* delay interrupt */
+
+/* TX descriptor for data */
+struct gx_tx_desc_data {
+ u_int64_t tx_addr; /* 64-bit DMA address of the data buffer */
+ u_int32_t tx_len:20,
+ tx_type:4,
+ tx_command:8; /* GX_TXTCP_* bits below */
+ u_int8_t tx_status; /* written back by chip */
+ u_int8_t tx_options; /* GX_TXTCP_OPT_* bits below */
+ u_int16_t tx_vlan; /* vlan tag (w/ VLAN_ENABLE) */
+};
+
+/* bits for tx_command */
+#define GX_TXTCP_END_OF_PKT 0x01 /* end of packet */
+#define GX_TXTCP_ETHER_CRC 0x02 /* insert ethernet CRC */
+#define GX_TXTCP_TCP_SEG_EN 0x04 /* TCP segmentation enable */
+#define GX_TXTCP_REPORT_STATUS 0x08 /* report packet status */
+#define GX_TXTCP_REPORT_SENT 0x10 /* report packet sent */
+#define GX_TXTCP_EXTENSION 0x20 /* extended format */
+#define GX_TXTCP_VLAN_ENABLE 0x40 /* use vlan */
+#define GX_TXTCP_INT_DELAY 0x80 /* delay interrupt */
+
+/* bits for tx_options */
+#define GX_TXTCP_OPT_IP_CSUM 0x01 /* insert IP checksum */
+#define GX_TXTCP_OPT_TCP_CSUM 0x02 /* insert UDP/TCP checksum */
+
+/* RX descriptor data structure */
+struct gx_rx_desc {
+ u_int64_t rx_addr; /* 64-bit DMA address of receive buffer */
+ u_int16_t rx_len; /* length of received data */
+ u_int16_t rx_csum; /* hardware-computed checksum */
+ u_int16_t rx_staterr; /* status + error fields */
+ u_int16_t rx_special; /* vlan tag, per GX_RXSTAT_VLAN_PKT */
+};
+
+/* bits for rx_status portion of rx_staterr */
+#define GX_RXSTAT_COMPLETED 0x01 /* completed */
+#define GX_RXSTAT_END_OF_PACKET 0x02 /* end of this packet */
+#define GX_RXSTAT_IGNORE_CSUM 0x04 /* ignore computed checksum */
+#define GX_RXSTAT_VLAN_PKT 0x08 /* matched vlan */
+#define GX_RXSTAT_HAS_TCP_CSUM 0x20 /* TCP checksum calculated */
+#define GX_RXSTAT_HAS_IP_CSUM 0x40 /* IP checksum calculated */
+#define GX_RXSTAT_INEXACT_MATCH 0x80 /* must check address */
+
+/* bits for rx_error portion of rx_staterr */
+#define GX_RXERR_CRC 0x0100 /* CRC or alignment error */
+#define GX_RXERR_SYMBOL 0x0200 /* symbol error */
+#define GX_RXERR_SEQUENCE 0x0400 /* sequence error */
+#define GX_RXERR_CARRIER 0x1000 /* carrier extension error */
+#define GX_RXERR_TCP_CSUM 0x2000 /* TCP/UDP checksum error */
+#define GX_RXERR_IP_CSUM 0x4000 /* IP checksum error */
+#define GX_RXERR_RX_DATA 0x8000 /* RX data error */
+
+/* drop packet on these errors (csum errors are handled, not dropped) */
+#define GX_INPUT_ERROR \
+ (GX_RXERR_CRC | GX_RXERR_SYMBOL | GX_RXERR_SEQUENCE | \
+ GX_RXERR_CARRIER | GX_RXERR_RX_DATA)
diff --git a/sys/dev/pci/if_gxvar.h b/sys/dev/pci/if_gxvar.h
new file mode 100644
index 00000000000..97c66dc710b
--- /dev/null
+++ b/sys/dev/pci/if_gxvar.h
@@ -0,0 +1,178 @@
+/* $OpenBSD: if_gxvar.h,v 1.1 2002/04/02 13:03:31 nate Exp $ */
+/*-
+ * Copyright (c) 1999,2000,2001 Jonathan Lemon
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * FreeBSD compatibility shims for this port: the mutex/locking
+ * primitives expand to nothing here, and struct mtx is a dummy.
+ */
+#define GX_LOCK(gx)
+#define GX_UNLOCK(gx)
+#define mtx_init(a, b, c)
+#define mtx_destroy(a)
+struct mtx { int filler; };
+
+/* PCI command register: Memory Write and Invalidate enable bit */
+#ifndef PCIM_CMD_MWIEN
+#define PCIM_CMD_MWIEN 0x0010
+#endif
+
+/* pad ethernet headers so the IP header lands 4-byte aligned */
+#define ETHER_ALIGN 2
+
+/*
+ * Register access helpers.  CSR_WRITE_8 assumes the register is in
+ * low/high order and performs two 32-bit writes.
+ *
+ * All macro arguments are parenthesized in the expansions: the previous
+ * "val >> 32" / "reg + 4" forms mis-bound when callers passed compound
+ * expressions (e.g. "a | b" would expand to "a | (b >> 32)").
+ */
+#define CSR_WRITE_8(gx, reg, val) do { \
+	bus_space_write_4((gx)->gx_btag, (gx)->gx_bhandle, (reg), \
+	    (val) & 0xffffffff); \
+	bus_space_write_4((gx)->gx_btag, (gx)->gx_bhandle, (reg) + 4, \
+	    (val) >> 32); \
+} while (0)
+#define CSR_WRITE_4(gx, reg, val) \
+	bus_space_write_4((gx)->gx_btag, (gx)->gx_bhandle, (reg), (val))
+#define CSR_WRITE_2(gx, reg, val) \
+	bus_space_write_2((gx)->gx_btag, (gx)->gx_bhandle, (reg), (val))
+#define CSR_WRITE_1(gx, reg, val) \
+	bus_space_write_1((gx)->gx_btag, (gx)->gx_bhandle, (reg), (val))
+
+#define CSR_READ_4(gx, reg) \
+	bus_space_read_4((gx)->gx_btag, (gx)->gx_bhandle, (reg))
+#define CSR_READ_2(gx, reg) \
+	bus_space_read_2((gx)->gx_btag, (gx)->gx_bhandle, (reg))
+#define CSR_READ_1(gx, reg) \
+	bus_space_read_1((gx)->gx_btag, (gx)->gx_bhandle, (reg))
+
+/* read-modify-write helpers to set/clear bits in a 32-bit register */
+#define GX_SETBIT(gx, reg, x) \
+	CSR_WRITE_4(gx, reg, (CSR_READ_4(gx, reg) | (x)))
+#define GX_CLRBIT(gx, reg, x) \
+	CSR_WRITE_4(gx, reg, (CSR_READ_4(gx, reg) & ~(x)))
+
+/*
+ * In theory, these can go up to 64K each, but due to chip bugs,
+ * they are limited to 256 max.  Descriptor counts should be a
+ * multiple of 8.
+ */
+#define GX_TX_RING_CNT		256
+#define GX_RX_RING_CNT		256
+
+/*
+ * Ring index arithmetic.  Arguments are fully parenthesized so that
+ * expression arguments expand with the intended precedence (the old
+ * "(x + 1) % y" form broke for e.g. GX_INC(i, a + b)).
+ * Note GX_INC is a statement (it assigns to its first argument).
+ */
+#define GX_INC(x, y)	(x) = ((x) + 1) % (y)
+#define GX_PREV(x, y)	((x) == 0 ? (y) - 1 : (x) - 1)
+
+/* largest frame size the driver will configure */
+#define GX_MAX_MTU	(16 * 1024)
+
+/* RX and TX descriptor rings; allocated together (see gx_ring_map) */
+struct gx_ring_data {
+ struct gx_rx_desc gx_rx_ring[GX_RX_RING_CNT];
+ struct gx_tx_desc gx_tx_ring[GX_TX_RING_CNT];
+};
+
+/*
+ * Number of DMA segments in a TxCB. Note that this is carefully
+ * chosen to make the total struct size an even power of two. It's
+ * critical that no TxCB be split across a page boundry since
+ * no attempt is made to allocate physically contiguous memory.
+ *
+ */
+#ifdef __alpha__ /* XXX - should be conditional on pointer size */
+#define GX_NTXSEG 30
+#else
+#define GX_NTXSEG 31
+#endif
+
+#define GX_NRXSEG GX_NTXSEG
+
+/* per-descriptor mbuf pointers and DMA maps, parallel to the rings */
+struct gx_chain_data {
+ struct mbuf *gx_rx_chain[GX_RX_RING_CNT];
+ struct mbuf *gx_tx_chain[GX_TX_RING_CNT];
+ bus_dmamap_t gx_rx_map[GX_RX_RING_CNT];
+ bus_dmamap_t gx_tx_map[GX_TX_RING_CNT];
+};
+
+/*
+ * Per-chip-variant register offsets for the RX/TX rings; filled in at
+ * attach time (presumably to cope with old vs. new register maps --
+ * see GXF_OLD_REGS).
+ */
+struct gx_regs {
+ int r_rx_base;
+ int r_rx_length;
+ int r_rx_head;
+ int r_rx_tail;
+ int r_rx_delay;
+ int r_rx_dma_ctrl;
+
+ int r_tx_base;
+ int r_tx_length;
+ int r_tx_head;
+ int r_tx_tail;
+ int r_tx_delay;
+ int r_tx_dma_ctrl;
+};
+
+/* per-device driver state */
+struct gx_softc {
+ struct device gx_dev;
+ struct arpcom arpcom; /* interface info */
+ struct ifmedia gx_media; /* media info */
+ bus_space_handle_t gx_bhandle; /* bus space handle */
+ bus_space_tag_t gx_btag; /* bus space tag */
+ void *gx_intrhand; /* interrupt handler cookie */
+ struct mii_data gx_mii;
+ u_int8_t gx_tbimode; /* transceiver flag */
+ int gx_vflags; /* version-specific flags (GXF_*) */
+ u_int32_t gx_ipg; /* version-specific IPG */
+ bus_dma_tag_t gx_dmatag;
+ struct gx_ring_data *gx_rdata; /* descriptor rings */
+ struct gx_chain_data gx_cdata; /* mbufs/maps for the rings */
+ bus_dmamap_t gx_ring_map; /* DMA map for gx_rdata */
+ int gx_if_flags; /* last seen if_flags */
+ struct mbuf *gx_pkthdr; /* partial rx packet -- TODO confirm */
+ struct mbuf **gx_pktnextp;
+ int gx_rx_tail_idx; /* receive ring tail index */
+ int gx_tx_tail_idx; /* transmit ring tail index */
+ int gx_tx_head_idx; /* transmit ring head index */
+ int gx_txcnt; /* # of active tx descriptors */
+ int gx_txcontext; /* current TX context (GX_TXCONTEXT_*) */
+ struct gx_regs gx_reg; /* variant register offsets */
+ struct mtx gx_mtx; /* unused compat stub, see struct mtx */
+
+/* tunables */
+ int gx_tx_intr_delay;
+ int gx_rx_intr_delay;
+
+/* statistics */
+ int gx_tx_interrupts;
+ int gx_rx_interrupts;
+ int gx_interrupts;
+};
+
+/*
+ * flags to compensate for differing chip variants (stored in gx_vflags)
+ */
+#define GXF_FORCE_TBI 0x0001 /* force TBI mode on */
+#define GXF_DMA 0x0002 /* has DMA control registers */
+#define GXF_ENABLE_MWI 0x0004 /* supports MWI burst mode */
+#define GXF_OLD_REGS 0x0008 /* use old register mapping */
+#define GXF_CSUM 0x0010 /* hardware checksum offload */
+#define GXF_WISEMAN 0x0020 /* Wiseman variant */
+#define GXF_LIVENGOOD 0x0040 /* Livengood variant */
+#define GXF_CORDOVA 0x0080 /* Cordova variant */
+
+/*
+ * TX Context definitions (values for gx_softc.gx_txcontext).
+ */
+#define GX_TXCONTEXT_NONE 0
+#define GX_TXCONTEXT_TCPIP 1
+#define GX_TXCONTEXT_UDPIP 2