author    Brad Smith <brad@cvs.openbsd.org>    2006-05-01 00:34:13 +0000
committer Brad Smith <brad@cvs.openbsd.org>    2006-05-01 00:34:13 +0000
commit    a86ae40b0df0a0e4bd0c3d2ed5e8240a0f9bdcd8 (patch)
tree      c0a0d880a7729da16979f558b297ccc063151791 /sys/dev/pci
parent    c5a18b440d76f41ad6e200c013bac9ccb146ab59 (diff)
initial port of a driver for the Neterion Xframe-I 10Gb Ethernet adapter.
From NetBSD
Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/files.pci      7
-rw-r--r--  sys/dev/pci/if_xge.c       1323
-rw-r--r--  sys/dev/pci/if_xgereg.h    427
3 files changed, 1756 insertions(+), 1 deletion(-)
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index 0bfbdd3797a..14949acd316 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -1,4 +1,4 @@
-# $OpenBSD: files.pci,v 1.203 2006/04/26 15:53:08 jason Exp $
+# $OpenBSD: files.pci,v 1.204 2006/05/01 00:34:12 brad Exp $
# $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
#
# Config file and device description for machine-independent PCI code.
@@ -321,6 +321,11 @@ file dev/pci/if_ixgb.c ixgb
file dev/pci/ixgb_ee.c ixgb
file dev/pci/ixgb_hw.c ixgb
+# Neterion Xframe 10 Gigabit ethernet
+device xge: ether, ifnet, ifmedia
+attach xge at pci
+file dev/pci/if_xge.c xge
+
# DEC/Intel 21143 and "tulip" clone ethernet
attach dc at pci with dc_pci
file dev/pci/if_dc_pci.c dc_pci
diff --git a/sys/dev/pci/if_xge.c b/sys/dev/pci/if_xge.c
new file mode 100644
index 00000000000..6ef00961578
--- /dev/null
+++ b/sys/dev/pci/if_xge.c
@@ -0,0 +1,1323 @@
+/* $OpenBSD: if_xge.c,v 1.1 2006/05/01 00:34:12 brad Exp $ */
+/* $NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $ */
+
+/*
+ * Copyright (c) 2004, SUNET, Swedish University Computer Network.
+ * All rights reserved.
+ *
+ * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * SUNET, Swedish University Computer Network.
+ * 4. The name of SUNET may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Device driver for the Neterion Xframe Ten Gigabit Ethernet controller.
+ *
+ * TODO (in no specific order):
+ * HW VLAN support.
+ * IPv6 HW cksum.
+ */
+
+#include <sys/cdefs.h>
+#if 0
+__KERNEL_RCSID(0, "$NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $");
+#endif
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/device.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/endian.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcidevs.h>
+
+#include <sys/lock.h>
+#include <sys/proc.h>
+
+#include <dev/pci/if_xgereg.h>
+
+/*
+ * Some tunable constants, tune with care!
+ */
+#define RX_MODE RX_MODE_1 /* Receive mode (buffer usage, see below) */
+#define NRXDESCS 1016 /* # of receive descriptors (requested) */
+#define NTXDESCS 8192 /* Number of transmit descriptors */
+#define NTXFRAGS 100 /* Max fragments per packet */
+
+/*
+ * Receive buffer modes; 1, 3 or 5 buffers.
+ */
+#define RX_MODE_1 1
+#define RX_MODE_3 3
+#define RX_MODE_5 5
+
+/*
+ * Use clever macros to avoid a bunch of #ifdef's.
+ */
+#define XCONCAT3(x,y,z) x ## y ## z
+#define CONCAT3(x,y,z) XCONCAT3(x,y,z)
+#define NDESC_BUFMODE CONCAT3(NDESC_,RX_MODE,BUFMODE)
+#define rxd_4k CONCAT3(rxd,RX_MODE,_4k)
+/* XXX */
+#if 0
+#define rxdesc ___CONCAT(rxd,RX_MODE)
+#endif
+#define rxdesc rxd1
+
+#define NEXTTX(x) (((x)+1) % NTXDESCS)
+#define NRXFRAGS RX_MODE /* hardware imposed frags */
+#define NRXPAGES ((NRXDESCS/NDESC_BUFMODE)+1)
+#define NRXREAL (NRXPAGES*NDESC_BUFMODE)
+#define RXMAPSZ (NRXPAGES*PAGE_SIZE)
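+
+/*
+ * Example expansion: with RX_MODE == RX_MODE_1, NDESC_BUFMODE
+ * becomes NDESC_1BUFMODE (127 descriptors per 4k page) and rxd_4k
+ * becomes rxd1_4k; see if_xgereg.h for the per-mode layouts.
+ */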
+
+/*
+ * Magic values to fix a bug where the MAC address can't be read
+ * correctly. They come from the Linux driver.
+ */
+static uint64_t fix_mac[] = {
+ 0x0060000000000000ULL, 0x0060600000000000ULL,
+ 0x0040600000000000ULL, 0x0000600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0060600000000000ULL,
+ 0x0020600000000000ULL, 0x0000600000000000ULL,
+ 0x0040600000000000ULL, 0x0060600000000000ULL,
+};
+
+struct xge_softc {
+ struct device sc_dev;
+ struct arpcom sc_arpcom;
+#define sc_if sc_arpcom.ac_if
+ bus_dma_tag_t sc_dmat;
+ bus_space_tag_t sc_st;
+ bus_space_handle_t sc_sh;
+ bus_space_tag_t sc_txt;
+ bus_space_handle_t sc_txh;
+ void *sc_ih;
+
+ struct ifmedia xena_media;
+ pcireg_t sc_pciregs[16];
+
+ /* Transmit structures */
+ struct txd *sc_txd[NTXDESCS]; /* transmit frags array */
+ bus_addr_t sc_txdp[NTXDESCS]; /* bus address of transmit frags */
+ bus_dmamap_t sc_txm[NTXDESCS]; /* transmit frags map */
+ struct mbuf *sc_txb[NTXDESCS]; /* transmit mbuf pointer */
+ int sc_nexttx, sc_lasttx;
+ bus_dmamap_t sc_txmap; /* transmit descriptor map */
+
+ /* Receive data */
+ bus_dmamap_t sc_rxmap; /* receive descriptor map */
+ struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
+ bus_dmamap_t sc_rxm[NRXREAL]; /* receive buffer map */
+ struct mbuf *sc_rxb[NRXREAL]; /* mbufs on receive descriptors */
+ int sc_nextrx; /* next descriptor to check */
+};
+
+int xge_match(struct device *, void *, void *);
+void xge_attach(struct device *, struct device *, void *);
+int xge_alloc_txmem(struct xge_softc *);
+int xge_alloc_rxmem(struct xge_softc *);
+void xge_start(struct ifnet *);
+void xge_stop(struct ifnet *, int);
+int xge_add_rxbuf(struct xge_softc *, int);
+void xge_mcast_filter(struct xge_softc *);
+int xge_setup_xgxs(struct xge_softc *);
+int xge_ioctl(struct ifnet *, u_long, caddr_t);
+int xge_init(struct ifnet *);
+void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
+int xge_xgmii_mediachange(struct ifnet *);
+void xge_enable(struct xge_softc *);
+int xge_intr(void *);
+
+/*
+ * Helpers to address registers.
+ */
+#define PIF_WCSR(csr, val) pif_wcsr(sc, csr, val)
+#define PIF_RCSR(csr) pif_rcsr(sc, csr)
+#define TXP_WCSR(csr, val) txp_wcsr(sc, csr, val)
+#define PIF_WKEY(csr, val) pif_wkey(sc, csr, val)
+
+static inline void
+pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
+{
+ uint32_t lval, hval;
+
+ lval = val&0xffffffff;
+ hval = val>>32;
+ bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
+ bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
+}
+
+static inline uint64_t
+pif_rcsr(struct xge_softc *sc, bus_size_t csr)
+{
+ uint64_t val, val2;
+ val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
+ val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
+ val |= (val2 << 32);
+ return val;
+}
+
+static inline void
+txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
+{
+ uint32_t lval, hval;
+
+ lval = val&0xffffffff;
+ hval = val>>32;
+ bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
+ bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
+}
+
+static inline void
+pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
+{
+ uint32_t lval, hval;
+
+ lval = val&0xffffffff;
+ hval = val>>32;
+ PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
+ bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
+ PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);
+ bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
+}
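+
+/*
+ * Note that pif_wkey() rewrites RMAC_CFG_KEY before each 32-bit
+ * half; the key appears to arm only the next keyed register access,
+ * so a single key write would not cover both halves.
+ */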
+
+struct cfattach xge_ca = {
+ sizeof(struct xge_softc), xge_match, xge_attach
+};
+
+struct cfdriver xge_cd = {
+ 0, "xge", DV_IFNET
+};
+
+#define XNAME sc->sc_dev.dv_xname
+
+#define XGE_RXSYNC(desc, what) \
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
+ (desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
+ (desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
+#define XGE_RXD(desc) &sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
+ r4_rxd[desc%NDESC_BUFMODE]
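+
+/*
+ * Receive descriptors are stored NDESC_BUFMODE to a 4k page, so
+ * desc/NDESC_BUFMODE selects the page and desc%NDESC_BUFMODE the
+ * slot within it; XGE_RXSYNC syncs just that one descriptor.
+ */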
+
+/*
+ * Non-tunable constants.
+ */
+#define XGE_MAX_MTU 9600
+
+const struct pci_matchid xge_devices[] = {
+ { PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
+};
+
+int
+xge_match(struct device *parent, void *match, void *aux)
+{
+ return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
+ sizeof(xge_devices)/sizeof(xge_devices[0])));
+}
+
+void
+xge_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct pci_attach_args *pa = aux;
+ struct xge_softc *sc;
+ struct ifnet *ifp;
+ pcireg_t memtype;
+ pci_intr_handle_t ih;
+ const char *intrstr = NULL;
+ pci_chipset_tag_t pc = pa->pa_pc;
+ uint8_t enaddr[ETHER_ADDR_LEN];
+ uint64_t val;
+ int i;
+
+ sc = (struct xge_softc *)self;
+
+ sc->sc_dmat = pa->pa_dmat;
+
+ /* Get BAR0 address */
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
+ if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
+ &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
+ printf("%s: unable to map PIF BAR registers\n", XNAME);
+ return;
+ }
+
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
+ if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
+ &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
+ printf("%s: unable to map TXP BAR registers\n", XNAME);
+ return;
+ }
+
+ /* Save PCI config space */
+ for (i = 0; i < 64; i += 4)
+ sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+ val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
+ val &= ~(TxF_R_SE|RxF_W_SE);
+ PIF_WCSR(SWAPPER_CTRL, val);
+ PIF_WCSR(SWAPPER_CTRL, val);
+#elif BYTE_ORDER == BIG_ENDIAN
+ /* do nothing */
+#else
+#error bad endianness!
+#endif
+
+ if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
+ printf("%s: failed configuring endian, %llx != %llx!\n",
+ XNAME, (unsigned long long)val, SWAPPER_MAGIC);
+ return;
+ }
+
+	/*
+	 * The MAC addr may be all FF's, which is not good.
+	 * Resolve it by writing some magic values to GPIO_CONTROL and
+	 * forcing a chip reset to read in the serial EEPROM again.
+	 */
+ for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
+ PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
+ PIF_RCSR(GPIO_CONTROL);
+ }
+
+ /*
+ * Reset the chip and restore the PCI registers.
+ */
+ PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
+ DELAY(500000);
+ for (i = 0; i < 64; i += 4)
+ pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);
+
+ /*
+ * Restore the byte order registers.
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
+ val &= ~(TxF_R_SE|RxF_W_SE);
+ PIF_WCSR(SWAPPER_CTRL, val);
+ PIF_WCSR(SWAPPER_CTRL, val);
+#elif BYTE_ORDER == BIG_ENDIAN
+ /* do nothing */
+#else
+#error bad endianness!
+#endif
+
+ if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
+ printf("%s: failed configuring endian2, %llx != %llx!\n",
+ XNAME, (unsigned long long)val, SWAPPER_MAGIC);
+ return;
+ }
+
+ /*
+ * XGXS initialization.
+ */
+ /* 29, reset */
+ PIF_WCSR(SW_RESET, 0);
+ DELAY(500000);
+
+ /* 30, configure XGXS transceiver */
+ xge_setup_xgxs(sc);
+
+ /* 33, program MAC address (not needed here) */
+ /* Get ethernet address */
+ PIF_WCSR(RMAC_ADDR_CMD_MEM,
+ RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
+ while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
+ ;
+ val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ enaddr[i] = (uint8_t)(val >> (56 - (8*i)));
+
+ /*
+ * Get memory for transmit descriptor lists.
+ */
+ if (xge_alloc_txmem(sc)) {
+ printf("%s: failed allocating txmem.\n", XNAME);
+ return;
+ }
+
+ /* 9 and 10 - set FIFO number/prio */
+ PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
+ PIF_WCSR(TX_FIFO_P1, 0ULL);
+ PIF_WCSR(TX_FIFO_P2, 0ULL);
+ PIF_WCSR(TX_FIFO_P3, 0ULL);
+
+ /* 11, XXX set round-robin prio? */
+
+ /* 12, enable transmit FIFO */
+ val = PIF_RCSR(TX_FIFO_P0);
+ val |= TX_FIFO_ENABLE;
+ PIF_WCSR(TX_FIFO_P0, val);
+
+ /* 13, disable some error checks */
+ PIF_WCSR(TX_PA_CFG,
+ TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);
+
+ /* Create transmit DMA maps */
+ for (i = 0; i < NTXDESCS; i++) {
+ if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
+ NTXFRAGS, MCLBYTES, 0, 0, &sc->sc_txm[i])) {
+ printf("%s: cannot create TX DMA maps\n", XNAME);
+ return;
+ }
+ }
+
+ sc->sc_lasttx = NTXDESCS-1;
+
+ /*
+ * RxDMA initialization.
+ * Only use one out of 8 possible receive queues.
+ */
+ /* allocate rx descriptor memory */
+ if (xge_alloc_rxmem(sc)) {
+ printf("%s: failed allocating rxmem\n", XNAME);
+ return;
+ }
+
+ /* Create receive buffer DMA maps */
+ for (i = 0; i < NRXREAL; i++) {
+ if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_MTU,
+ NRXFRAGS, MCLBYTES, 0, 0, &sc->sc_rxm[i])) {
+ printf("%s: cannot create RX DMA maps\n", XNAME);
+ return;
+ }
+ }
+
+ /* allocate mbufs to receive descriptors */
+ for (i = 0; i < NRXREAL; i++)
+ if (xge_add_rxbuf(sc, i))
+ panic("out of mbufs too early");
+
+ /* 14, setup receive ring priority */
+ PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL); /* only use one ring */
+
+ /* 15, setup receive ring round-robin calendar */
+ PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL); /* only use one ring */
+ PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
+ PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
+ PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
+ PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);
+
+ /* 16, write receive ring start address */
+ PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
+ /* PRC_RXD0_[1-7] are not used */
+
+ /* 17, Setup alarm registers */
+ PIF_WCSR(PRC_ALARM_ACTION, 0ULL); /* Default everything to retry */
+
+ /* 18, init receive ring controller */
+#if RX_MODE == RX_MODE_1
+ val = RING_MODE_1;
+#elif RX_MODE == RX_MODE_3
+ val = RING_MODE_3;
+#else /* RX_MODE == RX_MODE_5 */
+ val = RING_MODE_5;
+#endif
+ PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
+ /* leave 1-7 disabled */
+ /* XXXX snoop configuration? */
+
+ /* 19, set chip memory assigned to the queue */
+ PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64)); /* all 64M to queue 0 */
+
+ /* 20, setup RLDRAM parameters */
+ /* do not touch it for now */
+
+ /* 21, setup pause frame thresholds */
+	/* do not touch the defaults */
+ /* XXX - must 0xff be written as stated in the manual? */
+
+ /* 22, configure RED */
+ /* we do not want to drop packets, so ignore */
+
+ /* 23, initiate RLDRAM */
+ val = PIF_RCSR(MC_RLDRAM_MRS);
+ val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
+ PIF_WCSR(MC_RLDRAM_MRS, val);
+ DELAY(1000);
+
+ /*
+ * Setup interrupt policies.
+ */
+ /* 40, Transmit interrupts */
+ PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
+ TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
+ PIF_WCSR(TTI_DATA2_MEM,
+ TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
+ PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
+ while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
+ ;
+
+ /* 41, Receive interrupts */
+ PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
+ RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
+ PIF_WCSR(RTI_DATA2_MEM,
+ RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
+ PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
+ while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
+ ;
+
+ /*
+ * Setup media stuff.
+ */
+ ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
+ xge_ifmedia_status);
+ ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_1000_SX, 0, NULL);
+ ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_1000_SX);
+
+ printf(", address %s\n", ether_sprintf(enaddr));
+
+ ifp = &sc->sc_arpcom.ac_if;
+ strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
+	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
+ ifp->if_baudrate = 1000000000;
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = xge_ioctl;
+ ifp->if_start = xge_start;
+ IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESCS - 1);
+ IFQ_SET_READY(&ifp->if_snd);
+
+ ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+ /*
+ * Attach the interface.
+ */
+ if_attach(ifp);
+ ether_ifattach(ifp);
+
+ /*
+ * Setup interrupt vector before initializing.
+ */
+ if (pci_intr_map(pa, &ih)) {
+ printf("%s: unable to map interrupt\n",
+ XNAME);
+ return;
+ }
+ intrstr = pci_intr_string(pc, ih);
+ if ((sc->sc_ih =
+ pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
+ printf("%s: unable to establish interrupt at %s\n",
+ XNAME, intrstr ? intrstr : "<unknown>");
+ return;
+ }
+ printf("%s: interrupting at %s\n", XNAME, intrstr);
+}
+
+void
+xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+ struct xge_softc *sc = ifp->if_softc;
+ uint64_t reg;
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;
+
+ reg = PIF_RCSR(ADAPTER_STATUS);
+ if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
+ ifmr->ifm_status |= IFM_ACTIVE;
+}
+
+int
+xge_xgmii_mediachange(struct ifnet *ifp)
+{
+ return 0;
+}
+
+void
+xge_enable(struct xge_softc *sc)
+{
+ uint64_t val;
+
+ /* 2, enable adapter */
+ val = PIF_RCSR(ADAPTER_CONTROL);
+ val |= ADAPTER_EN;
+ PIF_WCSR(ADAPTER_CONTROL, val);
+
+ /* 3, light the card enable led */
+ val = PIF_RCSR(ADAPTER_CONTROL);
+ val |= LED_ON;
+ PIF_WCSR(ADAPTER_CONTROL, val);
+ printf("%s: link up\n", XNAME);
+}
+
+int
+xge_init(struct ifnet *ifp)
+{
+ struct xge_softc *sc = ifp->if_softc;
+ uint64_t val;
+
+ /* 31+32, setup MAC config */
+ PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
+ RMAC_BCAST_EN|RMAC_DISCARD_PFRM|RMAC_PROM_EN);
+
+ DELAY(1000);
+
+ /* 54, ensure that the adapter is 'quiescent' */
+ val = PIF_RCSR(ADAPTER_STATUS);
+ if ((val & QUIESCENT) != QUIESCENT) {
+#if 0
+ char buf[200];
+#endif
+ printf("%s: adapter not quiescent, aborting\n", XNAME);
+ val = (val & QUIESCENT) ^ QUIESCENT;
+#if 0
+ bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
+ printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
+#endif
+ return 1;
+ }
+
+ /* 56, enable the transmit laser */
+ val = PIF_RCSR(ADAPTER_CONTROL);
+ val |= EOI_TX_ON;
+ PIF_WCSR(ADAPTER_CONTROL, val);
+
+ xge_enable(sc);
+ /*
+ * Enable all interrupts
+ */
+ PIF_WCSR(TX_TRAFFIC_MASK, 0);
+ PIF_WCSR(RX_TRAFFIC_MASK, 0);
+ PIF_WCSR(GENERAL_INT_MASK, 0);
+ PIF_WCSR(TXPIC_INT_MASK, 0);
+ PIF_WCSR(RXPIC_INT_MASK, 0);
+ PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
+ PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);
+
+ /* Done... */
+ ifp->if_flags |= IFF_RUNNING;
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ return 0;
+}
+
+void
+xge_stop(struct ifnet *ifp, int disable)
+{
+ struct xge_softc *sc = ifp->if_softc;
+ uint64_t val;
+
+ val = PIF_RCSR(ADAPTER_CONTROL);
+ val &= ~ADAPTER_EN;
+ PIF_WCSR(ADAPTER_CONTROL, val);
+
+ while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
+ ;
+}
+
+int
+xge_intr(void *pv)
+{
+ struct xge_softc *sc = pv;
+ struct txd *txd;
+ struct ifnet *ifp = &sc->sc_if;
+ bus_dmamap_t dmp;
+ uint64_t val;
+ int i, lasttx, plen;
+
+ val = PIF_RCSR(GENERAL_INT_STATUS);
+ if (val == 0)
+ return 0; /* no interrupt here */
+
+ PIF_WCSR(GENERAL_INT_STATUS, val);
+
+ if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
+ /* Wait for quiescence */
+ printf("%s: link down\n", XNAME);
+ while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
+ ;
+ PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);
+
+ val = PIF_RCSR(ADAPTER_STATUS);
+ if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
+ xge_enable(sc); /* Only if link restored */
+ }
+
+ if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
+ PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */
+ /*
+ * Collect sent packets.
+ */
+ lasttx = sc->sc_lasttx;
+ while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
+ txd = sc->sc_txd[i];
+ dmp = sc->sc_txm[i];
+
+ bus_dmamap_sync(sc->sc_dmat, dmp, 0,
+ dmp->dm_mapsize,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+ if (txd->txd_control1 & TXD_CTL1_OWN) {
+ bus_dmamap_sync(sc->sc_dmat, dmp, 0,
+ dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
+ break;
+ }
+ bus_dmamap_unload(sc->sc_dmat, dmp);
+ m_freem(sc->sc_txb[i]);
+ ifp->if_opackets++;
+ sc->sc_lasttx = i;
+ }
+
+ if (sc->sc_lasttx != lasttx)
+ ifp->if_flags &= ~IFF_OACTIVE;
+
+ /* Try to get more packets on the wire */
+ xge_start(ifp);
+
+ /* clear interrupt bits */
+ if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
+ PIF_WCSR(RX_TRAFFIC_INT, val);
+
+ for (;;) {
+ struct rxdesc *rxd;
+ struct mbuf *m;
+
+ XGE_RXSYNC(sc->sc_nextrx,
+ BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+ rxd = XGE_RXD(sc->sc_nextrx);
+ if (rxd->rxd_control1 & RXD_CTL1_OWN) {
+ XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
+ break;
+ }
+
+ /* got a packet */
+ m = sc->sc_rxb[sc->sc_nextrx];
+#if RX_MODE == RX_MODE_1
+ plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
+#elif RX_MODE == RX_MODE_3
+#error Fix rxmodes in xge_intr
+#elif RX_MODE == RX_MODE_5
+ plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
+ plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
+ plen += m->m_next->m_next->m_len =
+ RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
+ plen += m->m_next->m_next->m_next->m_len =
+ RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
+ plen += m->m_next->m_next->m_next->m_next->m_len =
+ RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
+#endif
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = plen;
+
+ val = rxd->rxd_control1;
+
+ if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
+ /* Failed, recycle this mbuf */
+#if RX_MODE == RX_MODE_1
+ rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
+ rxd->rxd_control1 = RXD_CTL1_OWN;
+#elif RX_MODE == RX_MODE_3
+#elif RX_MODE == RX_MODE_5
+#endif
+ XGE_RXSYNC(sc->sc_nextrx,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ ifp->if_ierrors++;
+ break;
+ }
+
+ ifp->if_ipackets++;
+
+#ifdef XGE_CKSUM
+ if (RXD_CTL1_PROTOS(val) & (RXD_CTL1_P_IPv4|RXD_CTL1_P_IPv6)) {
+ m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
+ if (RXD_CTL1_L3CSUM(val) != 0xffff)
+ m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
+ }
+ if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP) {
+ m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_TCPv6;
+ if (RXD_CTL1_L4CSUM(val) != 0xffff)
+ m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
+ }
+ if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP) {
+ m->m_pkthdr.csum_flags |= M_CSUM_UDPv4|M_CSUM_UDPv6;
+ if (RXD_CTL1_L4CSUM(val) != 0xffff)
+ m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
+ }
+#endif
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
+#endif /* NBPFILTER > 0 */
+
+ ether_input_mbuf(ifp, m);
+
+ if (++sc->sc_nextrx == NRXREAL)
+ sc->sc_nextrx = 0;
+	}
+
+ return 0;
+}
+
+int
+xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+{
+ struct xge_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
+ splx(s);
+ return (error);
+ }
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_flags & IFF_RUNNING))
+ xge_init(ifp);
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ arp_ifinit(&sc->sc_arpcom, ifa);
+#endif /* INET */
+ break;
+ case SIOCSIFMTU:
+ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > XGE_MAX_MTU) {
+ error = EINVAL;
+ } else if (ifp->if_mtu != ifr->ifr_mtu) {
+ PIF_WCSR(RMAC_MAX_PYLD_LEN,
+ RMAC_PYLD_LEN(ifr->ifr_mtu));
+ ifp->if_mtu = ifr->ifr_mtu;
+ }
+ break;
+ case SIOCSIFFLAGS:
+ /*
+ * If interface is marked up and not running, then start it.
+ * If it is marked down and running, stop it.
+ * XXX If it's up then re-initialize it. This is so flags
+ * such as IFF_PROMISC are handled.
+ */
+ if (ifp->if_flags & IFF_UP) {
+ if (!(ifp->if_flags & IFF_RUNNING))
+ xge_init(ifp);
+ } else {
+ if (ifp->if_flags & IFF_RUNNING)
+ xge_stop(ifp, 0);
+ }
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ error = (cmd == SIOCADDMULTI)
+ ? ether_addmulti(ifr, &sc->sc_arpcom)
+ : ether_delmulti(ifr, &sc->sc_arpcom);
+
+ if (error == ENETRESET) {
+ if (ifp->if_flags & IFF_RUNNING)
+ xge_mcast_filter(sc);
+ error = 0;
+ }
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ splx(s);
+ return(error);
+}
+
+void
+xge_mcast_filter(struct xge_softc *sc)
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct arpcom *ac = &sc->sc_arpcom;
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ int i, numaddr = 1; /* first slot used for card unicast address */
+ uint64_t val;
+
+ ETHER_FIRST_MULTI(step, ac, enm);
+ while (enm != NULL) {
+ if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
+ /* Skip ranges */
+ goto allmulti;
+ }
+ if (numaddr == MAX_MCAST_ADDR)
+ goto allmulti;
+ for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
+ val <<= 8;
+ val |= enm->enm_addrlo[i];
+ }
+ PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
+ PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
+ PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
+ RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
+ while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
+ ;
+ numaddr++;
+ ETHER_NEXT_MULTI(step, enm);
+ }
+ /* set the remaining entries to the broadcast address */
+ for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
+ PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
+ PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
+ PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
+ RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
+ while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
+ ;
+ }
+ ifp->if_flags &= ~IFF_ALLMULTI;
+ return;
+
+allmulti:
+ /* Just receive everything with the multicast bit set */
+ ifp->if_flags |= IFF_ALLMULTI;
+ PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
+ PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
+ PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
+ RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
+ while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
+ ;
+}
+
+void
+xge_start(struct ifnet *ifp)
+{
+ struct xge_softc *sc = ifp->if_softc;
+ struct txd *txd = NULL; /* XXX - gcc */
+ bus_dmamap_t dmp;
+ struct mbuf *m;
+ uint64_t par, lcr;
+ int nexttx = 0, ntxd, error, i;
+
+ if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+ return;
+
+ par = lcr = 0;
+ for (;;) {
+ IFQ_POLL(&ifp->if_snd, m);
+ if (m == NULL)
+ break; /* out of packets */
+
+ if (sc->sc_nexttx == sc->sc_lasttx)
+ break; /* No more space */
+
+ nexttx = sc->sc_nexttx;
+ dmp = sc->sc_txm[nexttx];
+
+ if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
+ BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
+ printf("%s: bus_dmamap_load_mbuf error %d\n",
+ XNAME, error);
+ break;
+ }
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+
+ bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
+ BUS_DMASYNC_PREWRITE);
+
+ txd = sc->sc_txd[nexttx];
+ sc->sc_txb[nexttx] = m;
+ for (i = 0; i < dmp->dm_nsegs; i++) {
+ if (dmp->dm_segs[i].ds_len == 0)
+ continue;
+ txd->txd_control1 = dmp->dm_segs[i].ds_len;
+ txd->txd_control2 = 0;
+ txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
+ txd++;
+ }
+ ntxd = txd - sc->sc_txd[nexttx] - 1;
+ txd = sc->sc_txd[nexttx];
+ txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
+ txd->txd_control2 = TXD_CTL2_UTIL;
+
+#ifdef XGE_CKSUM
+ if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
+ txd->txd_control2 |= TXD_CTL2_CIPv4;
+ if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
+ txd->txd_control2 |= TXD_CTL2_CTCP;
+ if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
+ txd->txd_control2 |= TXD_CTL2_CUDP;
+#endif
+ txd[ntxd].txd_control1 |= TXD_CTL1_GCL;
+
+ bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ par = sc->sc_txdp[nexttx];
+ lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
+ TXP_WCSR(TXDL_PAR, par);
+ TXP_WCSR(TXDL_LCR, lcr);
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif /* NBPFILTER > 0 */
+
+ sc->sc_nexttx = NEXTTX(nexttx);
+ }
+}
+
+/*
+ * Allocate DMA memory for transmit descriptor fragments.
+ * Only one map is used for all descriptors.
+ */
+int
+xge_alloc_txmem(struct xge_softc *sc)
+{
+ struct txd *txp;
+ bus_dma_segment_t seg;
+ bus_addr_t txdp;
+ caddr_t kva;
+ int i, rseg, state;
+
+#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
+ state = 0;
+ if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
+ &seg, 1, &rseg, BUS_DMA_NOWAIT))
+ goto err;
+ state++;
+ if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
+ BUS_DMA_NOWAIT))
+ goto err;
+
+ state++;
+ if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
+ BUS_DMA_NOWAIT, &sc->sc_txmap))
+ goto err;
+ state++;
+ if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
+ kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
+ goto err;
+
+ /* setup transmit array pointers */
+	txdp = seg.ds_addr;
+ for (txp = (struct txd *)kva, i = 0; i < NTXDESCS; i++) {
+ sc->sc_txd[i] = txp;
+ sc->sc_txdp[i] = txdp;
+ txp += NTXFRAGS;
+ txdp += (NTXFRAGS * sizeof(struct txd));
+ }
+
+ return 0;
+
+err:
+ if (state > 2)
+ bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
+ if (state > 1)
+ bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
+ if (state > 0)
+ bus_dmamem_free(sc->sc_dmat, &seg, rseg);
+ return ENOBUFS;
+}
+
+/*
+ * Allocate DMA memory for receive descriptors;
+ * only one map is used for all descriptors.
+ * Link receive descriptor pages together.
+ */
+int
+xge_alloc_rxmem(struct xge_softc *sc)
+{
+ struct rxd_4k *rxpp;
+ bus_dma_segment_t seg;
+ caddr_t kva;
+ int i, rseg, state;
+
+ /* sanity check */
+ if (sizeof(struct rxd_4k) != XGE_PAGE) {
+ printf("bad compiler struct alignment, %d != %d\n",
+ (int)sizeof(struct rxd_4k), XGE_PAGE);
+ return EINVAL;
+ }
+
+ state = 0;
+ if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
+ &seg, 1, &rseg, BUS_DMA_NOWAIT))
+ goto err;
+ state++;
+ if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
+ BUS_DMA_NOWAIT))
+ goto err;
+
+ state++;
+ if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
+ BUS_DMA_NOWAIT, &sc->sc_rxmap))
+ goto err;
+ state++;
+ if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
+ kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
+ goto err;
+
+ /* setup receive page link pointers */
+ for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
+ sc->sc_rxd_4k[i] = rxpp;
+ rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
+ (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
+ }
+ sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
+ (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;
+
+ return 0;
+
+err:
+	if (state > 2)
+		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
+	if (state > 1)
+		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
+ if (state > 0)
+ bus_dmamem_free(sc->sc_dmat, &seg, rseg);
+ return ENOBUFS;
+}
+
+/*
+ * Add a new mbuf chain to descriptor id.
+ */
+int
+xge_add_rxbuf(struct xge_softc *sc, int id)
+{
+ struct rxdesc *rxd;
+ struct mbuf *m[5];
+ int page, desc, error;
+#if RX_MODE == RX_MODE_5
+ int i;
+#endif
+
+ page = id/NDESC_BUFMODE;
+ desc = id%NDESC_BUFMODE;
+
+ rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];
+
+	/*
+	 * Allocate mbufs.
+	 * In 5-buffer mode, five mbufs and two clusters are used;
+	 * the hardware will put the (ethernet, ip, tcp/udp) headers in
+	 * their own buffers and the clusters are only used for data.
+	 */
+#if RX_MODE == RX_MODE_1
+ MGETHDR(m[0], M_DONTWAIT, MT_DATA);
+ if (m[0] == NULL)
+ return ENOBUFS;
+ MCLGET(m[0], M_DONTWAIT);
+ if ((m[0]->m_flags & M_EXT) == 0) {
+ m_freem(m[0]);
+ return ENOBUFS;
+ }
+ m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
+#elif RX_MODE == RX_MODE_3
+#error missing rxmode 3.
+#elif RX_MODE == RX_MODE_5
+ MGETHDR(m[0], M_DONTWAIT, MT_DATA);
+ for (i = 1; i < 5; i++) {
+ MGET(m[i], M_DONTWAIT, MT_DATA);
+ }
+ if (m[3])
+ MCLGET(m[3], M_DONTWAIT);
+ if (m[4])
+ MCLGET(m[4], M_DONTWAIT);
+ if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
+ ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
+ /* Out of something */
+ for (i = 0; i < 5; i++)
+ if (m[i] != NULL)
+ m_free(m[i]);
+ return ENOBUFS;
+ }
+ /* Link'em together */
+ m[0]->m_next = m[1];
+ m[1]->m_next = m[2];
+ m[2]->m_next = m[3];
+ m[3]->m_next = m[4];
+#else
+#error bad mode RX_MODE
+#endif
+
+ if (sc->sc_rxb[id])
+ bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
+ sc->sc_rxb[id] = m[0];
+
+ error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
+ BUS_DMA_READ|BUS_DMA_NOWAIT);
+ if (error)
+ return error;
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
+ sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);
+
+#if RX_MODE == RX_MODE_1
+ rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
+ rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
+ rxd->rxd_control1 = RXD_CTL1_OWN;
+#elif RX_MODE == RX_MODE_3
+#elif RX_MODE == RX_MODE_5
+ rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
+ rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
+ rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
+ rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
+ rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
+ rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
+ rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
+ rxd->rxd_control1 = RXD_CTL1_OWN;
+#endif
+
+ XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ return 0;
+}
+
+/*
+ * These magic values come from the FreeBSD driver.
+ */
+int
+xge_setup_xgxs(struct xge_softc *sc)
+{
+	/* The magic numbers are described in the user's guide */
+
+ /* Writing to MDIO 0x8000 (Global Config 0) */
+ PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);
+
+ /* Writing to MDIO 0x8000 (Global Config 1) */
+ PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);
+
+ /* Reset the Gigablaze */
+ PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);
+
+ /* read the pole settings */
+ PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);
+
+ PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);
+
+ PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);
+
+ /* Workaround for TX Lane XAUI initialization error.
+ Read Xpak PHY register 24 for XAUI lane status */
+ PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);
+
+	/*
+	 * Reading the MDIO control with value 0x1804001c0F001c
+	 * means the TxLanes were already in sync.
+	 * Reading the MDIO control with value 0x1804000c0x001c
+	 * means some TxLanes are not in sync, where x is a 4-bit
+	 * value with one bit per out-of-sync lane.
+	 */
+#if 0
+ val = PIF_RCSR(MDIO_CONTROL);
+ if (val != 0x1804001c0F001cULL) {
+ printf("%s: MDIO_CONTROL: %llx != %llx\n",
+ XNAME, val, 0x1804001c0F001cULL);
+ return 1;
+ }
+#endif
+
+ /* Set and remove the DTE XS INTLoopBackN */
+ PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
+ PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);
+
+#if 0
+	/* Reading the DTX control register should be 0x5152040001c */
+ val = PIF_RCSR(DTX_CONTROL);
+ if (val != 0x5152040001cULL) {
+ printf("%s: DTX_CONTROL: %llx != %llx\n",
+ XNAME, val, 0x5152040001cULL);
+ return 1;
+ }
+#endif
+
+ PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
+ PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
+ PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);
+
+#if 0
+	/* Reading the MDIO control should be 0x1804001c0f001c */
+ val = PIF_RCSR(MDIO_CONTROL);
+ if (val != 0x1804001c0f001cULL) {
+ printf("%s: MDIO_CONTROL2: %llx != %llx\n",
+ XNAME, val, 0x1804001c0f001cULL);
+ return 1;
+ }
+#endif
+ return 0;
+}
diff --git a/sys/dev/pci/if_xgereg.h b/sys/dev/pci/if_xgereg.h
new file mode 100644
index 00000000000..fac200e6d1c
--- /dev/null
+++ b/sys/dev/pci/if_xgereg.h
@@ -0,0 +1,427 @@
+/* $OpenBSD: if_xgereg.h,v 1.1 2006/05/01 00:34:12 brad Exp $ */
+/* $NetBSD: if_xgereg.h,v 1.1 2005/09/09 10:30:27 ragge Exp $ */
+
+/*
+ * Copyright (c) 2004, SUNET, Swedish University Computer Network.
+ * All rights reserved.
+ *
+ * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * SUNET, Swedish University Computer Network.
+ * 4. The name of SUNET may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUNET
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Defines for the Neterion Xframe adapter.
+ */
+
+/* PCI address space */
+#define XGE_PIF_BAR 0x10
+#define XGE_TXP_BAR 0x18
+
+/* PIF register address calculation */
+#define DCSRB(x) (0x0000+(x)) /* 10GbE Device Control and Status Registers */
+#define PCIXB(x) (0x0800+(x)) /* PCI-X Interface Functional Registers */
+#define TDMAB(x) (0x1000+(x)) /* Transmit DMA Functional Registers */
+#define RDMAB(x) (0x1800+(x)) /* Receive DMA Functional Registers */
+#define MACRB(x) (0x2000+(x)) /* MAC functional registers */
+#define RLDRB(x) (0x2800+(x)) /* RLDRAM memory controller */
+#define XGXSB(x) (0x3000+(x)) /* XGXS functional Registers */
+
+/*
+ * Control and Status Registers
+ */
+#define GENERAL_INT_STATUS DCSRB(0x0000)
+#define GENERAL_INT_MASK DCSRB(0x0008)
+#define SW_RESET DCSRB(0x0100)
+#define XGXS_RESET(x) ((uint64_t)(x) << 32)
+#define ADAPTER_STATUS DCSRB(0x0108)
+#define TDMA_READY (1ULL<<63)
+#define RDMA_READY (1ULL<<62)
+#define PFC_READY (1ULL<<61)
+#define TMAC_BUF_EMPTY (1ULL<<60)
+#define PIC_QUIESCENT (1ULL<<58)
+#define RMAC_REMOTE_FAULT (1ULL<<57)
+#define RMAC_LOCAL_FAULT (1ULL<<56)
+#define MC_DRAM_READY (1ULL<<39)
+#define MC_QUEUES_READY (1ULL<<38)
+#define M_PLL_LOCK (1ULL<<33)
+#define P_PLL_LOCK (1ULL<<32)
+#define ADAPTER_CONTROL DCSRB(0x0110)
+#define ADAPTER_EN (1ULL<<56)
+#define EOI_TX_ON (1ULL<<48)
+#define LED_ON (1ULL<<40)
+#define WAIT_INT_EN (1ULL<<15)
+#define ECC_ENABLE_N (1ULL<<8)
+
+/* for debug of ADAPTER_STATUS */
+#define QUIESCENT (TDMA_READY|RDMA_READY|PFC_READY|TMAC_BUF_EMPTY|\
+ PIC_QUIESCENT|MC_DRAM_READY|MC_QUEUES_READY|M_PLL_LOCK|P_PLL_LOCK)
+#define QUIESCENT_BMSK \
+ "\177\20b\x3fTDMA_READY\0b\x3eRDMA_READY\0b\x3dPFC_READY\0" \
+	"b\x3cTMAC_BUF_EMPTY\0b\x3aPIC_QUIESCENT\0b\x39RMAC_REMOTE_FAULT\0" \
+ "b\x38RMAC_LOCAL_FAULT\0b\x27MC_DRAM_READY\0b\x26MC_QUEUES_READY\0" \
+ "b\x21M_PLL_LOCK\0b\x20P_PLL_LOCK"
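+
+/*
+ * QUIESCENT_BMSK is in the NetBSD bitmask_snprintf() new-style
+ * format (leading \177); it is only used by the #if 0 debug code
+ * in xge_init().
+ */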
+
+/*
+ * PCI-X registers
+ */
+/* Interrupt control registers */
+#define PIC_INT_STATUS PCIXB(0)
+#define PIC_INT_MASK PCIXB(0x008)
+#define TXPIC_INT_MASK PCIXB(0x018)
+#define RXPIC_INT_MASK PCIXB(0x030)
+#define FLASH_INT_MASK PCIXB(0x048)
+#define MDIO_INT_MASK PCIXB(0x060)
+#define IIC_INT_MASK PCIXB(0x078)
+#define GPIO_INT_MASK PCIXB(0x098)
+#define TX_TRAFFIC_INT PCIXB(0x0e0)
+#define TX_TRAFFIC_MASK PCIXB(0x0e8)
+#define RX_TRAFFIC_INT PCIXB(0x0f0)
+#define RX_TRAFFIC_MASK PCIXB(0x0f8)
+#define PIC_CONTROL PCIXB(0x100)
+
+/* Byte swapping for little-endian */
+#define SWAPPER_CTRL PCIXB(0x108)
+#define PIF_R_FE (1ULL<<63)
+#define PIF_R_SE (1ULL<<62)
+#define PIF_W_FE (1ULL<<55)
+#define PIF_W_SE (1ULL<<54)
+#define TxP_FE (1ULL<<47)
+#define TxP_SE (1ULL<<46)
+#define TxD_R_FE (1ULL<<45)
+#define TxD_R_SE (1ULL<<44)
+#define TxD_W_FE (1ULL<<43)
+#define TxD_W_SE (1ULL<<42)
+#define TxF_R_FE (1ULL<<41)
+#define TxF_R_SE (1ULL<<40)
+#define RxD_R_FE (1ULL<<31)
+#define RxD_R_SE (1ULL<<30)
+#define RxD_W_FE (1ULL<<29)
+#define RxD_W_SE (1ULL<<28)
+#define RxF_W_FE (1ULL<<27)
+#define RxF_W_SE (1ULL<<26)
+#define XMSI_FE (1ULL<<23)
+#define XMSI_SE (1ULL<<22)
+#define STATS_FE (1ULL<<15)
+#define STATS_SE (1ULL<<14)
+
+/* Diagnostic register to check byte-swapping conf */
+#define PIF_RD_SWAPPER_Fb PCIXB(0x110)
+#define SWAPPER_MAGIC 0x0123456789abcdefULL
+
+/* Stats registers */
+#define STAT_CFG PCIXB(0x1d0)
+#define STAT_ADDR PCIXB(0x1d8)
+
+/* DTE-XGXS Interface */
+#define MDIO_CONTROL PCIXB(0x1e0)
+#define DTX_CONTROL PCIXB(0x1e8)
+#define I2C_CONTROL PCIXB(0x1f0)
+#define GPIO_CONTROL PCIXB(0x1f8)
+
+/*
+ * Transmit DMA registers.
+ */
+#define TXDMA_INT_MASK TDMAB(0x008)
+#define PFC_ERR_MASK TDMAB(0x018)
+#define TDA_ERR_MASK TDMAB(0x030)
+#define PCC_ERR_MASK TDMAB(0x048)
+#define TTI_ERR_MASK TDMAB(0x060)
+#define LSO_ERR_MASK TDMAB(0x078)
+#define TPA_ERR_MASK TDMAB(0x090)
+#define SM_ERR_MASK TDMAB(0x0a8)
+
+/* Transmit FIFO config */
+#define TX_FIFO_P0 TDMAB(0x0108)
+#define TX_FIFO_P1 TDMAB(0x0110)
+#define TX_FIFO_P2 TDMAB(0x0118)
+#define TX_FIFO_P3 TDMAB(0x0120)
+#define TX_FIFO_ENABLE (1ULL<<63)
+#define TX_FIFO_NUM0(x) ((uint64_t)(x) << 56)
+#define TX_FIFO_LEN0(x) ((uint64_t)((x)-1) << 32)
+#define TX_FIFO_NUM1(x) ((uint64_t)(x) << 24)
+#define TX_FIFO_LEN1(x) ((uint64_t)((x)-1) << 0)
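+/*
+ * e.g. xge_attach() writes TX_FIFO_LEN0(NTXDESCS) to TX_FIFO_P0,
+ * giving the whole FIFO to priority 0 and leaving P1-P3 empty.
+ */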
+
+/* Transmit interrupts */
+#define TTI_COMMAND_MEM TDMAB(0x150)
+#define TTI_CMD_MEM_WE (1ULL<<56)
+#define TTI_CMD_MEM_STROBE (1ULL<<48)
+#define TTI_DATA1_MEM TDMAB(0x158)
+#define TX_TIMER_VAL(x) ((uint64_t)(x) << 32)
+#define TX_TIMER_AC (1ULL<<25)
+#define TX_TIMER_CI (1ULL<<24)
+#define TX_URNG_A(x) ((uint64_t)(x) << 16)
+#define TX_URNG_B(x) ((uint64_t)(x) << 8)
+#define TX_URNG_C(x) ((uint64_t)(x) << 0)
+#define TTI_DATA2_MEM TDMAB(0x160)
+#define TX_UFC_A(x) ((uint64_t)(x) << 48)
+#define TX_UFC_B(x) ((uint64_t)(x) << 32)
+#define TX_UFC_C(x) ((uint64_t)(x) << 16)
+#define TX_UFC_D(x) ((uint64_t)(x) << 0)
+
+/* Transmit protocol assist */
+#define TX_PA_CFG TDMAB(0x0168)
+#define TX_PA_CFG_IFR (1ULL<<62) /* Ignore frame error */
+#define TX_PA_CFG_ISO (1ULL<<61) /* Ignore snap OUI */
+#define TX_PA_CFG_ILC (1ULL<<60) /* Ignore LLC ctrl */
+#define TX_PA_CFG_ILE (1ULL<<57) /* Ignore L2 error */
+
+/*
+ * Transmit descriptor list (TxDL) pointer and control.
+ * There may be up to 8192 TxDL's per FIFO, but no more than 8192
+ * in total for the whole NIC. The TxDL's are located in NIC memory.
+ * Each TxDL can have up to 256 Transmit descriptors (TxD)
+ * that are located in host memory.
+ *
+ * The txdl struct fields must be written in order.
+ */
+#ifdef notdef /* Use bus_space stuff instead */
+struct txdl {
+ uint64_t txdl_pointer; /* address of TxD's */
+ uint64_t txdl_control;
+};
+#endif
+#define TXDLOFF1(x) (16*(x)) /* byte offset in txdl for list */
+#define TXDLOFF2(x) (16*(x)+8) /* byte offset in txdl for list */
+#define TXDL_NUMTXD(x) ((uint64_t)(x) << 56) /* # of TxD's in the list */
+#define TXDL_LGC_FIRST (1ULL << 49) /* First special list */
+#define TXDL_LGC_LAST (1ULL << 48) /* Last special list */
+#define TXDL_SFF (1ULL << 40) /* List is a special function list */
+#define TXDL_PAR 0 /* Pointer address register */
+#define TXDL_LCR 8 /* List control register */
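+/*
+ * e.g. xge_start() kicks one list per packet by writing the TxD
+ * chain's physical address to TXDL_PAR and then
+ * TXDL_NUMTXD(ntxd)|TXDL_LGC_FIRST|TXDL_LGC_LAST to TXDL_LCR.
+ */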
+
+struct txd {
+ uint64_t txd_control1;
+ uint64_t txd_control2;
+ uint64_t txd_bufaddr;
+ uint64_t txd_hostctrl;
+};
+#define TXD_CTL1_OWN (1ULL << 56) /* Owner, 0 == host, 1 == NIC */
+#define TXD_CTL1_GCF (1ULL << 41) /* First frame or LSO */
+#define TXD_CTL1_GCL (1ULL << 40) /* Last frame or LSO */
+#define TXD_CTL1_LSO (1ULL << 33) /* LSO should be performed */
+#define TXD_CTL1_COF (1ULL << 32) /* UDP Checksum over fragments */
+#define TXD_CTL1_MSS(x) ((uint64_t)(x) << 16)
+
+#define TXD_CTL2_INTLST (1ULL << 16) /* Per-list interrupt */
+#define TXD_CTL2_UTIL (1ULL << 17) /* Utilization interrupt */
+#define TXD_CTL2_CIPv4 (1ULL << 58) /* Calculate IPv4 header checksum */
+#define TXD_CTL2_CTCP (1ULL << 57) /* Calculate TCP checksum */
+#define TXD_CTL2_CUDP (1ULL << 56) /* Calculate UDP checksum */
+/*
+ * Receive DMA registers
+ */
+/* Receive interrupt registers */
+#define RXDMA_INT_MASK RDMAB(0x008)
+#define RDA_ERR_MASK RDMAB(0x018)
+#define RC_ERR_MASK RDMAB(0x030)
+#define PRC_PCIX_ERR_MASK RDMAB(0x048)
+#define RPA_ERR_MASK RDMAB(0x060)
+#define RTI_ERR_MASK RDMAB(0x078)
+
+#define RX_QUEUE_PRIORITY RDMAB(0x100)
+#define RX_W_ROUND_ROBIN_0 RDMAB(0x108)
+#define RX_W_ROUND_ROBIN_1 RDMAB(0x110)
+#define RX_W_ROUND_ROBIN_2 RDMAB(0x118)
+#define RX_W_ROUND_ROBIN_3 RDMAB(0x120)
+#define RX_W_ROUND_ROBIN_4 RDMAB(0x128)
+#define PRC_RXD0_0 RDMAB(0x130)
+#define PRC_CTRL_0 RDMAB(0x170)
+#define RC_IN_SVC (1ULL << 56)
+#define RING_MODE_1 (0ULL << 48)
+#define RING_MODE_3 (1ULL << 48)
+#define RING_MODE_5 (2ULL << 48)
+#define RC_NO_SNOOP_D (1ULL << 41)
+#define RC_NO_SNOOP_B (1ULL << 40)
+#define PRC_ALARM_ACTION RDMAB(0x1b0)
+#define RTI_COMMAND_MEM RDMAB(0x1b8)
+#define RTI_CMD_MEM_WE (1ULL << 56)
+#define RTI_CMD_MEM_STROBE (1ULL << 48)
+#define RTI_DATA1_MEM RDMAB(0x1c0)
+#define RX_TIMER_VAL(x) ((uint64_t)(x) << 32)
+#define RX_TIMER_AC (1ULL << 25)
+#define RX_URNG_A(x) ((uint64_t)(x) << 16)
+#define RX_URNG_B(x) ((uint64_t)(x) << 8)
+#define RX_URNG_C(x) ((uint64_t)(x) << 0)
+#define RTI_DATA2_MEM RDMAB(0x1c8)
+#define RX_UFC_A(x) ((uint64_t)(x) << 48)
+#define RX_UFC_B(x) ((uint64_t)(x) << 32)
+#define RX_UFC_C(x) ((uint64_t)(x) << 16)
+#define RX_UFC_D(x) ((uint64_t)(x) << 0)
+#define RX_PA_CFG RDMAB(0x1d0)
+/*
+ * Receive descriptor (RxD) format.
+ * There are three receive descriptor formats: 1-, 3- and 5-buffer.
+ */
+#define RX_MODE_1 1
+#define RX_MODE_3 3
+#define RX_MODE_5 5
+
+struct rxd1 {
+ uint64_t rxd_hcontrol;
+ uint64_t rxd_control1;
+ uint64_t rxd_control2;
+ uint64_t rxd_buf0;
+};
+
+/* 4k struct for 1 buffer mode */
+#define NDESC_1BUFMODE 127 /* # desc/page for 1-buffer mode */
+struct rxd1_4k {
+ struct rxd1 r4_rxd[NDESC_1BUFMODE];
+ uint64_t pad[3];
+ uint64_t r4_next; /* phys address of next 4k buffer */
+};
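+/* 127*32 + 3*8 + 8 = 4096, so the struct fills the page exactly */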
+
+struct rxd3 {
+ uint64_t rxd_hcontrol;
+ uint64_t rxd_control1;
+ uint64_t rxd_control2;
+ uint64_t rxd_buf0;
+ uint64_t rxd_buf1;
+ uint64_t rxd_buf2;
+};
+
+struct rxd5 {
+ uint64_t rxd_control3;
+ uint64_t rxd_control1;
+ uint64_t rxd_control2;
+ uint64_t rxd_buf0;
+ uint64_t rxd_buf1;
+ uint64_t rxd_buf2;
+ uint64_t rxd_buf3;
+ uint64_t rxd_buf4;
+};
+
+/* 4k struct for 5 buffer mode */
+#define NDESC_5BUFMODE 63 /* # desc/page for 5-buffer mode */
+#define XGE_PAGE 4096 /* page size used for receive */
+struct rxd5_4k {
+ struct rxd5 r4_rxd[NDESC_5BUFMODE];
+ uint64_t pad[7];
+ uint64_t r4_next; /* phys address of next 4k buffer */
+};
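+/* 63*64 + 7*8 + 8 = 4096, so this struct also fills the page exactly */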
+
+#define RXD_MKCTL3(h,bs3,bs4) \
+ (((uint64_t)(h) << 32) | ((uint64_t)(bs3) << 16) | (uint64_t)(bs4))
+#define RXD_MKCTL2(bs0,bs1,bs2) \
+ (((uint64_t)(bs0) << 48) | ((uint64_t)(bs1) << 32) | \
+ ((uint64_t)(bs2) << 16))
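+/*
+ * e.g. RXD_MKCTL2(MCLBYTES, 0, 0) programs a single cluster-sized
+ * buffer 0, as done when recycling an mbuf in 1-buffer mode.
+ */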
+
+#define RXD_CTL2_BUF0SIZ(x) (((x) >> 48) & 0xffff)
+#define RXD_CTL2_BUF1SIZ(x) (((x) >> 32) & 0xffff)
+#define RXD_CTL2_BUF2SIZ(x) (((x) >> 16) & 0xffff)
+#define RXD_CTL3_BUF3SIZ(x) (((x) >> 16) & 0xffff)
+#define RXD_CTL3_BUF4SIZ(x) ((x) & 0xffff)
+#define RXD_CTL1_OWN (1ULL << 56)
+#define RXD_CTL1_XCODE(x) (((x) >> 48) & 0xf) /* Status bits */
+#define RXD_CTL1_X_OK 0
+#define RXD_CTL1_X_PERR 1 /* Parity error */
+#define RXD_CTL1_X_ABORT 2 /* Abort during xfer */
+#define RXD_CTL1_X_PA 3 /* Parity error and abort */
+#define RXD_CTL1_X_RDA 4 /* RDA failure */
+#define RXD_CTL1_X_UP 5 /* Unknown protocol */
+#define RXD_CTL1_X_FI 6 /* Frame integrity (FCS) error */
+#define RXD_CTL1_X_BSZ 7 /* Buffer size error */
+#define RXD_CTL1_X_ECC 8 /* Internal ECC */
+#define RXD_CTL1_X_UNK 15 /* Unknown error */
+#define RXD_CTL1_PROTOS(x) (((x) >> 32) & 0xff)
+#define RXD_CTL1_P_VLAN 0x80 /* VLAN tagged */
+#define RXD_CTL1_P_MSK 0x60 /* Mask for frame type */
+#define RXD_CTL1_P_DIX 0x00
+#define RXD_CTL1_P_LLC 0x20
+#define RXD_CTL1_P_SNAP 0x40
+#define RXD_CTL1_P_IPX 0x60
+#define RXD_CTL1_P_IPv4 0x10
+#define RXD_CTL1_P_IPv6 0x08
+#define RXD_CTL1_P_IPFRAG 0x04
+#define RXD_CTL1_P_TCP 0x02
+#define RXD_CTL1_P_UDP 0x01
+#define RXD_CTL1_L3CSUM(x) (((x) >> 16) & 0xffff)
+#define RXD_CTL1_L4CSUM(x) ((x) & 0xffff)
+#define RXD_CTL2_VLANTAG(x) ((x) & 0xffff)
+
+/*
+ * MAC Configuration/Status
+ */
+#define MAC_INT_STATUS MACRB(0x000)
+#define MAC_TMAC_INT (1ULL<<63)
+#define MAC_RMAC_INT (1ULL<<62)
+#define MAC_INT_MASK MACRB(0x008)
+#define MAC_TMAC_ERR_MASK MACRB(0x018)
+#define MAC_RMAC_ERR_REG MACRB(0x028)
+#define RMAC_LINK_STATE_CHANGE_INT (1ULL<<32)
+#define MAC_RMAC_ERR_MASK MACRB(0x030)
+
+#define MAC_CFG MACRB(0x0100)
+#define TMAC_EN (1ULL<<63)
+#define RMAC_EN (1ULL<<62)
+#define UTILZATION_CALC_SEL (1ULL<<61)
+#define TMAC_LOOPBACK (1ULL<<60)
+#define TMAC_APPEND_PAD (1ULL<<59)
+#define RMAC_STRIP_FCS (1ULL<<58)
+#define RMAC_STRIP_PAD (1ULL<<57)
+#define RMAC_PROM_EN (1ULL<<56)
+#define RMAC_DISCARD_PFRM (1ULL<<55)
+#define RMAC_BCAST_EN (1ULL<<54)
+#define RMAC_ALL_ADDR_EN (1ULL<<53)
+#define RMAC_MAX_PYLD_LEN MACRB(0x0110)
+#define RMAC_PYLD_LEN(x) ((uint64_t)(x) << 48)
+#define RMAC_CFG_KEY MACRB(0x0120)
+#define RMAC_KEY_VALUE (0x4c0dULL<<48)
+#define RMAC_ADDR_CMD_MEM MACRB(0x0128)
+#define RMAC_ADDR_CMD_MEM_WE (1ULL<<56)
+#define RMAC_ADDR_CMD_MEM_STR (1ULL<<48)
+#define RMAC_ADDR_CMD_MEM_OFF(x) ((uint64_t)(x) << 32)
+#define MAX_MCAST_ADDR 64 /* slots in mcast table */
+#define RMAC_ADDR_DATA0_MEM MACRB(0x0130)
+#define RMAC_ADDR_DATA1_MEM MACRB(0x0138)
+#define RMAC_PAUSE_CFG MACRB(0x150)
+#define RMAC_PAUSE_GEN_EN (1ULL<<63)
+#define RMAC_PAUSE_RCV_EN (1ULL<<62)
+
+/*
+ * RLDRAM registers.
+ */
+#define MC_INT_MASK RLDRB(0x008)
+#define MC_ERR_MASK RLDRB(0x018)
+
+#define RX_QUEUE_CFG RLDRB(0x100)
+#define MC_QUEUE(q,s) ((uint64_t)(s)<<(56-((q)*8)))
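+/* e.g. MC_QUEUE(0, 64) assigns all 64MB of RLDRAM to queue 0 */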
+#define MC_RLDRAM_MRS RLDRB(0x108)
+#define MC_QUEUE_SIZE_ENABLE (1ULL<<24)
+#define MC_RLDRAM_MRS_ENABLE (1ULL<<16)
+
+/*
+ * XGXS registers.
+ */
+/* XGXS control/status */
+#define XGXS_INT_MASK XGXSB(0x008)
+#define XGXS_TXGXS_ERR_MASK XGXSB(0x018)
+#define XGXS_RXGXS_ERR_MASK XGXSB(0x030)
+#define XGXS_CFG XGXSB(0x0100)