Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/sparc/dev/qe.c       986
-rw-r--r--  sys/arch/sparc/dev/qec.c       23
-rw-r--r--  sys/arch/sparc/dev/qecvar.h     4
-rw-r--r--  sys/arch/sparc/dev/qereg.h    321
-rw-r--r--  sys/arch/sparc/dev/qevar.h     54
5 files changed, 1385 insertions, 3 deletions
diff --git a/sys/arch/sparc/dev/qe.c b/sys/arch/sparc/dev/qe.c
new file mode 100644
index 00000000000..3d9ab2caa12
--- /dev/null
+++ b/sys/arch/sparc/dev/qe.c
@@ -0,0 +1,986 @@
+/* $OpenBSD: qe.c,v 1.1 1998/10/19 05:41:19 jason Exp $ */
+
+/*
+ * Copyright (c) 1998 Jason L. Wright.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Driver for the SBus qec+qe QuadEthernet board.
+ *
+ * This driver was written using the AMD MACE Am79C940 documentation, some
+ * ideas gleaned from the S/Linux driver for this card, Solaris header files,
+ * and a loan of a card from Paul Southworth of the Internet Engineering
+ * Group (www.ieng.com).
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/errno.h>
+#include <sys/ioctl.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/syslog.h>
+#include <sys/device.h>
+#include <sys/malloc.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/netisr.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#include "bpfilter.h"
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#include <net/bpfdesc.h>
+#endif
+
+#include <machine/autoconf.h>
+#include <machine/cpu.h>
+
+#include <sparc/dev/sbusvar.h>
+#include <sparc/dev/dmareg.h>
+#include <sparc/dev/dmavar.h>
+
+#include <sparc/dev/qecvar.h>
+#include <sparc/dev/qecreg.h>
+#include <sparc/dev/qereg.h>
+#include <sparc/dev/qevar.h>
+
+int qematch __P((struct device *, void *, void *));
+void qeattach __P((struct device *, struct device *, void *));
+
+void qeinit __P((struct qesoftc *));
+void qestart __P((struct ifnet *));
+void qestop __P((struct qesoftc *));
+void qewatchdog __P((struct ifnet *));
+int qeioctl __P((struct ifnet *, u_long, caddr_t));
+void qereset __P((struct qesoftc *));
+
+int qeintr __P((void *));
+int qe_eint __P((struct qesoftc *, u_int32_t));
+int qe_rint __P((struct qesoftc *));
+int qe_tint __P((struct qesoftc *));
+int qe_put __P((struct qesoftc *, int, struct mbuf *));
+void qe_read __P((struct qesoftc *, int, int));
+struct mbuf * qe_get __P((struct qesoftc *, int, int));
+void qe_mcreset __P((struct qesoftc *));
+
+struct cfdriver qe_cd = {
+ NULL, "qe", DV_IFNET
+};
+
+struct cfattach qe_ca = {
+ sizeof(struct qesoftc), qematch, qeattach
+};
+
+int
+qematch(parent, vcf, aux)
+ struct device *parent;
+ void *vcf, *aux;
+{
+ struct cfdata *cf = vcf;
+ struct confargs *ca = aux;
+ register struct romaux *ra = &ca->ca_ra;
+
+ if (strcmp(cf->cf_driver->cd_name, ra->ra_name))
+ return (0);
+ return (1);
+}
+
+void
+qeattach(parent, self, aux)
+ struct device *parent, *self;
+ void *aux;
+{
+ struct qec_softc *qec = (struct qec_softc *)parent;
+ struct qesoftc *sc = (struct qesoftc *)self;
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct confargs *ca = aux;
+ extern void myetheraddr __P((u_char *));
+ int pri;
+
+ if (qec->sc_pri == 0) {
+ printf(": no interrupt found on parent\n");
+ return;
+ }
+ pri = qec->sc_pri;
+
+ sc->sc_rev = getpropint(ca->ca_ra.ra_node, "mace-version", -1);
+
+ sc->sc_cr = mapiodev(&ca->ca_ra.ra_reg[0], 0, sizeof(struct qe_cregs));
+ sc->sc_mr = mapiodev(&ca->ca_ra.ra_reg[1], 0, sizeof(struct qe_mregs));
+ sc->sc_qec = qec;
+ sc->sc_qr = qec->sc_regs;
+ qestop(sc);
+
+ sc->sc_mem = qec->sc_buffer;
+ sc->sc_memsize = qec->sc_bufsiz;
+
+ sc->sc_channel = getpropint(ca->ca_ra.ra_node, "channel#", -1);
+ sc->sc_burst = qec->sc_burst;
+
+ sc->sc_ih.ih_fun = qeintr;
+ sc->sc_ih.ih_arg = sc;
+ intr_establish(pri, &sc->sc_ih);
+
+ myetheraddr(sc->sc_arpcom.ac_enaddr);
+
+ bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
+ ifp->if_softc = sc;
+ ifp->if_start = qestart;
+ ifp->if_ioctl = qeioctl;
+ ifp->if_watchdog = qewatchdog;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
+ IFF_MULTICAST;
+
+ /* Attach the interface. */
+ if_attach(ifp);
+ ether_ifattach(ifp);
+
+ printf(" pri %d: rev %x address %s\n", pri, sc->sc_rev,
+ ether_sprintf(sc->sc_arpcom.ac_enaddr));
+
+#if NBPFILTER > 0
+ bpfattach(&sc->sc_arpcom.ac_if.if_bpf, ifp, DLT_EN10MB,
+ sizeof(struct ether_header));
+#endif
+}
+
+/*
+ * Start output on interface.
+ * We make two assumptions here:
+ * 1) that the current priority is set to splnet _before_ this code
+ * is called *and* is returned to the appropriate priority after
+ * return
+ * 2) that the IFF_OACTIVE flag is checked before this code is called
+ * (i.e. that the output part of the interface is idle)
+ */
+void
+qestart(ifp)
+ struct ifnet *ifp;
+{
+ struct qesoftc *sc = (struct qesoftc *)ifp->if_softc;
+ struct mbuf *m;
+ int bix, len;
+
+ if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+ return;
+
+ bix = sc->sc_last_td;
+
+ for (;;) {
+ IF_DEQUEUE(&ifp->if_snd, m);
+ if (m == 0)
+ break;
+
+#if NBPFILTER > 0
+ /*
+ * If BPF is listening on this interface, let it see the
+ * packet before we commit it to the wire.
+ */
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+
+ /*
+ * Copy the mbuf chain into the transmit buffer.
+ */
+ len = qe_put(sc, bix, m);
+
+ /*
+ * Initialize transmit registers and start transmission
+ */
+ sc->sc_desc->qe_txd[bix].tx_flags =
+ QE_TXD_OWN | QE_TXD_SOP | QE_TXD_EOP |
+ (len & QE_TXD_LENGTH);
+ sc->sc_cr->ctrl = QE_CR_CTRL_TWAKEUP;
+
+ if (++bix == QE_TX_RING_MAXSIZE)
+ bix = 0;
+
+ if (++sc->sc_no_td == QE_TX_RING_SIZE) {
+ ifp->if_flags |= IFF_OACTIVE;
+ break;
+ }
+ }
+
+ sc->sc_last_td = bix;
+}
+
+void
+qestop(sc)
+ struct qesoftc *sc;
+{
+ struct qe_cregs *cr = sc->sc_cr;
+ struct qe_mregs *mr = sc->sc_mr;
+ int tries;
+
+ tries = 200;
+ mr->biucc = QE_MR_BIUCC_SWRST;
+ while ((mr->biucc & QE_MR_BIUCC_SWRST) && --tries)
+ DELAY(20);
+
+ tries = 200;
+ cr->ctrl = QE_CR_CTRL_RESET;
+ while ((cr->ctrl & QE_CR_CTRL_RESET) && --tries)
+ DELAY(20);
+}
+
+/*
+ * Reset interface.
+ */
+void
+qereset(sc)
+ struct qesoftc *sc;
+{
+ int s;
+
+ s = splnet();
+ qestop(sc);
+ qeinit(sc);
+ splx(s);
+}
+
+void
+qewatchdog(ifp)
+ struct ifnet *ifp;
+{
+ struct qesoftc *sc = ifp->if_softc;
+
+ log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
+ ++sc->sc_arpcom.ac_if.if_oerrors;
+
+ qereset(sc);
+}
+
+/*
+ * Interrupt dispatch.
+ */
+int
+qeintr(v)
+ void *v;
+{
+ struct qesoftc *sc = (struct qesoftc *)v;
+ u_int32_t qecstat, qestat;
+ int r = 0;
+
+ qecstat = sc->sc_qr->stat >> (4 * sc->sc_channel);
+ qestat = sc->sc_cr->stat;
+ if ((qecstat & 0xf) == 0)
+ return r;
+
+ if (qecstat & QEC_STAT_ER || qecstat & QEC_STAT_BM) {
+ r |= qe_eint(sc, qestat);
+ if (r == -1)
+ return 1;
+ }
+
+ if (qecstat & QEC_STAT_TX && qestat & QE_CR_STAT_TXIRQ)
+ r |= qe_tint(sc);
+
+ if (qecstat & QEC_STAT_RX && qestat & QE_CR_STAT_RXIRQ)
+ r |= qe_rint(sc);
+
+ return r;
+}
+
+/*
+ * Transmit interrupt.
+ */
+int
+qe_tint(sc)
+ struct qesoftc *sc;
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ int bix;
+ struct qe_txd txd;
+
+ bix = sc->sc_first_td;
+
+ for (;;) {
+ if (sc->sc_no_td <= 0)
+ break;
+
+ txd.tx_flags = sc->sc_desc->qe_txd[bix].tx_flags;
+ if (txd.tx_flags & QE_TXD_OWN)
+ break;
+
+ ifp->if_flags &= ~IFF_OACTIVE;
+ ifp->if_opackets++;
+
+ if (++bix == QE_TX_RING_MAXSIZE)
+ bix = 0;
+
+ --sc->sc_no_td;
+ }
+
+ sc->sc_first_td = bix;
+
+ qestart(ifp);
+
+ if (sc->sc_no_td == 0)
+ ifp->if_timer = 0;
+
+ return 1;
+}
+
+/*
+ * Receive interrupt.
+ */
+int
+qe_rint(sc)
+ struct qesoftc *sc;
+{
+ int bix, len;
+
+ bix = sc->sc_last_rd;
+
+ /*
+ * Process all buffers with valid data.
+ */
+ for (;;) {
+ if (sc->sc_desc->qe_rxd[bix].rx_flags & QE_RXD_OWN)
+ break;
+
+ len = (sc->sc_desc->qe_rxd[bix].rx_flags & QE_RXD_LENGTH) - 4;
+ qe_read(sc, bix, len);
+ sc->sc_desc->qe_rxd[(bix + QE_RX_RING_SIZE) % QE_RX_RING_MAXSIZE].rx_flags =
+ QE_RXD_OWN | QE_RXD_LENGTH;
+
+ if (++bix == QE_RX_RING_MAXSIZE)
+ bix = 0;
+ }
+
+ sc->sc_last_rd = bix;
+
+ return 1;
+}
+
+/*
+ * Error interrupt.
+ */
+int
+qe_eint(sc, why)
+ struct qesoftc *sc;
+ u_int32_t why;
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ int r = 0, rst = 0;
+
+ if (why & QE_CR_STAT_EDEFER) {
+ printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
+ r |= 1;
+ ifp->if_oerrors++;
+ }
+
+ if (why & QE_CR_STAT_CLOSS) {
+ printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_ERETRIES) {
+ printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+
+ if (why & QE_CR_STAT_LCOLL) {
+ printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (why & QE_CR_STAT_FUFLOW) {
+ printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (why & QE_CR_STAT_JERROR) {
+ printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_BERROR) {
+ printf("%s: babble seen\n", sc->sc_dev.dv_xname);
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_TCCOFLOW) {
+ ifp->if_collisions += 256;
+ ifp->if_oerrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_TXDERROR) {
+ printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
+ rst = 1;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_TXLERR) {
+ printf("%s: tx late error\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ rst = 1;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_TXPERR) {
+ printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ rst = 1;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_TXSERR) {
+ printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
+ ifp->if_oerrors++;
+ rst = 1;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RCCOFLOW) {
+ ifp->if_collisions += 256;
+ ifp->if_ierrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RUOFLOW) {
+ ifp->if_ierrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_MCOFLOW) {
+ ifp->if_ierrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RXFOFLOW) {
+ printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RLCOLL) {
+ printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ ifp->if_collisions++;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_FCOFLOW) {
+ ifp->if_ierrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_CECOFLOW) {
+ ifp->if_ierrors += 256;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RXDROP) {
+ printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ }
+
+ if (why & QE_CR_STAT_RXSMALL) {
+ printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (why & QE_CR_STAT_RXLERR) {
+ printf("%s: rx late error\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (why & QE_CR_STAT_RXPERR) {
+ printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (why & QE_CR_STAT_RXSERR) {
+ printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
+ ifp->if_ierrors++;
+ r |= 1;
+ rst = 1;
+ }
+
+ if (r == 0)
+ printf("%s: unexpected interrupt error: %08x\n",
+ sc->sc_dev.dv_xname, why);
+
+ if (rst) {
+ printf("%s: resetting...\n", sc->sc_dev.dv_xname);
+ qereset(sc);
+ return -1;
+ }
+
+ return r;
+}
+
+int
+qeioctl(ifp, cmd, data)
+ struct ifnet *ifp;
+ u_long cmd;
+ caddr_t data;
+{
+ struct qesoftc *sc = ifp->if_softc;
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ ifp->if_flags |= IFF_UP;
+ switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+ case AF_INET:
+ qeinit(sc);
+ arp_ifinit(&sc->sc_arpcom, ifa);
+ break;
+#endif /* INET */
+#ifdef NS
+ /* XXX - This code is probably wrong. */
+ case AF_NS:
+ {
+ struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
+
+ if (ns_nullhost(*ina))
+ ina->x_host = *(union ns_host *)
+ (sc->sc_arpcom.ac_enaddr);
+ else
+ bcopy(ina->x_host.c_host,
+ sc->sc_arpcom.ac_enaddr,
+ sizeof(sc->sc_arpcom.ac_enaddr));
+ /* Set new address. */
+ qeinit(sc);
+ break;
+ }
+#endif /* NS */
+ default:
+ qeinit(sc);
+ break;
+ }
+ break;
+
+ case SIOCSIFFLAGS:
+ sc->sc_promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
+ if ((ifp->if_flags & IFF_UP) == 0 &&
+ (ifp->if_flags & IFF_RUNNING) != 0) {
+ /*
+ * If interface is marked down and it is running, then
+ * stop it.
+ */
+ qestop(sc);
+ ifp->if_flags &= ~IFF_RUNNING;
+ } else if ((ifp->if_flags & IFF_UP) != 0 &&
+ (ifp->if_flags & IFF_RUNNING) == 0) {
+ /*
+ * If interface is marked up and it is stopped, then
+ * start it.
+ */
+ qeinit(sc);
+ } else {
+ /*
+ * Reset the interface to pick up changes in any other
+ * flags that affect hardware registers.
+ */
+ qestop(sc);
+ qeinit(sc);
+ }
+#ifdef IEDEBUG
+ if (ifp->if_flags & IFF_DEBUG)
+ sc->sc_debug = IED_ALL;
+ else
+ sc->sc_debug = 0;
+#endif
+ break;
+
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ error = (cmd == SIOCADDMULTI) ?
+ ether_addmulti(ifr, &sc->sc_arpcom):
+ ether_delmulti(ifr, &sc->sc_arpcom);
+
+ if (error == ENETRESET) {
+ /*
+ * Multicast list has changed; set the hardware filter
+ * accordingly.
+ */
+ qe_mcreset(sc);
+ error = 0;
+ }
+ break;
+ default:
+ if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
+ splx(s);
+ return error;
+ }
+ error = EINVAL;
+ break;
+ }
+ splx(s);
+ return error;
+}
+
+void
+qeinit(sc)
+ struct qesoftc *sc;
+{
+ struct qe_mregs *mr = sc->sc_mr;
+ struct qe_cregs *cr = sc->sc_cr;
+ struct qecregs *qr = sc->sc_qr;
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ int s = splimp();
+ int i;
+
+ qec_reset(sc->sc_qec);
+
+ /*
+ * init QEC: 'qe' specific initializations
+ */
+ qr->msize = sc->sc_memsize / 4;
+ qr->rsize = sc->sc_memsize / 8;
+ qr->tsize = sc->sc_memsize / 8;
+ qr->psize = QEC_PSIZE_2048;
+ if (sc->sc_burst & SBUS_BURST_64)
+ i = QEC_CTRL_B64;
+ else if (sc->sc_burst & SBUS_BURST_32)
+ i = QEC_CTRL_B32;
+ else
+ i = QEC_CTRL_B16;
+ qr->ctrl = QEC_CTRL_MMODE | i;
+
+ /*
+ * Allocate descriptor ring and buffers, if not already done
+ */
+ if (sc->sc_desc == NULL)
+ sc->sc_desc_dva = (struct qe_desc *) dvma_malloc(
+ sizeof(struct qe_desc), &sc->sc_desc, M_NOWAIT);
+ if (sc->sc_bufs == NULL)
+ sc->sc_bufs_dva = (struct qe_bufs *) dvma_malloc(
+ sizeof(struct qe_bufs), &sc->sc_bufs, M_NOWAIT);
+
+ for (i = 0; i < QE_TX_RING_MAXSIZE; i++) {
+ sc->sc_desc->qe_txd[i].tx_addr =
+ (u_int32_t) &sc->sc_bufs_dva->tx_buf[i % QE_TX_RING_SIZE][0];
+ sc->sc_desc->qe_txd[i].tx_flags = 0;
+ }
+ for (i = 0; i < QE_RX_RING_MAXSIZE; i++) {
+ sc->sc_desc->qe_rxd[i].rx_addr =
+ (u_int32_t) &sc->sc_bufs_dva->rx_buf[i % QE_RX_RING_SIZE][0];
+ if ((i / QE_RX_RING_SIZE) == 0)
+ sc->sc_desc->qe_rxd[i].rx_flags =
+ QE_RXD_OWN | QE_RXD_LENGTH;
+ else
+ sc->sc_desc->qe_rxd[i].rx_flags = 0;
+ }
+
+ cr->rxds = (u_int32_t) &sc->sc_desc_dva->qe_rxd[0];
+ cr->txds = (u_int32_t) &sc->sc_desc_dva->qe_txd[0];
+
+ sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0;
+ sc->sc_last_rd = 0;
+
+ qestop(sc);
+
+ cr->rimask = 0;
+ cr->timask = 0;
+ cr->qmask = 0;
+ cr->mmask = QE_CR_MMASK_RXCOLL;
+ cr->rxwbufptr = cr->rxrbufptr = sc->sc_channel * sc->sc_qr->msize;
+ cr->txwbufptr = cr->txrbufptr = cr->rxrbufptr + sc->sc_qr->rsize;
+ cr->ccnt = 0;
+ cr->pipg = 0;
+
+ mr->phycc = QE_MR_PHYCC_AUTO;
+ mr->xmtfc = QE_MR_XMTFC_APADXMT;
+ mr->rcvfc = 0;
+ mr->imr = QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM;
+ mr->biucc = QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS;
+ mr->fifofc = QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
+ QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU;
+ mr->plscc = QE_MR_PLSCC_TP;
+
+ mr->iac = QE_MR_IAC_ACHNGE | QE_MR_IAC_PARESET;
+ mr->padr = sc->sc_arpcom.ac_enaddr[0];
+ mr->padr = sc->sc_arpcom.ac_enaddr[1];
+ mr->padr = sc->sc_arpcom.ac_enaddr[2];
+ mr->padr = sc->sc_arpcom.ac_enaddr[3];
+ mr->padr = sc->sc_arpcom.ac_enaddr[4];
+ mr->padr = sc->sc_arpcom.ac_enaddr[5];
+
+ mr->iac = QE_MR_IAC_ACHNGE | QE_MR_IAC_LARESET;
+ for (i = 0; i < 8; i++)
+ mr->ladrf = 0;
+ mr->iac = 0;
+
+ delay(50000);
+ if ((mr->phycc & QE_MR_PHYCC_LSTAT) == QE_MR_PHYCC_LSTAT)
+ printf("%s: no carrier\n", sc->sc_dev.dv_xname);
+
+ i = mr->mpc; /* cleared on read */
+
+ mr->maccc = QE_MR_MACCC_TXENAB | QE_MR_MACCC_RXENAB;
+
+ ifp->if_flags |= IFF_RUNNING;
+ ifp->if_flags &= ~IFF_OACTIVE;
+ splx(s);
+}
+
+/*
+ * Routine to copy from mbuf chain to transmit buffer in
+ * network buffer memory.
+ */
+int
+qe_put(sc, idx, m)
+ struct qesoftc *sc;
+ int idx;
+ struct mbuf *m;
+{
+ struct mbuf *n;
+ int len, tlen = 0, boff = 0;
+
+ for (; m; m = n) {
+ len = m->m_len;
+ if (len == 0) {
+ MFREE(m, n);
+ continue;
+ }
+ bcopy(mtod(m, caddr_t),
+ &sc->sc_bufs->tx_buf[idx % QE_TX_RING_SIZE][boff], len);
+ boff += len;
+ tlen += len;
+ MFREE(m, n);
+ }
+ return tlen;
+}
+
+/*
+ * Pass a packet to the higher levels.
+ */
+void
+qe_read(sc, idx, len)
+ struct qesoftc *sc;
+ int idx, len;
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct ether_header *eh;
+ struct mbuf *m;
+
+ if (len <= sizeof(struct ether_header) ||
+ len > ETHERMTU + sizeof(struct ether_header)) {
+
+ printf("%s: invalid packet size %d; dropping\n",
+ ifp->if_xname, len);
+
+ ifp->if_ierrors++;
+ return;
+ }
+
+ /*
+ * Pull packet off interface.
+ */
+ m = qe_get(sc, idx, len);
+ if (m == NULL) {
+ ifp->if_ierrors++;
+ return;
+ }
+ ifp->if_ipackets++;
+
+ /* We assume that the header fit entirely in one mbuf. */
+ eh = mtod(m, struct ether_header *);
+
+#if NBPFILTER > 0
+ /*
+ * Check if there's a BPF listener on this interface.
+ * If so, hand off the raw packet to BPF.
+ */
+ if (ifp->if_bpf)
+ bpf_mtap(ifp->if_bpf, m);
+#endif
+ /* Pass the packet up, with the ether header sort-of removed. */
+ m_adj(m, sizeof(struct ether_header));
+ ether_input(ifp, eh, m);
+}
+
+/*
+ * Pull data off an interface.
+ * Len is the length of data, with local net header stripped.
+ * We copy the data into mbufs. When full cluster sized units are present,
+ * we copy into clusters.
+ */
+struct mbuf *
+qe_get(sc, idx, totlen)
+ struct qesoftc *sc;
+ int idx, totlen;
+{
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct mbuf *m;
+ struct mbuf *top, **mp;
+ int len, pad, boff = 0;
+
+ MGETHDR(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL)
+ return (NULL);
+ m->m_pkthdr.rcvif = ifp;
+ m->m_pkthdr.len = totlen;
+ pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
+ m->m_data += pad;
+ len = MHLEN - pad;
+ top = NULL;
+ mp = &top;
+
+ while (totlen > 0) {
+ if (top) {
+ MGET(m, M_DONTWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(top);
+ return NULL;
+ }
+ len = MLEN;
+ }
+ if (top && totlen >= MINCLSIZE) {
+ MCLGET(m, M_DONTWAIT);
+ if (m->m_flags & M_EXT)
+ len = MCLBYTES;
+ }
+ m->m_len = len = min(totlen, len);
+ bcopy(&sc->sc_bufs->rx_buf[idx % QE_RX_RING_SIZE][boff],
+ mtod(m, caddr_t), len);
+ boff += len;
+ totlen -= len;
+ *mp = m;
+ mp = &m->m_next;
+ }
+
+ return (top);
+}
+
+/*
+ * Reset multicast filter.
+ */
+void
+qe_mcreset(sc)
+ struct qesoftc *sc;
+{
+ struct arpcom *ac = &sc->sc_arpcom;
+ struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+ struct qe_mregs *mr = sc->sc_mr;
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ u_int32_t crc;
+ u_int16_t hash[4];
+ u_int8_t octet, maccc = 0, *ladrp = (u_int8_t *)&hash[0];
+ int i, j;
+
+ if (ifp->if_flags & IFF_ALLMULTI) {
+ mr->iac = QE_MR_IAC_ACHNGE | QE_MR_IAC_LARESET;
+ for (i = 0; i < 8; i++)
+ mr->ladrf = 0xff;
+ mr->iac = 0;
+ }
+ else if (ifp->if_flags & IFF_PROMISC) {
+ maccc |= QE_MR_MACCC_PROMISC;
+ }
+ else {
+
+ hash[3] = hash[2] = hash[1] = hash[0] = 0;
+
+ ETHER_FIRST_MULTI(step, ac, enm);
+ while (enm != NULL) {
+ if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
+ ETHER_ADDR_LEN)) {
+ /*
+ * We must listen to a range of multicast
+ * addresses. For now, just accept all
+ * multicasts, rather than trying to set only
+ * those filter bits needed to match the range.
+ * (At this time, the only use of address
+ * ranges is for IP multicast routing, for
+ * which the range is big enough to require
+ * all bits set.)
+ */
+ mr->iac = QE_MR_IAC_ACHNGE | QE_MR_IAC_LARESET;
+ for (i = 0; i < 8; i++)
+ mr->ladrf = 0xff;
+ mr->iac = 0;
+ ifp->if_flags |= IFF_ALLMULTI;
+ break;
+ }
+
+ crc = 0xffffffff;
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ octet = enm->enm_addrlo[i];
+
+ for (j = 0; j < 8; j++) {
+ if ((crc & 1) ^ (octet & 1)) {
+ crc >>= 1;
+ crc ^= MC_POLY_LE;
+ }
+ else
+ crc >>= 1;
+ octet >>= 1;
+ }
+ }
+
+ crc >>= 26;
+ hash[crc >> 4] |= 1 << (crc & 0xf);
+ ETHER_NEXT_MULTI(step, enm);
+ }
+
+ mr->iac = QE_MR_IAC_ACHNGE | QE_MR_IAC_LARESET;
+ for (i = 0; i < 8; i++)
+ mr->ladrf = ladrp[i];
+ mr->iac = 0;
+ }
+
+ mr->maccc = maccc | QE_MR_MACCC_TXENAB | QE_MR_MACCC_RXENAB;
+}
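
For reference, a minimal standalone sketch (not part of the patch) of the
logical address filter hash that qe_mcreset() computes above: a little-endian
CRC-32 over the six address octets, of which the high six bits select one of
the 64 filter bits. The sample multicast address and the printout are
illustrative only.

/*
 * Sketch of the MACE logical address filter hash, mirroring the loop
 * in qe_mcreset().  Note the driver runs on big-endian sparc; on a
 * little-endian host the byte order within the hash words differs.
 */
#include <stdio.h>
#include <stdint.h>

#define MC_POLY_LE	0xedb88320	/* same polynomial as qereg.h */

int
main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash[4] = { 0, 0, 0, 0 };
	uint8_t *ladrp = (uint8_t *)&hash[0];
	uint32_t crc = 0xffffffff;
	uint8_t octet;
	int i, j;

	for (i = 0; i < 6; i++) {
		octet = addr[i];
		for (j = 0; j < 8; j++) {
			if ((crc & 1) ^ (octet & 1)) {
				crc >>= 1;
				crc ^= MC_POLY_LE;
			} else
				crc >>= 1;
			octet >>= 1;
		}
	}
	crc >>= 26;				/* keep the high 6 bits */
	hash[crc >> 4] |= 1 << (crc & 0xf);

	for (i = 0; i < 8; i++)			/* bytes written to mr->ladrf */
		printf("ladrf[%d] = 0x%02x\n", i, ladrp[i]);
	return (0);
}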
diff --git a/sys/arch/sparc/dev/qec.c b/sys/arch/sparc/dev/qec.c
index 8599a9183eb..0b5fd2de252 100644
--- a/sys/arch/sparc/dev/qec.c
+++ b/sys/arch/sparc/dev/qec.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: qec.c,v 1.5 1998/08/26 00:57:04 jason Exp $ */
+/* $OpenBSD: qec.c,v 1.6 1998/10/19 05:41:20 jason Exp $ */
/*
* Copyright (c) 1998 Theo de Raadt and Jason L. Wright.
@@ -98,6 +98,13 @@ qecattach(parent, self, aux)
ca->ca_ra.ra_reg[1].rr_len);
sc->sc_bufsiz = ca->ca_ra.ra_reg[1].rr_len;
+ /*
+ * On qec+qe, the qec has the interrupt priority, but we
+ * need to pass that down so that the qe's can handle them.
+ */
+ if (ca->ca_ra.ra_nintr == 1)
+ sc->sc_pri = ca->ca_ra.ra_intr[0].int_pri;
+
node = sc->sc_node = ca->ca_ra.ra_node;
qec_fix_range(sc, (struct sbus_softc *)parent);
@@ -109,6 +116,16 @@ qecattach(parent, self, aux)
if (sbusburst == 0)
sbusburst = SBUS_BURST_32 - 1; /* 1->16 */
+ sc->sc_nchannels = getpropint(ca->ca_ra.ra_node, "#channels", -1);
+ if (sc->sc_nchannels == -1) {
+ printf(": no channels\n");
+ return;
+ }
+ else if (sc->sc_nchannels < 1 || sc->sc_nchannels > 4) {
+ printf(": invalid number of channels: %d\n", sc->sc_nchannels);
+ return;
+ }
+
sc->sc_burst = getpropint(ca->ca_ra.ra_node, "burst-sizes", -1);
if (sc->sc_burst == -1)
/* take SBus burst sizes */
@@ -117,7 +134,9 @@ qecattach(parent, self, aux)
/* Clamp at parent's burst sizes */
sc->sc_burst &= sbusburst;
- printf(": %dK memory", sc->sc_bufsiz / 1024);
+ printf(": %dK memory %d %s",
+ sc->sc_bufsiz / 1024, sc->sc_nchannels,
+ (sc->sc_nchannels == 1) ? "channel" : "channels");
node = sc->sc_node = ca->ca_ra.ra_node;
diff --git a/sys/arch/sparc/dev/qecvar.h b/sys/arch/sparc/dev/qecvar.h
index f7a56440751..53a51cc5d40 100644
--- a/sys/arch/sparc/dev/qecvar.h
+++ b/sys/arch/sparc/dev/qecvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: qecvar.h,v 1.4 1998/08/26 00:57:07 jason Exp $ */
+/* $OpenBSD: qecvar.h,v 1.5 1998/10/19 05:41:20 jason Exp $ */
/*
* Copyright (c) 1998 Theo de Raadt and Jason L. Wright.
@@ -36,6 +36,8 @@ struct qec_softc {
caddr_t sc_buffer; /* VA of the buffer we provide */
int sc_bufsiz; /* Size of buffer */
int sc_nrange; /* number of ranges */
+ int sc_pri;
+ int sc_nchannels; /* number of channels on board */
struct rom_range *sc_range; /* array of ranges */
};
diff --git a/sys/arch/sparc/dev/qereg.h b/sys/arch/sparc/dev/qereg.h
new file mode 100644
index 00000000000..81eb88b590c
--- /dev/null
+++ b/sys/arch/sparc/dev/qereg.h
@@ -0,0 +1,321 @@
+/* $OpenBSD: qereg.h,v 1.1 1998/10/19 05:41:20 jason Exp $ */
+
+/*
+ * Copyright (c) 1998 Jason L. Wright.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * QE Channel registers
+ */
+struct qe_cregs {
+ volatile u_int32_t ctrl; /* control */
+ volatile u_int32_t stat; /* status */
+ volatile u_int32_t rxds; /* rx descriptor ring ptr */
+ volatile u_int32_t txds; /* tx descriptor ring ptr */
+ volatile u_int32_t rimask; /* rx interrupt mask */
+ volatile u_int32_t timask; /* tx interrupt mask */
+ volatile u_int32_t qmask; /* qec error interrupt mask */
+ volatile u_int32_t mmask; /* mace error interrupt mask */
+ volatile u_int32_t rxwbufptr; /* local memory rx write ptr */
+ volatile u_int32_t rxrbufptr; /* local memory rx read ptr */
+ volatile u_int32_t txwbufptr; /* local memory tx write ptr */
+ volatile u_int32_t txrbufptr; /* local memory tx read ptr */
+ volatile u_int32_t ccnt; /* collision counter */
+ volatile u_int32_t pipg; /* inter-frame gap */
+};
+
+/* qe_cregs.ctrl: control. */
+#define QE_CR_CTRL_RXOFF 0x00000004 /* disable receiver */
+#define QE_CR_CTRL_RESET 0x00000002 /* reset this channel */
+#define QE_CR_CTRL_TWAKEUP 0x00000001 /* tx dma wakeup */
+
+/* qe_cregs.stat: status. */
+#define QE_CR_STAT_EDEFER 0x10000000 /* excessive defers */
+#define QE_CR_STAT_CLOSS 0x08000000 /* loss of carrier */
+#define QE_CR_STAT_ERETRIES 0x04000000 /* >16 retries */
+#define QE_CR_STAT_LCOLL 0x02000000 /* late tx collision */
+#define QE_CR_STAT_FUFLOW 0x01000000 /* fifo underflow */
+#define QE_CR_STAT_JERROR 0x00800000 /* jabber error */
+#define QE_CR_STAT_BERROR 0x00400000 /* babble error */
+#define QE_CR_STAT_TXIRQ 0x00200000 /* tx interrupt */
+#define QE_CR_STAT_TCCOFLOW 0x00100000 /* tx collision cntr expired */
+#define QE_CR_STAT_TXDERROR 0x00080000 /* tx descriptor is bad */
+#define QE_CR_STAT_TXLERR 0x00040000 /* tx late error */
+#define QE_CR_STAT_TXPERR 0x00020000 /* tx parity error */
+#define QE_CR_STAT_TXSERR 0x00010000 /* tx sbus error ack */
+#define QE_CR_STAT_RCCOFLOW 0x00001000 /* rx collision cntr expired */
+#define QE_CR_STAT_RUOFLOW 0x00000800 /* rx runt counter expired */
+#define QE_CR_STAT_MCOFLOW 0x00000400 /* rx missed counter expired */
+#define QE_CR_STAT_RXFOFLOW 0x00000200 /* rx fifo over flow */
+#define QE_CR_STAT_RLCOLL 0x00000100 /* rx late collision */
+#define QE_CR_STAT_FCOFLOW 0x00000080 /* rx frame counter expired */
+#define QE_CR_STAT_CECOFLOW 0x00000040 /* rx crc error cntr expired */
+#define QE_CR_STAT_RXIRQ 0x00000020 /* rx interrupt */
+#define QE_CR_STAT_RXDROP 0x00000010 /* rx dropped packet */
+#define QE_CR_STAT_RXSMALL 0x00000008 /* rx buffer too small */
+#define QE_CR_STAT_RXLERR 0x00000004 /* rx late error */
+#define QE_CR_STAT_RXPERR 0x00000002 /* rx parity error */
+#define QE_CR_STAT_RXSERR 0x00000001 /* rx sbus error ack */
+
+/* qe_cregs.qmask: qec error interrupt mask. */
+#define QE_CR_QMASK_COFLOW 0x00100000 /* collision cntr overflow */
+#define QE_CR_QMASK_TXDERROR 0x00080000 /* tx descriptor error */
+#define QE_CR_QMASK_TXLERR 0x00040000 /* tx late error */
+#define QE_CR_QMASK_TXPERR 0x00020000 /* tx parity error */
+#define QE_CR_QMASK_TXSERR 0x00010000 /* tx sbus error ack */
+#define QE_CR_QMASK_RXDROP 0x00000010 /* rx packet dropped */
+#define QE_CR_QMASK_RXSMALL 0x00000008 /* rx buffer too small */
+#define QE_CR_QMASK_RXLERR 0x00000004 /* rx late error */
+#define QE_CR_QMASK_RXPERR 0x00000002 /* rx parity error */
+#define QE_CR_QMASK_RXSERR 0x00000001 /* rx sbus error ack */
+
+/* qe_cregs.mmask: MACE error interrupt mask. */
+#define QE_CR_MMASK_EDEFER 0x10000000 /* excess defer */
+#define QE_CR_MMASK_CLOSS 0x08000000 /* carrier loss */
+#define QE_CR_MMASK_ERETRY 0x04000000 /* excess retry */
+#define QE_CR_MMASK_LCOLL 0x02000000 /* late collision error */
+#define QE_CR_MMASK_UFLOW 0x01000000 /* underflow */
+#define QE_CR_MMASK_JABBER 0x00800000 /* jabber error */
+#define QE_CR_MMASK_BABBLE 0x00400000 /* babble error */
+#define QE_CR_MMASK_OFLOW 0x00000800 /* overflow */
+#define QE_CR_MMASK_RXCOLL 0x00000400 /* rx coll-cntr overflow */
+#define QE_CR_MMASK_RPKT 0x00000200 /* runt pkt overflow */
+#define QE_CR_MMASK_MPKT 0x00000100 /* missed pkt overflow */
+
+/* qe_cregs.pipg: inter-frame gap. */
+#define QE_CR_PIPG_TENAB 0x00000020 /* enable throttle */
+#define QE_CR_PIPG_MMODE 0x00000010 /* manual mode */
+#define QE_CR_PIPG_WMASK 0x0000000f /* sbus wait mask */
+
+/* MACE registers */
+struct qe_mregs {
+ volatile u_int8_t rcvfifo; /*0*/ /* receive fifo */
+ volatile u_int8_t xmtfifo; /*1*/ /* transmit fifo */
+ volatile u_int8_t xmtfc; /*2*/ /* transmit frame control */
+ volatile u_int8_t xmtfs; /*3*/ /* transmit frame status */
+ volatile u_int8_t xmtrc; /*4*/ /* tx retry count */
+ volatile u_int8_t rcvfc; /*5*/ /* receive frame control */
+ volatile u_int8_t rcvfs; /*6*/ /* receive frame status */
+ volatile u_int8_t fifofc; /*7*/ /* fifo frame count */
+ volatile u_int8_t ir; /*8*/ /* interrupt register */
+ volatile u_int8_t imr; /*9*/ /* interrupt mask register */
+ volatile u_int8_t pr; /*10*/ /* poll register */
+ volatile u_int8_t biucc; /*11*/ /* biu config control */
+ volatile u_int8_t fifocc; /*12*/ /* fifo config control */
+ volatile u_int8_t maccc; /*13*/ /* mac config control */
+ volatile u_int8_t plscc; /*14*/ /* pls config control */
+ volatile u_int8_t phycc; /*15*/ /* phy config control */
+ volatile u_int8_t chipid1; /*16*/ /* chipid, low byte */
+ volatile u_int8_t chipid2; /*17*/ /* chipid, high byte */
+ volatile u_int8_t iac; /*18*/ /* internal address config */
+ volatile u_int8_t _reserved0; /*19*/ /* reserved */
+ volatile u_int8_t ladrf; /*20*/ /* logical address filter */
+ volatile u_int8_t padr; /*21*/ /* physical address */
+ volatile u_int8_t _reserved1; /*22*/ /* reserved */
+ volatile u_int8_t _reserved2; /*23*/ /* reserved */
+ volatile u_int8_t mpc; /*24*/ /* missed packet count */
+ volatile u_int8_t _reserved3; /*25*/ /* reserved */
+ volatile u_int8_t rntpc; /*26*/ /* runt packet count */
+ volatile u_int8_t rcvcc; /*27*/ /* receive collision count */
+ volatile u_int8_t _reserved4; /*28*/ /* reserved */
+ volatile u_int8_t utr; /*29*/ /* user test register */
+ volatile u_int8_t rtr1; /*30*/ /* reserved test register 1 */
+ volatile u_int8_t rtr2; /*31*/ /* reserved test register 2 */
+};
+
+/* qe_mregs.xmtfc: transmit frame control. */
+#define QE_MR_XMTFC_DRETRY 0x80 /* disable retries */
+#define QE_MR_XMTFC_DXMTFCS 0x08 /* disable tx fcs */
+#define QE_MR_XMTFC_APADXMT 0x01 /* enable auto padding */
+
+/* qe_mregs.xmtfs: transmit frame status. */
+#define QE_MR_XMTFS_XMTSV 0x80 /* tx valid */
+#define QE_MR_XMTFS_UFLO 0x40 /* tx underflow */
+#define QE_MR_XMTFS_LCOL 0x20 /* tx late collision */
+#define QE_MR_XMTFS_MORE 0x10 /* tx > 1 retries */
+#define QE_MR_XMTFS_ONE 0x08 /* tx 1 retry */
+#define QE_MR_XMTFS_DEFER 0x04 /* tx pkt deferred */
+#define QE_MR_XMTFS_LCAR 0x02 /* tx carrier lost */
+#define QE_MR_XMTFS_RTRY 0x01 /* tx retry error */
+
+/* qe_mregs.xmtrc: transmit retry count. */
+#define QE_MR_XMTRC_EXDEF 0x80 /* tx excess defers */
+#define QE_MR_XMTRC_XMTRC 0x0f /* tx retry count mask */
+
+/* qe_mregs.rcvfc: receive frame control. */
+#define QE_MR_RCVFC_LLRCV 0x08 /* rx low latency */
+#define QE_MR_RCVFC_MR 0x04 /* rx addr match/reject */
+#define QE_MR_RCVFC_ASTRPRCV 0x01 /* rx auto strip */
+
+/* qe_mregs.rcvfs: receive frame status. */
+#define QE_MR_RCVFS_OFLO 0x80 /* rx overflow */
+#define QE_MR_RCVFS_CLSN 0x40 /* rx late collision */
+#define QE_MR_RCVFS_FRAM 0x20 /* rx framing error */
+#define QE_MR_RCVFS_FCS 0x10 /* rx fcs error */
+#define QE_MR_RCVFS_RCVCNT 0x0f /* rx msg byte count mask */
+
+/* qe_mregs.fifofc: fifo frame count. */
+#define QE_MR_FIFOFC_RCVFC 0xf0 /* rx fifo frame count */
+#define QE_MR_FIFOFC_XMTFC 0x0f /* tx fifo frame count */
+
+/* qe_mregs.ir: interrupt register. */
+#define QE_MR_IR_JAB 0x80 /* jabber error */
+#define QE_MR_IR_BABL 0x40 /* babble error */
+#define QE_MR_IR_CERR 0x20 /* collision error */
+#define QE_MR_IR_RCVCCO 0x10 /* collision cnt overflow */
+#define QE_MR_IR_RNTPCO 0x08 /* runt pkt cnt overflow */
+#define QE_MR_IR_MPCO 0x04 /* miss pkt cnt overflow */
+#define QE_MR_IR_RCVINT 0x02 /* packet received */
+#define QE_MR_IR_XMTINT 0x01 /* packet transmitted */
+
+/* qe_mregs.imr: interrupt mask register. */
+#define QE_MR_IMR_JABM 0x80 /* jabber errors */
+#define QE_MR_IMR_BABLM 0x40 /* babble errors */
+#define QE_MR_IMR_CERRM 0x20 /* collision errors */
+#define QE_MR_IMR_RCVCCOM 0x10 /* rx collision count oflow */
+#define QE_MR_IMR_RNTPCOM 0x08 /* runt pkt cnt ovrflw */
+#define QE_MR_IMR_MPCOM 0x04 /* miss pkt cnt ovrflw */
+#define QE_MR_IMR_RCVINTM 0x02 /* rx interrupts */
+#define QE_MR_IMR_XMTINTM 0x01 /* tx interrupts */
+
+/* qe_mregs.pr: poll register. */
+#define QE_MR_PR_XMTSV 0x80 /* tx status is valid */
+#define QE_MR_PR_TDTREQ 0x40 /* tx data xfer request */
+#define QE_MR_PR_RDTREQ 0x20 /* rx data xfer request */
+
+/* qe_mregs.biucc: biu config control. */
+#define QE_MR_BIUCC_BSWAP 0x40 /* byte swap */
+#define QE_MR_BIUCC_4TS 0x00 /* 4byte xmit start point */
+#define QE_MR_BIUCC_16TS 0x10 /* 16byte xmit start point */
+#define QE_MR_BIUCC_64TS 0x20 /* 64byte xmit start point */
+#define QE_MR_BIUCC_112TS 0x30 /* 112byte xmit start point */
+#define QE_MR_BIUCC_SWRST 0x01 /* sw-reset mace */
+
+/* qe_mregs.fifocc: fifo config control. */
+#define QE_MR_FIFOCC_TXF8 0x00 /* tx fifo 8 write cycles */
+#define QE_MR_FIFOCC_TXF32 0x80 /* tx fifo 32 write cycles */
+#define QE_MR_FIFOCC_TXF16 0x40 /* tx fifo 16 write cycles */
+#define QE_MR_FIFOCC_RXF64 0x20 /* rx fifo 64 write cycles */
+#define QE_MR_FIFOCC_RXF32 0x10 /* rx fifo 32 write cycles */
+#define QE_MR_FIFOCC_RXF16 0x00 /* rx fifo 16 write cycles */
+#define QE_MR_FIFOCC_TFWU 0x08 /* tx fifo watermark update */
+#define QE_MR_FIFOCC_RFWU 0x04 /* rx fifo watermark update */
+#define QE_MR_FIFOCC_TBENAB 0x02 /* tx burst enable */
+#define QE_MR_FIFOCC_RBENAB 0x01 /* rx burst enable */
+
+/* qe_mregs.maccc: mac config control. */
+#define QE_MR_MACCC_PROMISC 0x80 /* promiscuous mode enable */
+#define QE_MR_MACCC_TPDDISAB 0x40 /* tx 2part deferral enable */
+#define QE_MR_MACCC_MBAENAB 0x20 /* modified backoff enable */
+#define QE_MR_MACCC_RPADISAB 0x08 /* rx physical addr disable */
+#define QE_MR_MACCC_RBDISAB 0x04 /* rx broadcast disable */
+#define QE_MR_MACCC_TXENAB 0x02 /* enable transmitter */
+#define QE_MR_MACCC_RXENAB 0x01 /* enable receiver */
+
+/* qe_mregs.plscc: pls config control. */
+#define QE_MR_PLSCC_TXMS 0x08 /* tx mode select */
+#define QE_MR_PLSCC_GPSI 0x06 /* use gpsi connector */
+#define QE_MR_PLSCC_DAI 0x04 /* use dai connector */
+#define QE_MR_PLSCC_TP 0x02 /* use twistedpair connector */
+#define QE_MR_PLSCC_AUI 0x00 /* use aui connector */
+#define QE_MR_PLSCC_IOENAB 0x01 /* pls i/o enable */
+
+/* qe_mregs.phycc: phy config control. */
+#define QE_MR_PHYCC_LSTAT 0x80 /* link status */
+#define QE_MR_PHYCC_LTSTDIS 0x40 /* disable link test logic */
+#define QE_MR_PHYCC_RXPOLE 0x20 /* rx polarity */
+#define QE_MR_PHYCC_APCDISB 0x10 /* autopolaritycorrect disab */
+#define QE_MR_PHYCC_LTENAB 0x08 /* select low threshold */
+#define QE_MR_PHYCC_AUTO 0x04 /* connector port auto-sel */
+#define QE_MR_PHYCC_RWU 0x02 /* remote wakeup */
+#define QE_MR_PHYCC_AW 0x01 /* auto wakeup */
+
+/* qe_mregs.iac: internal address config. */
+#define QE_MR_IAC_ACHNGE 0x80 /* start address change */
+#define QE_MR_IAC_PARESET 0x04 /* physical address reset */
+#define QE_MR_IAC_LARESET 0x02 /* logical address reset */
+
+/* qe_mregs.utr: user test register. */
+#define QE_MR_UTR_RTRENAB 0x80 /* enable resv test register */
+#define QE_MR_UTR_RTRDISAB 0x40 /* disab resv test register */
+#define QE_MR_UTR_RPACCEPT 0x20 /* accept runt packets */
+#define QE_MR_UTR_FCOLL 0x10 /* force collision status */
+#define QE_MR_UTR_FCSENAB 0x08 /* enable fcs on rx */
+#define QE_MR_UTR_INTLOOPM 0x06 /* Internal loopback w/mandec */
+#define QE_MR_UTR_INTLOOP 0x04 /* Internal loopback */
+#define QE_MR_UTR_EXTLOOP 0x02 /* external loopback */
+#define QE_MR_UTR_NOLOOP 0x00 /* no loopback */
+
+/*
+ * QE receive descriptor
+ */
+struct qe_rxd {
+ volatile u_int32_t rx_flags; /* rx descriptor flags */
+ volatile u_int32_t rx_addr; /* rx buffer address */
+};
+
+#define QE_RXD_OWN 0x80000000 /* ownership: 1=hw, 0=sw */
+#define QE_RXD_UPDATE 0x10000000 /* being updated? */
+#define QE_RXD_LENGTH 0x000007ff /* packet length */
+
+/*
+ * QE transmit descriptor
+ */
+struct qe_txd {
+ volatile u_int32_t tx_flags; /* tx descriptor flags */
+ volatile u_int32_t tx_addr; /* tx buffer address */
+};
+
+#define QE_TXD_OWN 0x80000000 /* ownership: 1=hw, 0=sw */
+#define QE_TXD_SOP 0x40000000 /* start of packet marker */
+#define QE_TXD_EOP 0x20000000 /* end of packet marker */
+#define QE_TXD_UPDATE 0x10000000 /* being updated? */
+#define QE_TXD_LENGTH 0x000007ff /* packet length */
+
+/* Buffer and Ring sizes: fixed ring size */
+#define QE_TX_RING_MAXSIZE 256 /* maximum tx ring size */
+#define QE_RX_RING_MAXSIZE 256 /* maximum rx ring size */
+#define QE_TX_RING_SIZE 16
+#define QE_RX_RING_SIZE 16
+#define QE_PKT_BUF_SZ 2048
+
+/*
+ * QE descriptor rings
+ */
+struct qe_desc {
+ struct qe_rxd qe_rxd[QE_RX_RING_MAXSIZE];
+ struct qe_txd qe_txd[QE_TX_RING_MAXSIZE];
+};
+
+/*
+ * QE packet buffers
+ */
+struct qe_bufs {
+ char rx_buf[QE_RX_RING_SIZE][QE_PKT_BUF_SZ];
+ char tx_buf[QE_TX_RING_SIZE][QE_PKT_BUF_SZ];
+};
+
+#define MC_POLY_LE 0xedb88320 /* mcast crc, little endian */
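
As a quick check of the per-channel DVMA footprint these definitions imply
(a standalone sketch, not part of the patch; the structures are copied from
above): qe_desc works out to 4KB of descriptors and qe_bufs to 64KB of packet
buffers. The rings have 256 entries but only 16 buffers each, which is why
qeinit() points descriptor i at buffer i % QE_TX_RING_SIZE (or
QE_RX_RING_SIZE).

#include <stdio.h>
#include <stdint.h>

#define QE_TX_RING_MAXSIZE	256
#define QE_RX_RING_MAXSIZE	256
#define QE_TX_RING_SIZE		16
#define QE_RX_RING_SIZE		16
#define QE_PKT_BUF_SZ		2048

struct qe_rxd { uint32_t rx_flags, rx_addr; };
struct qe_txd { uint32_t tx_flags, tx_addr; };

struct qe_desc {
	struct qe_rxd qe_rxd[QE_RX_RING_MAXSIZE];
	struct qe_txd qe_txd[QE_TX_RING_MAXSIZE];
};

struct qe_bufs {
	char rx_buf[QE_RX_RING_SIZE][QE_PKT_BUF_SZ];
	char tx_buf[QE_TX_RING_SIZE][QE_PKT_BUF_SZ];
};

int
main(void)
{
	printf("qe_desc: %zu bytes\n", sizeof(struct qe_desc));	/* 4096 */
	printf("qe_bufs: %zu bytes\n", sizeof(struct qe_bufs));	/* 65536 */
	return (0);
}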
diff --git a/sys/arch/sparc/dev/qevar.h b/sys/arch/sparc/dev/qevar.h
new file mode 100644
index 00000000000..035dab3894e
--- /dev/null
+++ b/sys/arch/sparc/dev/qevar.h
@@ -0,0 +1,54 @@
+/* $OpenBSD: qevar.h,v 1.1 1998/10/19 05:41:21 jason Exp $ */
+
+/*
+ * Copyright (c) 1998 Jason L. Wright.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+struct qesoftc {
+ struct device sc_dev;
+ struct sbusdev sc_sd; /* sbus device */
+ struct intrhand sc_ih; /* interrupt vectoring */
+ struct arpcom sc_arpcom; /* ethernet common */
+
+ struct qec_softc *sc_qec; /* QEC parent */
+ struct qecregs *sc_qr; /* QEC registers */
+ struct qe_mregs *sc_mr; /* MACE registers */
+ struct qe_cregs *sc_cr; /* Channel registers */
+
+ void *sc_mem;
+ int sc_memsize;
+ int sc_channel;
+ u_int sc_rev;
+
+ int sc_promisc;
+ int sc_burst;
+
+ struct qe_bufs *sc_bufs, *sc_bufs_dva;
+ struct qe_desc *sc_desc, *sc_desc_dva;
+
+ int sc_no_td, sc_first_td, sc_last_td;
+ int sc_last_rd;
+};
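
To make the sc_first_td/sc_last_td/sc_no_td bookkeeping concrete, here is a
small host-side simulation (an illustrative sketch under simplified
assumptions, not driver code) of the OWN-bit handshake that qestart() and
qe_tint() perform on the transmit ring; hardware completion is faked by
clearing QE_TXD_OWN by hand.

#include <stdio.h>
#include <stdint.h>

#define QE_TX_RING_MAXSIZE	256
#define QE_TX_RING_SIZE		16
#define QE_TXD_OWN		0x80000000
#define QE_TXD_LENGTH		0x000007ff

static uint32_t tx_flags[QE_TX_RING_MAXSIZE];
static int first_td, last_td, no_td;

/* Roughly what qestart() does per packet: hand a descriptor to the hw. */
static int
tx_queue(int len)
{
	if (no_td == QE_TX_RING_SIZE)	/* ring full; driver sets IFF_OACTIVE */
		return (-1);
	tx_flags[last_td] = QE_TXD_OWN | (len & QE_TXD_LENGTH);
	if (++last_td == QE_TX_RING_MAXSIZE)
		last_td = 0;
	no_td++;
	return (0);
}

/* Roughly what qe_tint() does: reclaim descriptors the hw has released. */
static void
tx_reclaim(void)
{
	while (no_td > 0 && (tx_flags[first_td] & QE_TXD_OWN) == 0) {
		if (++first_td == QE_TX_RING_MAXSIZE)
			first_td = 0;
		no_td--;
	}
}

int
main(void)
{
	int i;

	for (i = 0; i < QE_TX_RING_SIZE; i++)
		tx_queue(64);
	if (tx_queue(64) == -1)
		printf("ring full with %d in flight\n", no_td);
	tx_flags[first_td] &= ~QE_TXD_OWN;	/* pretend hw finished one */
	tx_reclaim();
	printf("after reclaim: %d in flight\n", no_td);
	return (0);
}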