summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--sys/dev/pci/files.pci8
-rw-r--r--sys/dev/pci/if_oce.c2695
-rw-r--r--sys/dev/pci/oce.c2178
-rw-r--r--sys/dev/pci/ocereg.h3409
-rw-r--r--sys/dev/pci/ocevar.h929
5 files changed, 9218 insertions, 1 deletions
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index 51acf641202..548f75b2af3 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -1,4 +1,4 @@
-# $OpenBSD: files.pci,v 1.282 2012/03/06 12:48:07 mikeb Exp $
+# $OpenBSD: files.pci,v 1.283 2012/08/02 17:35:52 mikeb Exp $
# $NetBSD: files.pci,v 1.20 1996/09/24 17:47:15 christos Exp $
#
# Config file and device description for machine-independent PCI code.
@@ -397,6 +397,12 @@ device myx: ether, ifnet, ifmedia
attach myx at pci
file dev/pci/if_myx.c myx
+# Emulex OneConnect 10Gb Ethernet
+device oce: ether, ifnet, ifmedia
+attach oce at pci
+file dev/pci/if_oce.c oce
+file dev/pci/oce.c oce
+
# DEC/Intel 21143 and "tulip" clone ethernet
attach dc at pci with dc_pci
file dev/pci/if_dc_pci.c dc_pci
diff --git a/sys/dev/pci/if_oce.c b/sys/dev/pci/if_oce.c
new file mode 100644
index 00000000000..2dd62038008
--- /dev/null
+++ b/sys/dev/pci/if_oce.c
@@ -0,0 +1,2695 @@
+/* $OpenBSD: if_oce.c,v 1.1 2012/08/02 17:35:52 mikeb Exp $ */
+
+/*
+ * Copyright (c) 2012 Mike Belopuhov
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "bpfilter.h"
+#include "vlan.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/socket.h>
+#include <sys/timeout.h>
+#include <sys/pool.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#if NVLAN > 0
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+#endif
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcidevs.h>
+
+#include <dev/pci/ocereg.h>
+#include <dev/pci/ocevar.h>
+
+int oce_probe(struct device *parent, void *match, void *aux);
+void oce_attach(struct device *parent, struct device *self, void *aux);
+void oce_attachhook(void *arg);
+int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
+void oce_init(void *xsc);
+void oce_stop(struct oce_softc *sc);
+void oce_iff(struct oce_softc *sc);
+
+int oce_intr(void *arg);
+int oce_alloc_intr(struct oce_softc *sc);
+
+void oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
+int oce_media_change(struct ifnet *ifp);
+void oce_update_link_status(struct oce_softc *sc);
+void oce_link_event(struct oce_softc *sc,
+ struct oce_async_cqe_link_state *acqe);
+
+int oce_get_buf(struct oce_rq *rq);
+int oce_alloc_rx_bufs(struct oce_rq *rq);
+void oce_refill_rx(void *arg);
+
+void oce_start(struct ifnet *ifp);
+int oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wq_index);
+void oce_txeof(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status);
+
+void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
+int oce_cqe_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe);
+int oce_cqe_portid_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe);
+void oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
+int oce_start_rx(struct oce_softc *sc);
+void oce_stop_rx(struct oce_softc *sc);
+void oce_free_posted_rxbuf(struct oce_rq *rq);
+
+int oce_attach_ifp(struct oce_softc *sc);
+int oce_vid_config(struct oce_softc *sc);
+void oce_mac_addr_set(struct oce_softc *sc);
+void oce_local_timer(void *arg);
+
+#if defined(INET6) || defined(INET)
+#ifdef OCE_LRO
+int oce_init_lro(struct oce_softc *sc);
+void oce_free_lro(struct oce_softc *sc);
+void oce_rx_flush_lro(struct oce_rq *rq);
+#endif
+#ifdef OCE_TSO
+struct mbuf * oce_tso_setup(struct oce_softc *sc, struct mbuf **mpp);
+#endif
+#endif
+
+void oce_mq_handler(void *arg);
+void oce_wq_handler(void *arg);
+void oce_rq_handler(void *arg);
+
+int oce_queue_init_all(struct oce_softc *sc);
+void oce_arm_eq(struct oce_softc *sc, int16_t qid, int npopped, uint32_t rearm,
+ uint32_t clearint);
+void oce_queue_release_all(struct oce_softc *sc);
+void oce_arm_cq(struct oce_softc *sc, int16_t qid, int npopped,
+ uint32_t rearm);
+void oce_drain_eq(struct oce_eq *eq);
+void oce_drain_mq_cq(void *arg);
+void oce_drain_rq_cq(struct oce_rq *rq);
+void oce_drain_wq_cq(struct oce_wq *wq);
+struct oce_wq *oce_wq_init(struct oce_softc *sc, uint32_t q_len,
+ uint32_t wq_type);
+int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
+void oce_wq_free(struct oce_wq *wq);
+void oce_wq_del(struct oce_wq *wq);
+struct oce_rq *oce_rq_init(struct oce_softc *sc, uint32_t q_len,
+ uint32_t frag_size, uint32_t mtu, uint32_t rss);
+int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
+void oce_rq_free(struct oce_rq *rq);
+void oce_rq_del(struct oce_rq *rq);
+struct oce_eq *oce_eq_create(struct oce_softc *sc, uint32_t q_len,
+ uint32_t item_size, uint32_t eq_delay);
+void oce_eq_del(struct oce_eq *eq);
+struct oce_mq *oce_mq_create(struct oce_softc *sc, struct oce_eq *eq,
+ uint32_t q_len);
+void oce_mq_free(struct oce_mq *mq);
+int oce_destroy_q(struct oce_softc *sc, struct oce_mbx *mbx, size_t req_size,
+ enum qtype qtype);
+struct oce_cq *oce_cq_create(struct oce_softc *sc, struct oce_eq *eq,
+ uint32_t q_len, uint32_t item_size, uint32_t is_eventable,
+ uint32_t nodelay, uint32_t ncoalesce);
+void oce_cq_del(struct oce_softc *sc, struct oce_cq *cq);
+
+struct cfdriver oce_cd = {
+ NULL, "oce", DV_IFNET
+};
+
+struct cfattach oce_ca = {
+ sizeof(struct oce_softc), oce_probe, oce_attach, NULL, NULL
+};
+
+const struct pci_matchid oce_devices[] = {
+ { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE2 },
+ { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_BE3 },
+ { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE2 },
+ { PCI_VENDOR_SERVERENGINES, PCI_PRODUCT_SERVERENGINES_OCBE3 },
+ { PCI_VENDOR_EMULEX, PCI_PRODUCT_EMULEX_XE201 },
+};
+
+/*
+ * Autoconf match: accept any of the supported ServerEngines BE2/BE3
+ * and Emulex XE201 PCI IDs listed in oce_devices[].
+ */
+int
+oce_probe(struct device *parent, void *match, void *aux)
+{
+	return (pci_matchbyid(aux, oce_devices, nitems(oce_devices)));
+}
+
+/*
+ * Autoconf attach: classify the chip generation from the PCI product
+ * ID, map PCI resources, initialize the hardware, set up a single
+ * interrupt/queue pair and attach the network interface.  Link-state
+ * dependent setup is deferred to oce_attachhook() at mountroot time.
+ */
+void
+oce_attach(struct device *parent, struct device *self, void *aux)
+{
+	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+	struct oce_softc *sc = (struct oce_softc *)self;
+	int rc = 0;
+	uint16_t devid;
+
+	/* Remember the chip family; IS_BE()/IS_XE201() test these flags. */
+	devid = PCI_PRODUCT(pa->pa_id);
+	switch (devid) {
+	case PCI_PRODUCT_SERVERENGINES_BE2:
+	case PCI_PRODUCT_SERVERENGINES_OCBE2:
+		sc->flags |= OCE_FLAGS_BE2;
+		break;
+	case PCI_PRODUCT_SERVERENGINES_BE3:
+	case PCI_PRODUCT_SERVERENGINES_OCBE3:
+		sc->flags |= OCE_FLAGS_BE3;
+		break;
+	case PCI_PRODUCT_EMULEX_XE201:
+		sc->flags |= OCE_FLAGS_XE201;
+		break;
+	}
+
+	sc->pa = *pa;
+	if (oce_hw_pci_alloc(sc))
+		return;
+
+	sc->rss_enable = OCE_MODCAP_RSS;
+	sc->tx_ring_size = OCE_TX_RING_SIZE;
+	sc->rx_ring_size = OCE_RX_RING_SIZE;
+	sc->rq_frag_size = OCE_RQ_BUF_SIZE;
+	sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
+	sc->promisc = OCE_DEFAULT_PROMISCUOUS;
+
+	/* initialise the hardware */
+	rc = oce_hw_init(sc);
+	if (rc)
+		return;
+
+	/*
+	 * Single TX/RX queue with one interrupt; RSS is forced off here
+	 * even though it was enabled above -- presumably multi-queue
+	 * support is not wired up yet.
+	 */
+	sc->nrqs = 1;
+	sc->nwqs = 1;
+	sc->rss_enable = 0;
+	sc->intr_count = 1;
+
+	rc = oce_alloc_intr(sc);
+	if (rc)
+		goto dma_free;
+
+	rc = oce_queue_init_all(sc);
+	if (rc)
+		goto dma_free;
+
+	bcopy(sc->macaddr.mac_addr, sc->arpcom.ac_enaddr, ETH_ADDR_LEN);
+
+	rc = oce_attach_ifp(sc);
+	if (rc)
+		goto queues_free;
+
+#ifdef OCE_LRO
+	rc = oce_init_lro(sc);
+	if (rc)
+		goto ifp_free;
+#endif
+
+	/*
+	 * NOTE(review): on failure this jumps to stats_free, which calls
+	 * oce_stats_free() for stats that did not initialize -- confirm
+	 * oce_stats_free() tolerates that.
+	 */
+	rc = oce_stats_init(sc);
+	if (rc)
+		goto stats_free;
+
+	timeout_set(&sc->timer, oce_local_timer, sc);
+	timeout_set(&sc->rxrefill, oce_refill_rx, sc);
+
+	/* finish bring-up once interrupts can be serviced */
+	mountroothook_establish(oce_attachhook, sc);
+
+	printf(", address %s\n", ether_sprintf(sc->macaddr.mac_addr));
+
+	return;
+
+stats_free:
+	oce_stats_free(sc);
+	oce_hw_intr_disable(sc);
+#ifdef OCE_LRO
+lro_free:
+	oce_free_lro(sc);
+ifp_free:
+#endif
+	ether_ifdetach(&sc->arpcom.ac_if);
+	if_detach(&sc->arpcom.ac_if);
+queues_free:
+	oce_queue_release_all(sc);
+dma_free:
+	oce_dma_free(sc, &sc->bsmbx);
+}
+
+/*
+ * Deferred attach, run at mountroot time: query initial link state,
+ * arm the MCC completion queue and the first event queue so that
+ * asynchronous MCC events (e.g. link changes) are delivered, then
+ * kick off the first MCC command.  On failure, tear everything down.
+ */
+void
+oce_attachhook(void *arg)
+{
+	struct oce_softc *sc = arg;
+
+	if (oce_get_link_status(sc))
+		goto error;
+
+	oce_arm_cq(sc->mq->parent, sc->mq->cq->cq_id, 0, TRUE);
+
+	/*
+	 * We need to get MCC async events. So enable intrs and arm
+	 * first EQ, Other EQs will be armed after interface is UP
+	 */
+	oce_hw_intr_enable(sc);
+	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
+
+	/* Send first mcc cmd and after that we get gracious
+	   MCC notifications from FW
+	*/
+	oce_first_mcc_cmd(sc);
+
+	return;
+
+ error:
+	timeout_del(&sc->rxrefill);
+	timeout_del(&sc->timer);
+	oce_stats_free(sc);
+	oce_hw_intr_disable(sc);
+	ether_ifdetach(&sc->arpcom.ac_if);
+	if_detach(&sc->arpcom.ac_if);
+	oce_queue_release_all(sc);
+	oce_dma_free(sc, &sc->bsmbx);
+}
+
+/*
+ * Interface ioctl handler.  Runs at splnet.  Unknown commands fall
+ * through to ether_ioctl(); ENETRESET from any path is translated
+ * into a receive-filter reprogram via oce_iff().
+ */
+int
+oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+	struct oce_softc *sc = ifp->if_softc;
+	struct ifaddr *ifa = (struct ifaddr *)data;
+	struct ifreq *ifr = (struct ifreq *)data;
+	int s, error = 0;
+
+	s = splnet();
+
+	switch (command) {
+	case SIOCSIFADDR:
+		/* bringing up the interface implies IFF_UP */
+		ifp->if_flags |= IFF_UP;
+		if (!(ifp->if_flags & IFF_RUNNING))
+			oce_init(sc);
+#ifdef INET
+		if (ifa->ifa_addr->sa_family == AF_INET)
+			arp_ifinit(&sc->arpcom, ifa);
+#endif
+		break;
+
+	case SIOCGIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+		break;
+
+	case SIOCSIFMTU:
+		if (ifr->ifr_mtu > OCE_MAX_MTU)
+			error = EINVAL;
+		else if (ifp->if_mtu != ifr->ifr_mtu) {
+			/* a reinit is needed for the new MTU to take effect */
+			ifp->if_mtu = ifr->ifr_mtu;
+			oce_init(sc);
+		}
+		break;
+
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			if (ifp->if_flags & IFF_RUNNING)
+				error = ENETRESET;
+			else
+				oce_init(sc);
+		} else {
+			if (ifp->if_flags & IFF_RUNNING)
+				oce_stop(sc);
+		}
+		break;
+
+	default:
+		error = ether_ioctl(ifp, &sc->arpcom, command, data);
+		break;
+	}
+
+	if (error == ENETRESET) {
+		if (ifp->if_flags & IFF_RUNNING)
+			oce_iff(sc);
+		error = 0;
+	}
+
+	splx(s);
+
+	return error;
+}
+
+/*
+ * Program the receive filter: sync the hardware promiscuous setting
+ * with IFF_PROMISC and push the current multicast list to firmware.
+ */
+void
+oce_iff(struct oce_softc *sc)
+{
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+
+	ifp->if_flags &= ~IFF_ALLMULTI;
+
+	/* only touch the hardware when the promiscuous state changed */
+	if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
+		sc->promisc = TRUE;
+		oce_rxf_set_promiscuous(sc, sc->promisc);
+	} else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
+		sc->promisc = FALSE;
+		oce_rxf_set_promiscuous(sc, sc->promisc);
+	}
+	if (oce_hw_update_multicast(sc))
+		printf("%s: Update multicast address failed\n",
+		    sc->dev.dv_xname);
+}
+
+/*
+ * Interrupt handler for the single EQ.  Consume and clear all pending
+ * event queue entries, run the handler of every CQ attached to the EQ,
+ * then re-arm the CQs and finally the EQ.  Returns 1 when the
+ * interrupt was ours (at least one EQE was found), 0 for spurious.
+ */
+int
+oce_intr(void *arg)
+{
+	struct oce_softc *sc = arg;
+	struct oce_eq *eq = sc->eq[0];
+	struct oce_eqe *eqe;
+	struct oce_cq *cq = NULL;
+	int i, claimed = 0, num_eqes = 0;
+
+	oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTWRITE);
+
+	/* drain the event queue; a zero evnt word marks the end */
+	do {
+		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
+		if (eqe->evnt == 0)
+			break;
+		eqe->evnt = 0;
+		oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		RING_GET(eq->ring, 1);
+		num_eqes++;
+
+	} while (TRUE);
+
+	if (!num_eqes)
+		goto eq_arm; /* Spurious */
+
+	claimed = 1;
+
+	/* Clear EQ entries, but dont arm */
+	oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, TRUE);
+
+	/* Process TX, RX and MCC. But dont arm CQ */
+	for (i = 0; i < eq->cq_valid; i++) {
+		cq = eq->cq[i];
+		(*cq->cq_handler)(cq->cb_arg);
+	}
+
+	/* Arm all cqs connected to this EQ */
+	for (i = 0; i < eq->cq_valid; i++) {
+		cq = eq->cq[i];
+		oce_arm_cq(sc, cq->cq_id, 0, TRUE);
+	}
+
+eq_arm:
+	oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+	return (claimed);
+}
+
+/*
+ * Map and establish the single interrupt, preferring MSI over a
+ * legacy INTx line.  Returns 0 on success, ENXIO on failure.
+ */
+int
+oce_alloc_intr(struct oce_softc *sc)
+{
+	const char *intrstr = NULL;
+	struct pci_attach_args *pa = &sc->pa;
+	pci_intr_handle_t ih;
+
+	/* We allocate a single interrupt resource */
+	if (pci_intr_map_msi(pa, &ih) != 0 &&
+	    pci_intr_map(pa, &ih) != 0) {
+		printf(": couldn't map interrupt\n");
+		return (ENXIO);
+	}
+
+	intrstr = pci_intr_string(pa->pa_pc, ih);
+	if (pci_intr_establish(pa->pa_pc, ih, IPL_NET, oce_intr, sc,
+	    sc->dev.dv_xname) == NULL) {
+		printf(": couldn't establish interrupt");
+		if (intrstr != NULL)
+			printf(" at %s", intrstr);
+		printf("\n");
+		return (ENXIO);
+	}
+	printf(": %s", intrstr);
+
+	return (0);
+}
+
+/*
+ * Propagate the firmware-reported link state (sc->link_status /
+ * sc->link_speed) into the ifnet: set if_baudrate from the encoded
+ * speed and raise an if_link_state_change() when the state flips.
+ */
+void
+oce_update_link_status(struct oce_softc *sc)
+{
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	int speed = 0;
+
+	if (sc->link_status) {
+		if (sc->link_active == 0) {
+			/* firmware speed codes 1..4 -> Mbps */
+			switch (sc->link_speed) {
+			case 1: /* 10 Mbps */
+				speed = 10;
+				break;
+			case 2: /* 100 Mbps */
+				speed = 100;
+				break;
+			case 3: /* 1 Gbps */
+				speed = 1000;
+				break;
+			case 4: /* 10 Gbps */
+				speed = 10000;
+				break;
+			}
+			sc->link_active = 1;
+			ifp->if_baudrate = speed * 1000000ULL;
+		}
+		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
+			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
+			if_link_state_change(ifp);
+		}
+	} else {
+		if (sc->link_active == 1) {
+			ifp->if_baudrate = 0;
+			sc->link_active = 0;
+		}
+		if (ifp->if_link_state != LINK_STATE_DOWN) {
+			ifp->if_link_state = LINK_STATE_DOWN;
+			if_link_state_change(ifp);
+		}
+	}
+}
+
+/*
+ * ifmedia status callback: refresh the link state from firmware and
+ * report the active media (speed, full duplex, flow control pauses).
+ */
+void
+oce_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct oce_softc *sc = (struct oce_softc *)ifp->if_softc;
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (oce_get_link_status(sc) == 0)
+		oce_update_link_status(sc);
+
+	if (!sc->link_status) {
+		ifmr->ifm_active |= IFM_NONE;
+		return;
+	}
+
+	ifmr->ifm_status |= IFM_ACTIVE;
+
+	/* same firmware speed encoding as oce_update_link_status() */
+	switch (sc->link_speed) {
+	case 1: /* 10 Mbps */
+		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
+		break;
+	case 2: /* 100 Mbps */
+		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
+		break;
+	case 3: /* 1 Gbps */
+		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+		break;
+	case 4: /* 10 Gbps */
+		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
+		break;
+	}
+
+	if (sc->flow_control & OCE_FC_TX)
+		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
+	if (sc->flow_control & OCE_FC_RX)
+		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
+}
+
+/*
+ * ifmedia change callback.  The media is fixed by the hardware, so
+ * there is nothing to change; always succeed.
+ */
+int
+oce_media_change(struct ifnet *ifp)
+{
+	return (0);
+}
+
+/*
+ * Map an mbuf chain for DMA and post it on TX work queue wq_index:
+ * one NIC header WQE followed by one fragment WQE per DMA segment
+ * (plus a padding WQE on BE3, which needs an even WQE count), then
+ * ring the TX doorbell.  On failure the mbuf is freed and *mpp is
+ * cleared, except for the transient EBUSY/ENOMEM cases where the
+ * caller keeps ownership.
+ */
+int
+oce_encap(struct oce_softc *sc, struct mbuf **mpp, int wq_index)
+{
+	int rc = 0, i, retry_cnt = 0;
+	bus_dma_segment_t *segs;
+	struct mbuf *m;
+	struct oce_wq *wq = sc->wq[wq_index];
+	struct oce_packet_desc *pd;
+	uint32_t out;
+	struct oce_nic_hdr_wqe *nichdr;
+	struct oce_nic_frag_wqe *nicfrag;
+	int num_wqes;
+	uint32_t reg_value;
+
+	m = *mpp;
+	if (!m)
+		return EINVAL;
+
+	if (!(m->m_flags & M_PKTHDR)) {
+		rc = ENXIO;
+		goto free_ret;
+	}
+
+#ifdef OCE_TSO
+	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+		/* consolidate packet buffers for TSO/LSO segment offload */
+#if defined(INET6) || defined(INET)
+		m = oce_tso_setup(sc, mpp);
+#else
+		m = NULL;
+#endif
+		if (m == NULL) {
+			rc = ENXIO;
+			goto free_ret;
+		}
+	}
+#endif
+
+	/* reserve a slot in the packet descriptor ring */
+	out = wq->packets_out + 1;
+	if (out == OCE_WQ_PACKET_ARRAY_SIZE)
+		out = 0;
+	if (out == wq->packets_in)
+		return EBUSY;
+
+	pd = &wq->pckts[wq->packets_out];
+retry:
+	/* on EFBIG, defragment the chain once and retry the load */
+	rc = bus_dmamap_load_mbuf(wq->tag, pd->map, m, BUS_DMA_NOWAIT);
+	if (rc == EFBIG) {
+		if (retry_cnt == 0) {
+			if (m_defrag(m, M_DONTWAIT) != 0)
+				goto free_ret;
+			*mpp = m;
+			retry_cnt = retry_cnt + 1;
+			goto retry;
+		} else
+			goto free_ret;
+	} else if (rc == ENOMEM) {
+		printf("%s: failed to load mbuf: out of memory",
+		    sc->dev.dv_xname);
+		return rc;
+	} else if (rc) {
+		printf("%s: failed to load mbuf: %d", sc->dev.dv_xname, rc);
+		goto free_ret;
+	}
+
+	segs = pd->map->dm_segs;
+	pd->nsegs = pd->map->dm_nsegs;
+
+	num_wqes = pd->nsegs + 1;
+	if (IS_BE(sc)) {
+		/* dummy WQE required only for BE3: even WQE count */
+		if (num_wqes & 1)
+			num_wqes++;
+	}
+	if (num_wqes >= RING_NUM_FREE(wq->ring)) {
+		bus_dmamap_unload(wq->tag, pd->map);
+		return EBUSY;
+	}
+
+	oce_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
+	pd->mbuf = m;
+	wq->packets_out = out;
+
+	/* build the NIC header WQE */
+	nichdr = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
+	nichdr->u0.dw[0] = 0;
+	nichdr->u0.dw[1] = 0;
+	nichdr->u0.dw[2] = 0;
+	nichdr->u0.dw[3] = 0;
+
+	nichdr->u0.s.complete = 1;
+	nichdr->u0.s.event = 1;
+	nichdr->u0.s.crc = 1;
+	nichdr->u0.s.forward = 0;
+	/* request hardware checksum offload per mbuf csum flags */
+	nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) ? 1 : 0;
+	nichdr->u0.s.udpcs = (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) ? 1 : 0;
+	nichdr->u0.s.tcpcs = (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) ? 1 : 0;
+	nichdr->u0.s.num_wqe = num_wqes;
+	nichdr->u0.s.total_length = m->m_pkthdr.len;
+
+#if NVLAN > 0
+	if (m->m_flags & M_VLANTAG) {
+		nichdr->u0.s.vlan = 1; /* Vlan present */
+		nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
+	}
+#endif
+
+#ifdef OCE_TSO
+	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+		if (m->m_pkthdr.tso_segsz) {
+			nichdr->u0.s.lso = 1;
+			nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
+		}
+		if (!IS_BE(sc))
+			nichdr->u0.s.ipcs = 1;
+	}
+#endif
+
+	RING_PUT(wq->ring, 1);
+	wq->ring->num_used++;
+
+	/* one fragment WQE per DMA segment */
+	for (i = 0; i < pd->nsegs; i++) {
+		nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring,
+		    struct oce_nic_frag_wqe);
+		nicfrag->u0.s.rsvd0 = 0;
+		nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
+		nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
+		nicfrag->u0.s.frag_len = segs[i].ds_len;
+		pd->wqe_idx = wq->ring->pidx;
+		RING_PUT(wq->ring, 1);
+		wq->ring->num_used++;
+	}
+	/* BE3 padding: post the zeroed dummy WQE accounted for above */
+	if (num_wqes > (pd->nsegs + 1)) {
+		nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring,
+		    struct oce_nic_frag_wqe);
+		nicfrag->u0.dw[0] = 0;
+		nicfrag->u0.dw[1] = 0;
+		nicfrag->u0.dw[2] = 0;
+		nicfrag->u0.dw[3] = 0;
+		pd->wqe_idx = wq->ring->pidx;
+		RING_PUT(wq->ring, 1);
+		wq->ring->num_used++;
+		pd->nsegs++;
+	}
+
+	sc->arpcom.ac_if.if_opackets++;
+	wq->tx_stats.tx_reqs++;
+	wq->tx_stats.tx_wrbs += num_wqes;
+	wq->tx_stats.tx_bytes += m->m_pkthdr.len;
+	wq->tx_stats.tx_pkts++;
+
+	/* ring the TX doorbell: WQE count in the high half, queue id low */
+	oce_dma_sync(&wq->ring->dma, BUS_DMASYNC_PREREAD |
+	    BUS_DMASYNC_PREWRITE);
+	reg_value = (num_wqes << 16) | wq->wq_id;
+	OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
+
+	return 0;
+
+free_ret:
+	m_freem(*mpp);
+	*mpp = NULL;
+	return rc;
+}
+
+/*
+ * Reclaim one completed transmit: advance the packet-descriptor
+ * consumer index, release the DMA map and mbuf, and restart output
+ * if the ring drained below half while the interface was blocked.
+ */
+void
+oce_txeof(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
+{
+	struct oce_softc *sc = (struct oce_softc *) wq->parent;
+	struct oce_packet_desc *pd;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct mbuf *m;
+	uint32_t in;
+
+	if (wq->packets_out == wq->packets_in)
+		printf("%s: WQ transmit descriptor missing\n",
+		    sc->dev.dv_xname);
+
+	in = wq->packets_in + 1;
+	if (in == OCE_WQ_PACKET_ARRAY_SIZE)
+		in = 0;
+
+	pd = &wq->pckts[wq->packets_in];
+	wq->packets_in = in;
+	wq->ring->num_used -= (pd->nsegs + 1);
+	oce_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(wq->tag, pd->map);
+
+	m = pd->mbuf;
+	m_freem(m);
+	pd->mbuf = NULL;
+
+	if (ifp->if_flags & IFF_OACTIVE) {
+		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
+			ifp->if_flags &= ~(IFF_OACTIVE);
+			oce_start(ifp);
+		}
+	}
+}
+
+#ifdef OCE_TSO
+#if defined(INET6) || defined(INET)
+/*
+ * Prepare an mbuf chain for LSO: make the chain writable if needed
+ * and pull the Ethernet/IP(v6)/TCP headers into the first mbuf so
+ * the hardware can parse them.  Returns the (possibly replaced)
+ * mbuf, or NULL on failure or non-TCP traffic; *mpp tracks the
+ * current chain.
+ */
+struct mbuf *
+oce_tso_setup(struct oce_softc *sc, struct mbuf **mpp)
+{
+	struct mbuf *m;
+#ifdef INET
+	struct ip *ip;
+#endif
+#ifdef INET6
+	struct ip6_hdr *ip6;
+#endif
+	struct ether_vlan_header *eh;
+	struct tcphdr *th;
+	uint16_t etype;
+	int total_len = 0, ehdrlen = 0;
+
+	m = *mpp;
+
+	if (M_WRITABLE(m) == 0) {
+		m = m_dup(*mpp, M_DONTWAIT);
+		if (!m)
+			return NULL;
+		m_freem(*mpp);
+		*mpp = m;
+	}
+
+	/* account for an optional VLAN encapsulation header */
+	eh = mtod(m, struct ether_vlan_header *);
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		etype = ntohs(eh->evl_proto);
+		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+	} else {
+		etype = ntohs(eh->evl_encap_proto);
+		ehdrlen = ETHER_HDR_LEN;
+	}
+
+	switch (etype) {
+#ifdef INET
+	case ETHERTYPE_IP:
+		ip = (struct ip *)(m->m_data + ehdrlen);
+		if (ip->ip_p != IPPROTO_TCP)
+			return NULL;
+		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+
+		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
+		break;
+#endif
+#ifdef INET6
+	case ETHERTYPE_IPV6:
+		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
+		if (ip6->ip6_nxt != IPPROTO_TCP)
+			return NULL;
+		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
+
+		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
+		break;
+#endif
+	default:
+		return NULL;
+	}
+
+	/* make all headers contiguous in the first mbuf */
+	m = m_pullup(m, total_len);
+	if (!m)
+		return NULL;
+	*mpp = m;
+	return m;
+
+}
+#endif /* INET6 || INET */
+#endif /* OCE_TSO */
+
+/*
+ * if_start routine: dequeue packets from the send queue and hand
+ * them to oce_encap() on the default TX queue until the queue is
+ * empty or the hardware ring fills up (then mark IFF_OACTIVE).
+ */
+void
+oce_start(struct ifnet *ifp)
+{
+	struct oce_softc *sc = ifp->if_softc;
+	struct mbuf *m;
+	int rc = 0;
+	int def_q = 0; /* Default TX queue is 0 */
+
+	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
+		return;
+
+	do {
+		/* peek first: keep the mbuf queued if encap can't take it */
+		IFQ_POLL(&ifp->if_snd, m);
+		if (m == NULL)
+			break;
+
+		rc = oce_encap(sc, &m, def_q);
+		if (rc) {
+			if (m != NULL) {
+				sc->wq[def_q]->tx_stats.tx_stops++;
+				ifp->if_flags |= IFF_OACTIVE;
+			}
+			break;
+		}
+
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+
+#if NBPFILTER > 0
+		if (ifp->if_bpf)
+			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+	} while (TRUE);
+
+	return;
+}
+
+/*
+ * Handle the Completion Queue for transmit: walk the TX CQ until a
+ * zero dw[3] marks the end, reclaiming each completed WQE via
+ * oce_txeof(), then re-arm the CQ with the consumed entry count.
+ */
+void
+oce_wq_handler(void *arg)
+{
+	struct oce_wq *wq = (struct oce_wq *)arg;
+	struct oce_softc *sc = wq->parent;
+	struct oce_cq *cq = wq->cq;
+	struct oce_nic_tx_cqe *cqe;
+	int num_cqes = 0;
+
+	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+	while (cqe->u0.dw[3]) {
+		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
+
+		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
+		if (wq->ring->cidx >= wq->ring->num_items)
+			wq->ring->cidx -= wq->ring->num_items;
+
+		oce_txeof(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
+		wq->tx_stats.tx_compl++;
+		cqe->u0.dw[3] = 0;
+		RING_GET(cq->ring, 1);
+		oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		cqe =
+		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+		num_cqes++;
+	}
+
+	if (num_cqes)
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+}
+
+/*
+ * Process one receive completion: collect the posted RX fragments
+ * into a single mbuf chain, fill in checksum-offload results and
+ * VLAN tag, and pass the packet up the stack (or to LRO when
+ * enabled).  Zero-length completions are a Lancer partial-DMA
+ * workaround and are discarded.
+ */
+void
+oce_rxeof(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+{
+	struct oce_softc *sc = (struct oce_softc *)rq->parent;
+	struct oce_packet_desc *pd;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct mbuf *m = NULL, *tail = NULL;
+	int i, len, frag_len;
+	uint32_t out;
+	uint16_t vtag;
+
+	len = cqe->u0.s.pkt_size;
+	if (!len) {
+		/* partial DMA workaround for Lancer */
+		oce_discard_rx_comp(rq, cqe);
+		goto exit;
+	}
+
+	/* Get vlan_tag value */
+	if(IS_BE(sc))
+		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
+	else
+		vtag = cqe->u0.s.vlan_tag;
+
+	/* reassemble the packet from its per-fragment clusters */
+	for (i = 0; i < cqe->u0.s.num_fragments; i++) {
+		if (rq->packets_out == rq->packets_in) {
+			printf("%s: RQ transmit descriptor missing\n",
+			    sc->dev.dv_xname);
+		}
+		out = rq->packets_out + 1;
+		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
+			out = 0;
+		pd = &rq->pckts[rq->packets_out];
+		rq->packets_out = out;
+
+		oce_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(rq->tag, pd->map);
+		rq->pending--;
+
+		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
+		pd->mbuf->m_len = frag_len;
+
+		if (tail != NULL) {
+			/* additional fragments */
+			pd->mbuf->m_flags &= ~M_PKTHDR;
+			tail->m_next = pd->mbuf;
+			tail = pd->mbuf;
+		} else {
+			/* first fragment, fill out much of the packet header */
+			pd->mbuf->m_pkthdr.len = len;
+			pd->mbuf->m_pkthdr.csum_flags = 0;
+			if (IF_CSUM_ENABLED(ifp)) {
+				if (cqe->u0.s.ip_cksum_pass) {
+					if (!cqe->u0.s.ip_ver) { /* IPV4 */
+						pd->mbuf->m_pkthdr.csum_flags =
+						    M_IPV4_CSUM_IN_OK;
+					}
+				}
+				if (cqe->u0.s.l4_cksum_pass) {
+					pd->mbuf->m_pkthdr.csum_flags |=
+					    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
+				}
+			}
+			m = tail = pd->mbuf;
+		}
+		pd->mbuf = NULL;
+		len -= frag_len;
+	}
+
+	if (m) {
+		if (!oce_cqe_portid_valid(sc, cqe)) {
+			m_freem(m);
+			goto exit;
+		}
+
+		m->m_pkthdr.rcvif = ifp;
+
+#if NVLAN > 0
+		/* This determines if the vlan tag is valid */
+		if (oce_cqe_vtp_valid(sc, cqe)) {
+			if (sc->function_mode & FNM_FLEX10_MODE) {
+				/* FLEX10. If QnQ is not set, neglect VLAN */
+				if (cqe->u0.s.qnq) {
+					m->m_pkthdr.ether_vtag = vtag;
+					m->m_flags |= M_VLANTAG;
+				}
+			} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
+				/* In UMC mode generally pvid will be striped by
+				   hw. But in some cases we have seen it comes
+				   with pvid. So if pvid == vlan, neglect vlan.
+				 */
+				m->m_pkthdr.ether_vtag = vtag;
+				m->m_flags |= M_VLANTAG;
+			}
+		}
+#endif
+
+		ifp->if_ipackets++;
+
+#ifdef OCE_LRO
+#if defined(INET6) || defined(INET)
+		/* Try to queue to LRO */
+		if (IF_LRO_ENABLED(sc) &&
+		    !(m->m_flags & M_VLANTAG) &&
+		    (cqe->u0.s.ip_cksum_pass) &&
+		    (cqe->u0.s.l4_cksum_pass) &&
+		    (!cqe->u0.s.ip_ver) &&
+		    (rq->lro.lro_cnt != 0)) {
+
+			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
+				rq->lro_pkts_queued ++;
+				goto post_done;
+			}
+			/* If LRO posting fails then try to post to STACK */
+		}
+#endif
+#endif /* OCE_LRO */
+
+#if NBPFILTER > 0
+		if (ifp->if_bpf)
+			bpf_mtap_ether(ifp->if_bpf, m,
+			    BPF_DIRECTION_IN);
+#endif
+
+		ether_input_mbuf(ifp, m);
+
+#ifdef OCE_LRO
+#if defined(INET6) || defined(INET)
+post_done:
+#endif
+#endif
+		/* Update rx stats per queue */
+		rq->rx_stats.rx_pkts++;
+		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
+		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
+		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
+			rq->rx_stats.rx_mcast_pkts++;
+		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
+			rq->rx_stats.rx_ucast_pkts++;
+	}
+exit:
+	return;
+}
+
+/*
+ * Drop the fragments of an errored/zero-length receive completion:
+ * advance the descriptor ring, unload each DMA map and free the
+ * posted clusters without passing anything up the stack.
+ */
+void
+oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+{
+	uint32_t out, i = 0;
+	struct oce_packet_desc *pd;
+	struct oce_softc *sc = (struct oce_softc *) rq->parent;
+	int num_frags = cqe->u0.s.num_fragments;
+
+	if (IS_XE201(sc) && cqe->u0.s.error) {
+		/* Lancer A0 workaround
+		 * num_frags will be 1 more than actual in case of error
+		 */
+		if (num_frags)
+			num_frags -= 1;
+	}
+	for (i = 0; i < num_frags; i++) {
+		if (rq->packets_out == rq->packets_in) {
+			printf("%s: RQ transmit descriptor missing\n",
+			    sc->dev.dv_xname);
+		}
+		out = rq->packets_out + 1;
+		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
+			out = 0;
+		pd = &rq->pckts[rq->packets_out];
+		rq->packets_out = out;
+
+		oce_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(rq->tag, pd->map);
+		rq->pending--;
+		m_freem(pd->mbuf);
+	}
+}
+
+/*
+ * Return nonzero when the completion carries a valid VLAN tag.  The
+ * vlan_tag_present bit lives at a different offset in the v1 CQE
+ * layout used by BE3 native mode.
+ */
+int
+oce_cqe_vtp_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
+{
+	struct oce_nic_rx_cqe_v1 *cqe_v1;
+	int vtp = 0;
+
+	if (sc->be3_native) {
+		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
+		vtp = cqe_v1->u0.s.vlan_tag_present;
+	} else
+		vtp = cqe->u0.s.vlan_tag_present;
+
+	return vtp;
+
+}
+
+/*
+ * Return nonzero when the completion belongs to our port.  Only BE3
+ * native mode reports a port number in the v1 CQE; for BE3 legacy
+ * and Lancer the check is a no-op and always succeeds.
+ */
+int
+oce_cqe_portid_valid(struct oce_softc *sc, struct oce_nic_rx_cqe *cqe)
+{
+	struct oce_nic_rx_cqe_v1 *cqe_v1;
+	int port_id = 0;
+
+	if (sc->be3_native && IS_BE(sc)) {
+		cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
+		port_id = cqe_v1->u0.s.port;
+		if (sc->port_id != port_id)
+			return 0;
+	} else
+		;/* For BE3 legacy and Lancer this is dummy */
+
+	return 1;
+
+}
+
+#ifdef OCE_LRO
+#if defined(INET6) || defined(INET)
+/*
+ * Flush all packets currently queued in the RQ's LRO control block
+ * up to the stack and reset the queued-packet counter.
+ */
+void
+oce_rx_flush_lro(struct oce_rq *rq)
+{
+	struct lro_ctrl	*lro = &rq->lro;
+	struct lro_entry *queued;
+	struct oce_softc *sc = (struct oce_softc *) rq->parent;
+
+	if (!IF_LRO_ENABLED(sc))
+		return;
+
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+	rq->lro_pkts_queued = 0;
+
+	return;
+}
+
+/*
+ * Initialize an LRO control block for every receive queue.  Returns
+ * 0 on success or the tcp_lro_init() error code on failure.
+ */
+int
+oce_init_lro(struct oce_softc *sc)
+{
+	struct lro_ctrl *lro = NULL;
+	int i = 0, rc = 0;
+
+	for (i = 0; i < sc->nrqs; i++) {
+		lro = &sc->rq[i]->lro;
+		rc = tcp_lro_init(lro);
+		if (rc != 0) {
+			printf("%s: LRO init failed\n", sc->dev.dv_xname);
+			return rc;
+		}
+		lro->ifp = &sc->arpcom.ac_if;
+	}
+
+	return rc;
+}
+
+/*
+ * Release the LRO control block of every receive queue.  The NULL
+ * check is vacuous (&sc->rq[i]->lro can never be NULL) but harmless.
+ */
+void
+oce_free_lro(struct oce_softc *sc)
+{
+	struct lro_ctrl *lro = NULL;
+	int i = 0;
+
+	for (i = 0; i < sc->nrqs; i++) {
+		lro = &sc->rq[i]->lro;
+		if (lro)
+			tcp_lro_free(lro);
+	}
+}
+#endif /* INET6 || INET */
+#endif /* OCE_LRO */
+
+/*
+ * Post one receive buffer: allocate a cluster, DMA-map it and write
+ * a receive queue entry for it.  Returns 1 when a buffer was posted,
+ * 0 when the descriptor ring is full or allocation/mapping failed.
+ */
+int
+oce_get_buf(struct oce_rq *rq)
+{
+	struct oce_softc *sc = (struct oce_softc *)rq->parent;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct oce_packet_desc *pd;
+	struct oce_nic_rqe *rqe;
+	bus_dma_segment_t *segs;
+	int nsegs;
+	int in, rc;
+
+	in = rq->packets_in + 1;
+	if (in == OCE_RQ_PACKET_ARRAY_SIZE)
+		in = 0;
+	if (in == rq->packets_out)
+		return 0;	/* no more room */
+
+	pd = &rq->pckts[rq->packets_in];
+
+	pd->mbuf = MCLGETI(NULL, M_DONTWAIT, ifp, MCLBYTES);
+	if (pd->mbuf == NULL)
+		return 0;
+
+	pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
+
+	rc = bus_dmamap_load_mbuf(rq->tag, pd->map, pd->mbuf,
+	    BUS_DMA_NOWAIT);
+	if (rc) {
+		printf("%s: failed to load an mbuf", sc->dev.dv_xname);
+		m_freem(pd->mbuf);
+		pd->mbuf = NULL;	/* don't leave a dangling pointer */
+		return 0;
+	}
+	segs = pd->map->dm_segs;
+	nsegs = pd->map->dm_nsegs;
+	if (nsegs != 1) {
+		printf("%s: too many DMA segments", sc->dev.dv_xname);
+		/* unload the map and free the cluster instead of leaking */
+		bus_dmamap_unload(rq->tag, pd->map);
+		m_freem(pd->mbuf);
+		pd->mbuf = NULL;
+		return 0;
+	}
+
+	rq->packets_in = in;
+	oce_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
+
+	rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
+	rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
+	rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
+	DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
+	RING_PUT(rq->ring, 1);
+	rq->pending++;
+	return 1;
+}
+
+/*
+ * Fill the receive ring with buffers and notify the hardware via the
+ * RX doorbell, at most OCE_MAX_RQ_POSTS buffers per doorbell write.
+ * Returns 1 when at least one buffer was posted, 0 otherwise.
+ */
+int
+oce_alloc_rx_bufs(struct oce_rq *rq)
+{
+	struct oce_softc *sc = (struct oce_softc *)rq->parent;
+	pd_rxulp_db_t rxdb_reg;
+	int i, nbufs = 0;
+
+	while (oce_get_buf(rq))
+		nbufs++;
+	if (!nbufs)
+		return 0;
+	/* full batches of OCE_MAX_RQ_POSTS first... */
+	for (i = nbufs / OCE_MAX_RQ_POSTS; i > 0; i--) {
+		DELAY(1);
+		bzero(&rxdb_reg, sizeof(rxdb_reg));
+		rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
+		rxdb_reg.bits.qid = rq->rq_id;
+		OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+		nbufs -= OCE_MAX_RQ_POSTS;
+	}
+	/* ...then the remainder */
+	if (nbufs > 0) {
+		DELAY(1);
+		bzero(&rxdb_reg, sizeof(rxdb_reg));
+		rxdb_reg.bits.qid = rq->rq_id;
+		rxdb_reg.bits.num_posted = nbufs;
+		OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+	}
+	return 1;
+}
+
+/*
+ * Timeout handler: try to replenish the RX rings.  Best effort -- if no
+ * buffers could be posted we simply try again on the next invocation.
+ */
+void
+oce_refill_rx(void *arg)
+{
+	struct oce_softc *sc = arg;
+	struct oce_rq *rq;
+	int s, i;
+
+	s = splnet();
+	for_all_rq_queues(sc, rq, i) {
+		/* XXX timeout_add(&sc->rxrefill, 10) on failure? */
+		(void)oce_alloc_rx_bufs(rq);
+	}
+	splx(s);
+}
+
+#ifdef OCE_DEBUG
+void oce_inspect_rxring(struct oce_softc *sc, struct oce_ring *ring);
+
+/* Debug helper: dump the ring indices and every non-empty RX CQE. */
+void
+oce_inspect_rxring(struct oce_softc *sc, struct oce_ring *ring)
+{
+	struct oce_nic_rx_cqe *cqe;
+	int n;
+
+	printf("%s: cidx %d pidx %d used %d from %d\n", sc->dev.dv_xname,
+	    ring->cidx, ring->pidx, ring->num_used, ring->num_items);
+
+	cqe = OCE_DMAPTR(&ring->dma, struct oce_nic_rx_cqe);
+	for (n = 0; n < ring->num_items; n++, cqe++) {
+		if (cqe->u0.dw[0] == 0 && cqe->u0.dw[1] == 0 &&
+		    cqe->u0.dw[2] == 0)
+			continue;
+		printf("%s: cqe %d dw0=%#x dw1=%#x dw2=%#x\n", sc->dev.dv_xname,
+		    n, cqe->u0.dw[0], cqe->u0.dw[1], cqe->u0.dw[2]);
+	}
+}
+#endif
+
+/* Handle the Completion Queue for receive */
+void
+oce_rq_handler(void *arg)
+{
+	struct oce_rq *rq = (struct oce_rq *)arg;
+	struct oce_cq *cq = rq->cq;
+	struct oce_softc *sc = rq->parent;
+	struct oce_nic_rx_cqe *cqe;
+	int num_cqes = 0, rq_buffers_used = 0;
+
+	/* make the CQ memory coherent before reading the first entry */
+	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+
+#ifdef OCE_DEBUG
+	oce_inspect_rxring(sc, cq->ring);
+#endif
+
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+
+#ifdef OCE_DEBUG
+	printf("%s: %s %x %x %x\n", sc->dev.dv_xname, __func__,
+	    cqe->u0.dw[0], cqe->u0.dw[1], cqe->u0.dw[2]);
+#endif
+
+	/* a non-zero third dword marks a valid (unconsumed) completion */
+	while (cqe->u0.dw[2]) {
+		DW_SWAP((uint32_t *)cqe, sizeof(oce_rq_cqe));
+
+		RING_GET(rq->ring, 1);
+		if (cqe->u0.s.error == 0) {
+			oce_rxeof(rq, cqe);
+		} else {
+			rq->rx_stats.rxcp_err++;
+			sc->arpcom.ac_if.if_ierrors++;
+			if (IS_XE201(sc))
+				/* Lancer A0 no buffer workaround */
+				oce_discard_rx_comp(rq, cqe);
+			else
+				/* Post L3/L4 errors to stack.*/
+				oce_rxeof(rq, cqe);
+		}
+		rq->rx_stats.rx_compl++;
+		/* mark the entry consumed so it isn't processed twice */
+		cqe->u0.dw[2] = 0;
+
+#ifdef OCE_LRO
+#if defined(INET6) || defined(INET)
+		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
+			oce_rx_flush_lro(rq);
+		}
+#endif
+#endif
+
+		RING_GET(cq->ring, 1);
+		oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		cqe =
+		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+		num_cqes++;
+		/* bound the amount of work done in a single invocation */
+		if (num_cqes >= (IS_XE201(sc) ? 8 : OCE_MAX_RSP_HANDLED))
+			break;
+	}
+
+#ifdef OCE_LRO
+#if defined(INET6) || defined(INET)
+	if (IF_LRO_ENABLED(sc))
+		oce_rx_flush_lro(rq);
+#endif
+#endif
+
+	if (num_cqes) {
+		/* ack the consumed CQEs and try to replenish RX buffers;
+		 * on allocation failure, retry from the rxrefill timeout */
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
+		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
+		if (rq_buffers_used > 1 && !oce_alloc_rx_bufs(rq))
+			timeout_add(&sc->rxrefill, 1);
+	}
+}
+
+/*
+ * Initialize and attach the network interface: media (autoselect only),
+ * ifnet callbacks, capabilities and mbuf cluster watermarks.
+ */
+int
+oce_attach_ifp(struct oce_softc *sc)
+{
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+
+	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = oce_ioctl;
+	ifp->if_start = oce_start;
+	ifp->if_hardmtu = OCE_MAX_MTU;
+	ifp->if_baudrate = IF_Gbps(10UL);
+	IFQ_SET_MAXLEN(&ifp->if_snd, sc->tx_ring_size - 1);
+	IFQ_SET_READY(&ifp->if_snd);
+
+	/* oce splits jumbos into 2k chunks... */
+	m_clsetwms(ifp, MCLBYTES, 8, sc->rx_ring_size);
+
+	ifp->if_capabilities = IFCAP_VLAN_MTU;
+#if NVLAN > 0
+	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+#endif
+#if defined(INET6) || defined(INET)
+#ifdef OCE_TSO
+	ifp->if_capabilities |= IFCAP_TSO;
+	ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
+#endif
+#ifdef OCE_LRO
+	ifp->if_capabilities |= IFCAP_LRO;
+#endif
+#endif
+
+	if_attach(ifp);
+	ether_ifattach(ifp);
+
+	return 0;
+}
+
+/*
+ * Program the currently configured lladdr (arpcom.ac_enaddr) into the
+ * firmware: the new MAC is added first and the old one deleted after,
+ * so a valid filter exists at all times.
+ */
+void
+oce_mac_addr_set(struct oce_softc *sc)
+{
+	uint32_t old_pmac_id = sc->pmac_id;
+	int status = 0;
+
+	/* nothing to do if the address hasn't changed */
+	if (!bcmp(sc->arpcom.ac_enaddr, sc->macaddr.mac_addr, ETH_ADDR_LEN))
+		return;
+
+	status = oce_mbox_macaddr_add(sc, sc->arpcom.ac_enaddr, sc->if_id,
+	    &sc->pmac_id);
+	if (!status) {
+		/* NOTE(review): the delete result is ignored and the cached
+		 * copy is updated regardless -- presumably a stale old entry
+		 * is harmless; confirm against firmware behavior */
+		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
+		bcopy(sc->arpcom.ac_enaddr, sc->macaddr.mac_addr,
+		    sc->macaddr.size_of_struct);
+	} else
+		printf("%s: Failed to update MAC address\n", sc->dev.dv_xname);
+}
+
+/* Periodic (1 Hz) timer: refresh hardware and per-queue statistics. */
+void
+oce_local_timer(void *arg)
+{
+	struct oce_softc *sc = arg;
+	int s;
+
+	s = splnet();
+	oce_refresh_nic_stats(sc);
+	oce_refresh_queue_stats(sc);
+#if 0
+	/* TX Watchdog */
+	oce_start(ifp);
+#endif
+	splx(s);
+
+	/* reschedule ourselves */
+	timeout_add_sec(&sc->timer, 1);
+}
+
+/*
+ * Take the interface down: stop the stats timer, disable interrupts,
+ * delete the RX queues in firmware and discard any completions or
+ * events still pending in the rings.
+ */
+void
+oce_stop(struct oce_softc *sc)
+{
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	struct oce_eq *eq;
+	int i;
+
+	timeout_del(&sc->timer);
+
+	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+
+	/* Stop intrs and finish any bottom halves pending */
+	oce_hw_intr_disable(sc);
+
+	/* delete RX queues in firmware and reclaim posted buffers */
+	oce_stop_rx(sc);
+
+	/* Invalidate any pending cq and eq entries */
+	for_all_eq_queues(sc, eq, i)
+		oce_drain_eq(eq);
+	for_all_rq_queues(sc, rq, i)
+		oce_drain_rq_cq(rq);
+	for_all_wq_queues(sc, wq, i)
+		oce_drain_wq_cq(wq);
+
+	DELAY(10);
+}
+
+/*
+ * Bring the interface up; must be called at IPL_NET (see the splassert
+ * below).  Re-creates RX queues, posts buffers, arms all completion and
+ * event queues, then enables interrupts.
+ */
+void
+oce_init(void *arg)
+{
+	struct oce_softc *sc = arg;
+	struct ifnet *ifp = &sc->arpcom.ac_if;
+	struct oce_eq *eq;
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	int i;
+
+	splassert(IPL_NET);
+
+	/* start from a clean state */
+	oce_stop(sc);
+
+	oce_mac_addr_set(sc);
+
+	/* presumably programs the RX filter from ifp flags -- see oce_iff() */
+	oce_iff(sc);
+
+	/* create the RX queues in firmware */
+	if (oce_start_rx(sc)) {
+		printf("%s: failed to create rq\n", sc->dev.dv_xname);
+		goto error;
+	}
+
+	/* post receive buffers */
+	for_all_rq_queues(sc, rq, i) {
+		if (!oce_alloc_rx_bufs(rq)) {
+			printf("%s: failed to allocate rx buffers\n",
+			    sc->dev.dv_xname);
+			goto error;
+		}
+	}
+
+	DELAY(10);
+
+	/* RSS config */
+	if (sc->rss_enable) {
+		if (oce_config_nic_rss(sc, (uint8_t)sc->if_id, RSS_ENABLE)) {
+			printf("%s: failed to configure RSS\n",
+			    sc->dev.dv_xname);
+			goto error;
+		}
+	}
+
+	/* arm all completion queues first, then the event queues */
+	for_all_rq_queues(sc, rq, i)
+		oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
+
+	for_all_wq_queues(sc, wq, i)
+		oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
+
+	oce_arm_cq(sc->mq->parent, sc->mq->cq->cq_id, 0, TRUE);
+
+	for_all_eq_queues(sc, eq, i)
+		oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
+
+	if (oce_get_link_status(sc) == 0)
+		oce_update_link_status(sc);
+
+	ifp->if_flags |= IFF_RUNNING;
+	ifp->if_flags &= ~IFF_OACTIVE;
+
+	/* start the periodic stats timer */
+	timeout_add_sec(&sc->timer, 1);
+
+	oce_hw_intr_enable(sc);
+
+	return;
+error:
+	oce_stop(sc);
+}
+
+/*
+ * Process an asynchronous link-state CQE: record link status and speed
+ * in the softc, then propagate the change to the network stack.
+ */
+void
+oce_link_event(struct oce_softc *sc, struct oce_async_cqe_link_state *acqe)
+{
+	/* physical and logical link up both count as "up" */
+	if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
+	    ASYNC_EVENT_LINK_UP)
+		sc->link_status = ASYNC_EVENT_LINK_UP;
+	else
+		sc->link_status = ASYNC_EVENT_LINK_DOWN;
+
+	sc->link_speed = acqe->u0.s.speed;
+	/* scale the QoS link speed as reported by the firmware */
+	sc->qos_link_speed = (uint32_t)acqe->u0.s.qos_link_speed * 10;
+
+	oce_update_link_status(sc);
+}
+
+/* Handle the Completion Queue for the Mailbox/Async notifications */
+void
+oce_mq_handler(void *arg)
+{
+	struct oce_mq *mq = (struct oce_mq *)arg;
+	struct oce_softc *sc = mq->parent;
+	struct oce_cq *cq = mq->cq;
+	struct oce_mq_cqe *cqe;
+	struct oce_async_cqe_link_state *acqe;
+	struct oce_async_event_grp5_pvid_state *gcqe;
+	int evt_type, optype, num_cqes = 0;
+
+	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+	/* a non-zero fourth dword marks a valid (unconsumed) entry */
+	while (cqe->u0.dw[3]) {
+		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
+		if (cqe->u0.s.async_event) {
+			evt_type = cqe->u0.s.event_type;
+			optype = cqe->u0.s.async_type;
+			if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) {
+				/* Link status evt */
+				acqe = (struct oce_async_cqe_link_state *)cqe;
+				oce_link_event(sc, acqe);
+			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
+			    (optype == ASYNC_EVENT_PVID_STATE)) {
+				/* GRP5 PVID */
+				gcqe =
+				    (struct oce_async_event_grp5_pvid_state *)cqe;
+				if (gcqe->enabled)
+					sc->pvid = gcqe->tag & VLAN_VID_MASK;
+				else
+					sc->pvid = 0;
+			}
+		}
+		/* mark the entry consumed */
+		cqe->u0.dw[3] = 0;
+		RING_GET(cq->ring, 1);
+		oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
+		num_cqes++;
+	}
+
+	if (num_cqes)
+		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE /* TRUE */);
+}
+
+/**
+ * @brief Create and initialize all the queues on the board
+ * @param sc software handle to the device
+ * @returns 0 if successful, or error
+ **/
+int
+oce_queue_init_all(struct oce_softc *sc)
+{
+	struct oce_wq *wq;
+	struct oce_rq *rq;
+	int i, rc = 0;
+
+	/* alloc TX/RX queues */
+	for_all_wq_queues(sc, wq, i) {
+		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
+		    NIC_WQ_TYPE_STANDARD);
+		if (!sc->wq[i])
+			goto error;
+	}
+
+	for_all_rq_queues(sc, rq, i) {
+		/* queue 0 is the default queue, the others are RSS queues */
+		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
+		    OCE_MAX_JUMBO_FRAME_SIZE, (i == 0) ? 0 : sc->rss_enable);
+		if (!sc->rq[i])
+			goto error;
+	}
+
+	/* Create network interface on card */
+	if (oce_create_nw_interface(sc))
+		goto error;
+
+	/* create all of the event queues */
+	for (i = 0; i < sc->intr_count; i++) {
+		sc->eq[i] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4, 80);
+		if (!sc->eq[i])
+			goto error;
+	}
+
+	/* create Tx, Rx and mcc queues */
+	for_all_wq_queues(sc, wq, i) {
+		rc = oce_wq_create(wq, sc->eq[i]);
+		if (rc)
+			goto error;
+		wq->queue_index = i;
+	}
+
+	for_all_rq_queues(sc, rq, i) {
+		/* NOTE(review): rq 0 shares eq 0 with wq 0 while rq i (i > 0)
+		 * uses eq i-1 -- confirm this mapping is intended */
+		rc = oce_rq_create(rq, sc->if_id, sc->eq[i == 0 ? 0 : i - 1]);
+		if (rc)
+			goto error;
+		rq->queue_index = i;
+	}
+
+	sc->mq = oce_mq_create(sc, sc->eq[0], 128);
+	if (!sc->mq)
+		goto error;
+
+	return rc;
+
+error:
+	oce_queue_release_all(sc);
+	return 1;
+}
+
+/**
+ * @brief Release all queues created by oce_queue_init_all
+ * @param sc software handle to the device
+ */
+void
+oce_queue_release_all(struct oce_softc *sc)
+{
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	struct oce_eq *eq;
+	int i = 0;
+
+	/* RX first, then TX, then the mailbox queue, then the EQs */
+	for_all_rq_queues(sc, rq, i) {
+		if (rq == NULL)
+			continue;
+		oce_rq_del(sc->rq[i]);
+		oce_rq_free(sc->rq[i]);
+	}
+
+	for_all_wq_queues(sc, wq, i) {
+		if (wq == NULL)
+			continue;
+		oce_wq_del(sc->wq[i]);
+		oce_wq_free(sc->wq[i]);
+	}
+
+	if (sc->mq)
+		oce_mq_free(sc->mq);
+
+	for_all_eq_queues(sc, eq, i) {
+		if (eq != NULL)
+			oce_eq_del(sc->eq[i]);
+	}
+}
+
+/**
+ * @brief Function to create a WQ for NIC Tx
+ * @param sc software handle to the device
+ * @param q_len number of entries in the queue (256..2048)
+ * @param wq_type work queue type
+ * @returns the pointer to the WQ created or NULL on failure
+ */
+struct oce_wq *
+oce_wq_init(struct oce_softc *sc, uint32_t q_len, uint32_t wq_type)
+{
+	struct oce_wq *wq;
+	int rc = 0, i;
+
+	/* q_len must be min 256 and max 2k */
+	if (q_len < 256 || q_len > 2048) {
+		/* report the bounds that are actually enforced (was "2000") */
+		printf("%s: Invalid q length. Must be [256, 2048]: 0x%x\n",
+		    sc->dev.dv_xname, q_len);
+		return NULL;
+	}
+
+	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!wq)
+		return NULL;
+
+	wq->cfg.q_len = q_len;
+	wq->cfg.wq_type = (uint8_t) wq_type;
+	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
+	wq->cfg.nbufs = 2 * wq->cfg.q_len;
+	wq->cfg.nhdl = 2 * wq->cfg.q_len;
+
+	wq->parent = (void *)sc;
+	wq->tag = sc->pa.pa_dmat;
+
+	/* one DMA map per in-flight TX packet */
+	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
+		rc = bus_dmamap_create(wq->tag, OCE_MAX_TX_SIZE,
+		    OCE_MAX_TX_ELEMENTS, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
+		    &wq->pckts[i].map);
+		if (rc)
+			goto free_wq;
+	}
+
+	wq->ring = oce_create_ring(sc, q_len, NIC_WQE_SIZE, 8);
+	if (!wq->ring)
+		goto free_wq;
+
+	return wq;
+
+free_wq:
+	printf("%s: Create WQ failed\n", sc->dev.dv_xname);
+	oce_wq_free(wq);
+	return NULL;
+}
+
+/**
+ * @brief Release all resources held by a work queue
+ * @param wq pointer to work queue to free
+ */
+void
+oce_wq_free(struct oce_wq *wq)
+{
+	struct oce_softc *sc = (struct oce_softc *) wq->parent;
+	int i;
+
+	if (wq->ring != NULL) {
+		oce_destroy_ring(sc, wq->ring);
+		wq->ring = NULL;
+	}
+
+	/* tear down the per-packet DMA maps */
+	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
+		if (wq->pckts[i].map == NULL)
+			continue;
+		bus_dmamap_unload(wq->tag, wq->pckts[i].map);
+		bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
+		wq->pckts[i].map = NULL;
+	}
+
+	wq->tag = NULL;
+	free(wq, M_DEVBUF);
+}
+
+/**
+ * @brief Create a work queue
+ * @param wq pointer to work queue
+ * @param eq pointer to associated event queue
+ * @returns 0 on success, ENXIO or a mailbox error code otherwise
+ */
+int
+oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
+{
+	struct oce_softc *sc = wq->parent;
+	struct oce_cq *cq;
+	int rc = 0;
+
+	/* create the CQ that will receive the TX completions for this WQ */
+	cq = oce_cq_create(sc, eq, CQ_LEN_512, sizeof(struct oce_nic_tx_cqe),
+	    1, 0, 3);
+	if (!cq)
+		return ENXIO;
+
+	wq->cq = cq;
+
+	rc = oce_mbox_create_wq(wq);
+	if (rc)
+		goto error;
+
+	/* reset the software ring state */
+	wq->qstate = QCREATED;
+	wq->wq_free = wq->cfg.q_len;
+	wq->ring->cidx = 0;
+	wq->ring->pidx = 0;
+
+	/* hook the CQ into the EQ and install the completion handler */
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	cq->cb_arg = wq;
+	cq->cq_handler = oce_wq_handler;
+
+	return 0;
+error:
+	printf("%s: failed to create wq\n", sc->dev.dv_xname);
+	oce_wq_del(wq);
+	return rc;
+}
+
+/**
+ * @brief Destroy a work queue in firmware along with its CQ
+ * @param wq pointer to work queue
+ */
+void
+oce_wq_del(struct oce_wq *wq)
+{
+	struct oce_softc *sc = (struct oce_softc *) wq->parent;
+	struct mbx_delete_nic_wq *fwcmd;
+	struct oce_mbx mbx;
+
+	if (wq->qstate == QCREATED) {
+		/* ask the firmware to delete the WQ */
+		bzero(&mbx, sizeof(struct oce_mbx));
+		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
+		fwcmd->params.req.wq_id = wq->wq_id;
+		(void)oce_destroy_q(sc, &mbx,
+		    sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
+		wq->qstate = QDELETED;
+	}
+
+	if (wq->cq != NULL) {
+		oce_cq_del(sc, wq->cq);
+		wq->cq = NULL;
+	}
+}
+
+/**
+ * @brief function to allocate receive queue resources
+ * @param sc software handle to the device
+ * @param q_len length of receive queue (the hardware requires 1024)
+ * @param frag_size size of an receive queue fragment
+ * @param mtu maximum transmission unit
+ * @param rss is-rss-queue flag
+ * @returns the pointer to the RQ created or NULL on failure
+ */
+struct oce_rq *
+oce_rq_init(struct oce_softc *sc, uint32_t q_len, uint32_t frag_size,
+    uint32_t mtu, uint32_t rss)
+{
+	struct oce_rq *rq;
+	int rc = 0, i;
+
+	/* reject fragment sizes for which OCE_LOG2() has no valid result */
+	if (OCE_LOG2(frag_size) <= 0)
+		return NULL;
+
+	/* Hardware doesn't support any other value */
+	if (q_len != 1024)
+		return NULL;
+
+	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!rq)
+		return NULL;
+
+	rq->cfg.q_len = q_len;
+	rq->cfg.frag_size = frag_size;
+	rq->cfg.mtu = mtu;
+	rq->cfg.is_rss_queue = rss;
+
+	rq->parent = (void *)sc;
+	rq->tag = sc->pa.pa_dmat;
+
+	/* one single-segment DMA map per posted receive buffer */
+	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
+		rc = bus_dmamap_create(rq->tag, frag_size, 1, frag_size, 0,
+		    BUS_DMA_NOWAIT, &rq->pckts[i].map);
+		if (rc)
+			goto free_rq;
+	}
+
+	rq->ring = oce_create_ring(sc, q_len, sizeof(struct oce_nic_rqe), 2);
+	if (!rq->ring)
+		goto free_rq;
+
+	return rq;
+
+free_rq:
+	printf("%s: failed to create rq\n", sc->dev.dv_xname);
+	oce_rq_free(rq);
+	return NULL;
+}
+
+/**
+ * @brief Release all resources held by a receive queue
+ * @param rq pointer to receive queue
+ */
+void
+oce_rq_free(struct oce_rq *rq)
+{
+	struct oce_softc *sc = (struct oce_softc *) rq->parent;
+	int i;
+
+	if (rq->ring != NULL) {
+		oce_destroy_ring(sc, rq->ring);
+		rq->ring = NULL;
+	}
+
+	/* release the per-packet DMA maps and any clusters still attached */
+	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
+		if (rq->pckts[i].map != NULL) {
+			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
+			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
+			rq->pckts[i].map = NULL;
+		}
+		if (rq->pckts[i].mbuf != NULL) {
+			m_freem(rq->pckts[i].mbuf);
+			rq->pckts[i].mbuf = NULL;
+		}
+	}
+
+	rq->tag = NULL;
+	free(rq, M_DEVBUF);
+}
+
+/**
+ * @brief Create a receive queue
+ * @param rq receive queue
+ * @param if_id interface identifier index
+ * @param eq pointer to event queue
+ * @returns 0 on success, ENXIO when the CQ cannot be created
+ */
+int
+oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
+{
+	struct oce_softc *sc = rq->parent;
+	struct oce_cq *cq;
+
+	/* create the CQ that will receive RX completions for this RQ */
+	cq = oce_cq_create(sc, eq, CQ_LEN_1024, sizeof(struct oce_nic_rx_cqe),
+	    1, 0, 3);
+	if (!cq)
+		return ENXIO;
+
+	rq->cq = cq;
+	rq->cfg.if_id = if_id;
+
+	/* hook the CQ into the EQ and install the completion handler */
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	cq->cb_arg = rq;
+	cq->cq_handler = oce_rq_handler;
+
+	/* RX queue is created in oce_init */
+
+	return 0;
+}
+
+/**
+ * @brief Destroy a receive queue in firmware along with its CQ
+ * @param rq receive queue
+ */
+void
+oce_rq_del(struct oce_rq *rq)
+{
+	struct oce_softc *sc = (struct oce_softc *) rq->parent;
+	struct mbx_delete_nic_rq *fwcmd;
+	struct oce_mbx mbx;
+
+	if (rq->qstate == QCREATED) {
+		/* ask the firmware to delete the RQ */
+		bzero(&mbx, sizeof(mbx));
+		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+		fwcmd->params.req.rq_id = rq->rq_id;
+		(void)oce_destroy_q(sc, &mbx,
+		    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+		rq->qstate = QDELETED;
+	}
+
+	if (rq->cq != NULL) {
+		oce_cq_del(sc, rq->cq);
+		rq->cq = NULL;
+	}
+}
+
+/**
+ * @brief function to create an event queue
+ * @param sc software handle to the device
+ * @param q_len length of event queue
+ * @param item_size size of an event queue item
+ * @param eq_delay event queue delay
+ * @retval eq success, pointer to event queue
+ * @retval NULL failure
+ */
+struct oce_eq *
+oce_eq_create(struct oce_softc *sc, uint32_t q_len, uint32_t item_size,
+    uint32_t eq_delay)
+{
+	struct oce_eq *eq;
+	int rc = 0;
+
+	/* allocate an eq */
+	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (eq == NULL)
+		return NULL;
+
+	eq->parent = sc;
+	eq->eq_id = 0xffff;	/* invalid until the firmware assigns one */
+
+	eq->ring = oce_create_ring(sc, q_len, item_size, 8);
+	if (!eq->ring)
+		goto free_eq;
+
+	eq->eq_cfg.q_len = q_len;
+	eq->eq_cfg.item_size = item_size;
+	eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
+
+	rc = oce_mbox_create_eq(eq);
+	if (rc)
+		goto free_eq;
+
+	return eq;
+free_eq:
+	/* print the device name, not __func__, like every other error path */
+	printf("%s: failed to create eq\n", sc->dev.dv_xname);
+	oce_eq_del(eq);
+	return NULL;
+}
+
+/**
+ * @brief Destroy an event queue in firmware and free its ring
+ * @param eq pointer to an event queue
+ */
+void
+oce_eq_del(struct oce_eq *eq)
+{
+	struct oce_softc *sc = (struct oce_softc *) eq->parent;
+	struct mbx_destroy_common_eq *fwcmd;
+	struct oce_mbx mbx;
+
+	/* an id of 0xffff means the EQ was never created in firmware */
+	if (eq->eq_id != 0xffff) {
+		bzero(&mbx, sizeof(mbx));
+		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
+		fwcmd->params.req.id = eq->eq_id;
+		(void)oce_destroy_q(sc, &mbx,
+		    sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
+	}
+
+	if (eq->ring != NULL) {
+		oce_destroy_ring(sc, eq->ring);
+		eq->ring = NULL;
+	}
+
+	free(eq, M_DEVBUF);
+}
+
+/**
+ * @brief Function to create an MQ
+ * @param sc software handle to the device
+ * @param eq the EQ to associate with the MQ for event notification
+ * @param q_len the number of entries to create in the MQ
+ * @returns pointer to the created MQ, NULL on failure
+ */
+struct oce_mq *
+oce_mq_create(struct oce_softc *sc, struct oce_eq *eq, uint32_t q_len)
+{
+	struct oce_mq *mq = NULL;
+	int rc = 0;
+	struct oce_cq *cq;
+
+	/* allocate the mq */
+	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!mq)
+		return NULL;
+
+	/* create the CQ that carries mailbox completions and async events */
+	cq = oce_cq_create(sc, eq, CQ_LEN_256, sizeof(struct oce_mq_cqe),
+	    1, 0, 0);
+	if (!cq) {
+		free(mq, M_DEVBUF);
+		return NULL;
+	}
+
+	mq->parent = sc;
+	mq->cq = cq;
+
+	mq->ring = oce_create_ring(sc, q_len, sizeof(struct oce_mbx), 8);
+	if (!mq->ring)
+		goto error;
+
+	mq->cfg.q_len = (uint8_t)q_len;
+
+	rc = oce_mbox_create_mq(mq);
+	if (rc)
+		goto error;
+
+	/* hook the CQ into the EQ and install the completion handler */
+	eq->cq[eq->cq_valid] = cq;
+	eq->cq_valid++;
+	mq->cq->eq = eq;
+	mq->qstate = QCREATED;
+	mq->cq->cb_arg = mq;
+	mq->cq->cq_handler = oce_mq_handler;
+
+	return mq;
+error:
+	printf("%s: failed to create mq\n", sc->dev.dv_xname);
+	oce_mq_free(mq);
+	mq = NULL;
+	return mq;
+}
+
+/**
+ * @brief Function to free a mailbox queue
+ * @param mq pointer to a mailbox queue (may be NULL)
+ */
+void
+oce_mq_free(struct oce_mq *mq)
+{
+	struct oce_softc *sc;
+	struct oce_mbx mbx;
+	struct mbx_destroy_common_mq *fwcmd;
+
+	/* check for NULL before touching mq: the previous code dereferenced
+	 * mq->parent first, which made the check useless */
+	if (mq == NULL)
+		return;
+
+	sc = (struct oce_softc *)mq->parent;
+
+	if (mq->ring != NULL) {
+		oce_destroy_ring(sc, mq->ring);
+		mq->ring = NULL;
+		/* tell the firmware to destroy the MQ if it was created */
+		if (mq->qstate == QCREATED) {
+			bzero(&mbx, sizeof (struct oce_mbx));
+			fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
+			fwcmd->params.req.id = mq->mq_id;
+			(void)oce_destroy_q(sc, &mbx,
+			    sizeof (struct mbx_destroy_common_mq),
+			    QTYPE_MQ);
+		}
+		mq->qstate = QDELETED;
+	}
+
+	if (mq->cq != NULL) {
+		oce_cq_del(sc, mq->cq);
+		mq->cq = NULL;
+	}
+
+	free(mq, M_DEVBUF);
+}
+
+/**
+ * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
+ * @param sc sofware handle to the device
+ * @param mbx mailbox command to send to the fw to delete the queue
+ *	(mbx contains the queue information to delete)
+ * @param req_size the size of the mbx payload dependent on the qtype
+ * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
+ * @returns 0 on success, EINVAL or a mailbox error otherwise
+ */
+int
+oce_destroy_q(struct oce_softc *sc, struct oce_mbx *mbx, size_t req_size,
+    enum qtype qtype)
+{
+	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
+	int opcode, subsys, rc;
+
+	/* pick the opcode and subsystem matching the queue type */
+	switch (qtype) {
+	case QTYPE_EQ:
+		opcode = OPCODE_COMMON_DESTROY_EQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_CQ:
+		opcode = OPCODE_COMMON_DESTROY_CQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_MQ:
+		opcode = OPCODE_COMMON_DESTROY_MQ;
+		subsys = MBX_SUBSYSTEM_COMMON;
+		break;
+	case QTYPE_WQ:
+		opcode = OPCODE_NIC_DELETE_WQ;
+		subsys = MBX_SUBSYSTEM_NIC;
+		break;
+	case QTYPE_RQ:
+		opcode = OPCODE_NIC_DELETE_RQ;
+		subsys = MBX_SUBSYSTEM_NIC;
+		break;
+	default:
+		return EINVAL;
+	}
+
+	mbx_common_req_hdr_init(hdr, 0, 0, subsys, opcode, MBX_TIMEOUT_SEC,
+	    req_size, OCE_MBX_VER_V0);
+
+	mbx->u0.s.embedded = 1;
+	mbx->payload_length = (uint32_t)req_size;
+	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, mbx, NULL);
+	if (rc != 0)
+		printf("%s: Failed to del q\n", sc->dev.dv_xname);
+
+	return rc;
+}
+
+/**
+ * @brief Function to create a completion queue
+ * @param sc software handle to the device
+ * @param eq optional eq to be associated with to the cq
+ * @param q_len length of completion queue
+ * @param item_size size of completion queue items
+ * @param is_eventable event table
+ * @param nodelay no delay flag
+ * @param ncoalesce no coalescence flag
+ * @returns pointer to the cq created, NULL on failure
+ */
+struct oce_cq *
+oce_cq_create(struct oce_softc *sc, struct oce_eq *eq, uint32_t q_len,
+    uint32_t item_size, uint32_t is_eventable, uint32_t nodelay,
+    uint32_t ncoalesce)
+{
+	struct oce_cq *cq = NULL;
+	int rc = 0;
+
+	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!cq)
+		return NULL;
+
+	cq->ring = oce_create_ring(sc, q_len, item_size, 4);
+	if (!cq->ring)
+		goto error;
+
+	cq->parent = sc;
+	cq->eq = eq;
+	cq->cq_cfg.q_len = q_len;
+	cq->cq_cfg.item_size = item_size;
+	cq->cq_cfg.nodelay = (uint8_t) nodelay;
+
+	rc = oce_mbox_create_cq(cq, ncoalesce, is_eventable);
+	if (rc)
+		goto error;
+
+	/* keep track of the CQ in the softc for interrupt dispatch */
+	sc->cq[sc->ncqs++] = cq;
+
+	return cq;
+error:
+	printf("%s: failed to create cq\n", sc->dev.dv_xname);
+	oce_cq_del(sc, cq);
+	return NULL;
+}
+
+/**
+ * @brief Destroy a completion queue in firmware and free its ring
+ * @param sc software handle to the device
+ * @param cq pointer to a completion queue
+ */
+void
+oce_cq_del(struct oce_softc *sc, struct oce_cq *cq)
+{
+	struct mbx_destroy_common_cq *fwcmd;
+	struct oce_mbx mbx;
+
+	if (cq->ring != NULL) {
+		/* ask the firmware to destroy the CQ, then free the ring */
+		bzero(&mbx, sizeof(struct oce_mbx));
+		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
+		fwcmd->params.req.id = cq->cq_id;
+		(void)oce_destroy_q(sc, &mbx,
+		    sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
+		oce_destroy_ring(sc, cq->ring);
+		cq->ring = NULL;
+	}
+
+	free(cq, M_DEVBUF);
+}
+
+/**
+ * @brief Ring the EQ doorbell: ack EQEs and optionally re-arm the queue
+ * @param sc software handle to the device
+ * @param qid id of the EQ returned by the fw at the time of creation
+ * @param npopped number of EQEs to acknowledge
+ * @param rearm rearm bit enable/disable
+ * @param clearint bit to clear the interrupt condition because of which
+ *	EQEs are generated
+ */
+void
+oce_arm_eq(struct oce_softc *sc, int16_t qid, int npopped, uint32_t rearm,
+    uint32_t clearint)
+{
+	eq_db_t eqdb = { 0 };
+
+	eqdb.bits.qid = qid;
+	eqdb.bits.event = 1;	/* this doorbell targets an event queue */
+	eqdb.bits.clrint = clearint;
+	eqdb.bits.num_popped = npopped;
+	eqdb.bits.rearm = rearm;
+	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eqdb.dw0);
+}
+
+/**
+ * @brief Ring the CQ doorbell: ack CQEs and optionally re-arm the queue
+ * @param sc software handle to the device
+ * @param qid id of the CQ returned by the fw at the time of creation
+ * @param npopped number of CQEs to acknowledge
+ * @param rearm rearm bit enable/disable
+ */
+void
+oce_arm_cq(struct oce_softc *sc, int16_t qid, int npopped, uint32_t rearm)
+{
+	cq_db_t cqdb = { 0 };
+
+	cqdb.bits.qid = qid;
+	cqdb.bits.event = 0;	/* completion, not event, doorbell */
+	cqdb.bits.num_popped = npopped;
+	cqdb.bits.rearm = rearm;
+	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cqdb.dw0);
+}
+
+/*
+ * @brief Consume and discard any pending EQEs during stop
+ * @param eq pointer to event queue structure
+ */
+void
+oce_drain_eq(struct oce_eq *eq)
+{
+	struct oce_softc *sc = eq->parent;
+	struct oce_eqe *eqe;
+	uint16_t npopped = 0;
+
+	for (;;) {
+		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
+		if (eqe->evnt == 0)
+			break;
+		eqe->evnt = 0;
+		oce_dma_sync(&eq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		npopped++;
+		RING_GET(eq->ring, 1);
+	}
+
+	/* acknowledge the popped entries and clear the interrupt */
+	oce_arm_eq(sc, eq->eq_id, npopped, FALSE, TRUE);
+}
+
+/* Consume and discard pending TX completions during stop. */
+void
+oce_drain_wq_cq(struct oce_wq *wq)
+{
+	struct oce_softc *sc = wq->parent;
+	struct oce_cq *cq = wq->cq;
+	struct oce_nic_tx_cqe *cqe;
+	int npopped = 0;
+
+	oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+
+	for (;;) {
+		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
+		if (cqe->u0.dw[3] == 0)
+			break;
+		cqe->u0.dw[3] = 0;
+		oce_dma_sync(&cq->ring->dma, BUS_DMASYNC_POSTWRITE);
+		RING_GET(cq->ring, 1);
+		npopped++;
+	}
+
+	oce_arm_cq(sc, cq->cq_id, npopped, FALSE);
+}
+
+/*
+ * @brief Drain the mailbox CQ during stop -- not implemented yet
+ * @param arg pointer to the queue to drain
+ */
+void
+oce_drain_mq_cq(void *arg)
+{
+	/* TODO: additional code. */
+}
+
+/**
+ * @brief Invalidate pending RX completions during stop
+ * @param rq pointer to the RQ to drain
+ */
+void
+oce_drain_rq_cq(struct oce_rq *rq)
+{
+	struct oce_softc *sc = rq->parent;
+	struct oce_cq *cq = rq->cq;
+	struct oce_nic_rx_cqe *cqe;
+	uint16_t npopped = 0;
+
+	/* dequeue till you reach an invalid cqe */
+	for (cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+	    RQ_CQE_VALID(cqe);
+	    cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe)) {
+		RQ_CQE_INVALIDATE(cqe);
+		RING_GET(cq->ring, 1);
+		npopped++;
+	}
+
+	oce_arm_cq(sc, cq->cq_id, npopped, FALSE);
+}
+
+
+void
+oce_free_posted_rxbuf(struct oce_rq *rq)
+{
+ struct oce_packet_desc *pd;
+
+ while (rq->pending) {
+ pd = &rq->pckts[rq->packets_out];
+ oce_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(rq->tag, pd->map);
+ if (pd->mbuf != NULL) {
+ m_freem(pd->mbuf);
+ pd->mbuf = NULL;
+ }
+
+ if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
+ rq->packets_out = 0;
+ else
+ rq->packets_out++;
+
+ rq->pending--;
+ }
+}
+
+/*
+ * Delete every created RX queue in firmware and free the receive
+ * buffers that were posted but never filled.
+ */
+void
+oce_stop_rx(struct oce_softc *sc)
+{
+	struct mbx_delete_nic_rq *fwcmd;
+	struct oce_mbx mbx;
+	struct oce_rq *rq;
+	int i;
+
+	for_all_rq_queues(sc, rq, i) {
+		if (rq->qstate != QCREATED)
+			continue;
+
+		/* Delete rxq in firmware */
+		bzero(&mbx, sizeof(mbx));
+		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+		fwcmd->params.req.rq_id = rq->rq_id;
+		(void)oce_destroy_q(sc, &mbx,
+		    sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+		rq->qstate = QDELETED;
+
+		DELAY(1);
+
+		/* Free posted RX buffers that are not used */
+		oce_free_posted_rxbuf(rq);
+	}
+}
+
+/*
+ * Create (or re-create) every RX queue in firmware and reset its
+ * software ring state.  Returns 0 on success or a mailbox error.
+ */
+int
+oce_start_rx(struct oce_softc *sc)
+{
+	struct oce_rq *rq;
+	int i, rc = 0;
+
+	for_all_rq_queues(sc, rq, i) {
+		if (rq->qstate == QCREATED)
+			continue;
+		rc = oce_mbox_create_rq(rq);
+		if (rc)
+			return rc;
+		/* reset queue pointers */
+		rq->qstate = QCREATED;
+		rq->pending = 0;
+		rq->packets_in = 0;
+		rq->packets_out = 0;
+		rq->ring->cidx = 0;
+		rq->ring->pidx = 0;
+	}
+
+	DELAY(10);
+
+	return rc;
+}
+
+/**
+ * @brief Allocate, map and load a contiguous chunk of DMA memory
+ * @param sc software handle to the device
+ * @param size bus size
+ * @param dma dma memory area
+ * @param flags additional bus_dmamap_load flags
+ * @returns 0 on success, error otherwize
+ */
+int
+oce_dma_alloc(struct oce_softc *sc, bus_size_t size, struct oce_dma_mem *dma,
+    int flags)
+{
+	int rc;
+
+	bzero(dma, sizeof(struct oce_dma_mem));
+
+	dma->tag = sc->pa.pa_dmat;
+	rc = bus_dmamap_create(dma->tag, size, 1, size, 0, BUS_DMA_NOWAIT,
+	    &dma->map);
+	if (rc != 0) {
+		/* error messages are now newline-terminated */
+		printf("%s: failed to allocate DMA handle\n", sc->dev.dv_xname);
+		goto fail_0;
+	}
+
+	rc = bus_dmamem_alloc(dma->tag, size, PAGE_SIZE, 0, &dma->segs, 1,
+	    &dma->nsegs, BUS_DMA_NOWAIT);
+	if (rc != 0) {
+		printf("%s: failed to allocate DMA memory\n", sc->dev.dv_xname);
+		goto fail_1;
+	}
+
+	rc = bus_dmamem_map(dma->tag, &dma->segs, dma->nsegs, size,
+	    &dma->vaddr, BUS_DMA_NOWAIT);
+	if (rc != 0) {
+		printf("%s: failed to map DMA memory\n", sc->dev.dv_xname);
+		goto fail_2;
+	}
+
+	rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, NULL,
+	    flags | BUS_DMA_NOWAIT);
+	if (rc != 0) {
+		printf("%s: failed to load DMA memory\n", sc->dev.dv_xname);
+		goto fail_3;
+	}
+
+	dma->paddr = dma->map->dm_segs[0].ds_addr;
+	dma->size = size;
+
+	return 0;
+
+fail_3:
+	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
+fail_2:
+	bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
+fail_1:
+	bus_dmamap_destroy(dma->tag, dma->map);
+fail_0:
+	return rc;
+}
+
+/**
+ * @brief Free DMA memory allocated by oce_dma_alloc
+ * @param sc software handle to the device
+ * @param dma dma area to free
+ */
+void
+oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma)
+{
+	if (dma->tag == NULL)
+		return;
+
+	if (dma->map != NULL) {
+		oce_dma_sync(dma, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(dma->tag, dma->map);
+
+		if (dma->vaddr != 0) {
+			/* unmap the kva before freeing the pages; the
+			 * previous code skipped bus_dmamem_unmap and
+			 * leaked the kernel mapping */
+			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
+			bus_dmamem_free(dma->tag, &dma->segs, dma->nsegs);
+			dma->vaddr = 0;
+		}
+
+		bus_dmamap_destroy(dma->tag, dma->map);
+		dma->map = NULL;
+		dma->tag = NULL;
+	}
+}
+
+/**
+ * @brief Free a descriptor ring's DMA memory and the ring itself
+ * @param sc software handle to the device
+ * @param ring ring buffer
+ */
+void
+oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring)
+{
+	oce_dma_free(sc, &ring->dma);
+	free(ring, M_DEVBUF);
+}
+
+/**
+ * @brief Allocate a DMA-able descriptor ring
+ * @param sc software handle to the device
+ * @param q_len number of items in the ring
+ * @param item_size size of a single item
+ * @param max_segs maximum number of DMA segments (pages) allowed
+ * @returns pointer to the ring created, NULL on failure
+ */
+struct oce_ring *
+oce_create_ring(struct oce_softc *sc, int q_len, int item_size,
+    int max_segs)
+{
+	bus_size_t size = q_len * item_size;
+	struct oce_ring *ring;
+	int rc;
+
+	/* the ring must fit into max_segs pages */
+	if (size > max_segs * PAGE_SIZE)
+		return NULL;
+
+	ring = malloc(sizeof(struct oce_ring), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (ring == NULL)
+		return NULL;
+
+	ring->item_size = item_size;
+	ring->num_items = q_len;
+
+	ring->dma.tag = sc->pa.pa_dmat;
+	rc = bus_dmamap_create(ring->dma.tag, size, max_segs, PAGE_SIZE, 0,
+	    BUS_DMA_NOWAIT, &ring->dma.map);
+	if (rc != 0) {
+		/* error messages are now newline-terminated */
+		printf("%s: failed to allocate DMA handle\n", sc->dev.dv_xname);
+		goto fail_0;
+	}
+
+	rc = bus_dmamem_alloc(ring->dma.tag, size, 0, 0, &ring->dma.segs,
+	    max_segs, &ring->dma.nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
+	if (rc != 0) {
+		printf("%s: failed to allocate DMA memory\n", sc->dev.dv_xname);
+		goto fail_1;
+	}
+
+	rc = bus_dmamem_map(ring->dma.tag, &ring->dma.segs, ring->dma.nsegs,
+	    size, &ring->dma.vaddr, BUS_DMA_NOWAIT);
+	if (rc != 0) {
+		printf("%s: failed to map DMA memory\n", sc->dev.dv_xname);
+		goto fail_2;
+	}
+
+	oce_dma_sync(&ring->dma, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	/* the map is loaded on demand by oce_page_list() */
+	ring->dma.paddr = 0;
+	ring->dma.size = size;
+
+	return ring;
+
+fail_2:
+	bus_dmamem_free(ring->dma.tag, &ring->dma.segs, ring->dma.nsegs);
+fail_1:
+	bus_dmamap_destroy(ring->dma.tag, ring->dma.map);
+fail_0:
+	free(ring, M_DEVBUF);
+	return NULL;
+}
+
+/**
+ * @brief Load the ring's DMA map and fill in a firmware page list
+ * @param sc software handle to the device
+ * @param ring ring buffer pointer
+ * @param pa_list physical address list to fill
+ * @param max_segs capacity of pa_list
+ * @returns number of entries filled in, 0 on failure
+ */
+uint32_t
+oce_page_list(struct oce_softc *sc, struct oce_ring *ring,
+    struct phys_addr *pa_list, int max_segs)
+{
+	struct oce_dma_mem *dma = &ring->dma;
+	bus_dma_segment_t *segs;
+	int i, nsegs;
+
+	if (bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
+	    ring->item_size * ring->num_items, NULL, BUS_DMA_NOWAIT)) {
+		printf("%s: oce_page_list failed to load\n", sc->dev.dv_xname);
+		return 0;
+	}
+
+	segs = dma->map->dm_segs;
+	nsegs = dma->map->dm_nsegs;
+	if (nsegs > max_segs) {
+		printf("%s: too many segments\n", sc->dev.dv_xname);
+		/* don't leave the map loaded on failure */
+		bus_dmamap_unload(dma->tag, dma->map);
+		return 0;
+	}
+
+	for (i = 0; i < nsegs; i++) {
+		pa_list[i].lo = ADDR_LO(segs[i].ds_addr);
+		pa_list[i].hi = ADDR_HI(segs[i].ds_addr);
+	}
+	return nsegs;
+}
diff --git a/sys/dev/pci/oce.c b/sys/dev/pci/oce.c
new file mode 100644
index 00000000000..4e7f527010e
--- /dev/null
+++ b/sys/dev/pci/oce.c
@@ -0,0 +1,2178 @@
+/* $OpenBSD: oce.c,v 1.1 2012/08/02 17:35:52 mikeb Exp $ */
+
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include "bpfilter.h"
+#include "vlan.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/sockio.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/device.h>
+#include <sys/timeout.h>
+#include <sys/socket.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#ifdef INET
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+#endif
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#if NVLAN > 0
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+#endif
+
+#include <dev/rndvar.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/pci/ocereg.h>
+#include <dev/pci/ocevar.h>
+
/* POST, function reset and firmware identification helpers */
int oce_post(struct oce_softc *sc);
int oce_fw_clean(struct oce_softc *sc);
int oce_reset_fun(struct oce_softc *sc);
int oce_get_fw_version(struct oce_softc *sc);

/* interface (iface) lifecycle and run-time configuration commands */
int oce_get_fw_config(struct oce_softc *sc);
int oce_if_create(struct oce_softc *sc, uint32_t cap_flags, uint32_t en_flags,
    uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id);
int oce_if_del(struct oce_softc *sc, uint32_t if_id);
int oce_config_vlan(struct oce_softc *sc, uint32_t if_id,
    struct normal_vlan *vtag_arr, uint8_t vtag_cnt, uint32_t untagged,
    uint32_t enable_promisc);
int oce_set_flow_control(struct oce_softc *sc, uint32_t flow_control);
int oce_rss_itbl_init(struct oce_softc *sc, struct mbx_config_nic_rss *fwcmd);
int oce_update_multicast(struct oce_softc *sc, struct oce_dma_mem *pdma_mem);

/* rx filter programming (non-embedded scatter/gather mailbox command) */
int oce_set_common_iface_rx_filter(struct oce_softc *sc,
    struct oce_dma_mem *sgl);

int oce_mbox_check_native_mode(struct oce_softc *sc);

/* statistics retrieval, one variant per adapter generation */
int oce_mbox_get_nic_stats_v0(struct oce_softc *sc,
    struct oce_dma_mem *pstats_dma_mem);
int oce_mbox_get_nic_stats(struct oce_softc *sc,
    struct oce_dma_mem *pstats_dma_mem);
int oce_mbox_get_pport_stats(struct oce_softc *sc,
    struct oce_dma_mem *pstats_dma_mem, uint32_t reset_stats);
void copy_stats_to_sc_xe201(struct oce_softc *sc);
void copy_stats_to_sc_be3(struct oce_softc *sc);
void copy_stats_to_sc_be2(struct oce_softc *sc);
+
+/**
+ * @brief Function to post status
+ * @param sc software handle to the device
+ */
+int
+oce_post(struct oce_softc *sc)
+{
+ mpu_ep_semaphore_t post_status;
+ int tmo = 60000;
+
+ /* read semaphore CSR */
+ post_status.dw0 = OCE_READ_REG32(sc, csr, MPU_EP_SEMAPHORE(sc));
+
+ /* if host is ready then wait for fw ready else send POST */
+ if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) {
+ post_status.bits.stage = POST_STAGE_CHIP_RESET;
+ OCE_WRITE_REG32(sc, csr, MPU_EP_SEMAPHORE(sc), post_status.dw0);
+ }
+
+ /* wait for FW ready */
+ for (;;) {
+ if (--tmo == 0)
+ break;
+
+ DELAY(1000);
+
+ post_status.dw0 = OCE_READ_REG32(sc, csr, MPU_EP_SEMAPHORE(sc));
+ if (post_status.bits.error) {
+ printf("%s: POST failed: %x\n", sc->dev.dv_xname,
+ post_status.dw0);
+ return ENXIO;
+ }
+ if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
+ return 0;
+ }
+
+ printf("%s: POST timed out: %x\n", sc->dev.dv_xname, post_status.dw0);
+
+ return ENXIO;
+}
+
+/**
+ * @brief Function for hardware initialization
+ * @param sc software handle to the device
+ */
+int
+oce_hw_init(struct oce_softc *sc)
+{
+ int rc = 0;
+
+ rc = oce_post(sc);
+ if (rc)
+ return rc;
+
+ /* create the bootstrap mailbox */
+ rc = oce_dma_alloc(sc, sizeof(struct oce_bmbx), &sc->bsmbx, 0);
+ if (rc) {
+ printf("%s: Mailbox alloc failed\n", sc->dev.dv_xname);
+ return rc;
+ }
+
+ rc = oce_reset_fun(sc);
+ if (rc)
+ goto error;
+
+ rc = oce_mbox_init(sc);
+ if (rc)
+ goto error;
+
+ rc = oce_get_fw_version(sc);
+ if (rc)
+ goto error;
+
+ rc = oce_get_fw_config(sc);
+ if (rc)
+ goto error;
+
+ sc->macaddr.size_of_struct = 6;
+ rc = oce_read_mac_addr(sc, 0, 1, MAC_ADDRESS_TYPE_NETWORK,
+ &sc->macaddr);
+ if (rc)
+ goto error;
+
+ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE3)) {
+ rc = oce_mbox_check_native_mode(sc);
+ if (rc)
+ goto error;
+ } else
+ sc->be3_native = 0;
+
+ return rc;
+
+error:
+ oce_dma_free(sc, &sc->bsmbx);
+ printf("%s: Hardware initialisation failed\n", sc->dev.dv_xname);
+ return rc;
+}
+
+/**
+ * @brief Allocate PCI resources.
+ *
+ * @param sc software handle to the device
+ * @returns 0 if successful, or error
+ */
+int
+oce_hw_pci_alloc(struct oce_softc *sc)
+{
+ struct pci_attach_args *pa = &sc->pa;
+ pci_sli_intf_t intf;
+ pcireg_t memtype, reg;
+
+ /* setup the device config region */
+ if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
+ reg = OCE_DEV_BE2_CFG_BAR;
+ else
+ reg = OCE_DEV_CFG_BAR;
+
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
+ if (pci_mapreg_map(pa, reg, memtype, 0, &sc->cfg_btag,
+ &sc->cfg_bhandle, NULL, &sc->cfg_size,
+ IS_BE(sc) ? 0 : 32768)) {
+ printf(": can't find cfg mem space\n");
+ return ENXIO;
+ }
+
+ /* Read the SLI_INTF register and determine whether we
+ * can use this port and its features
+ */
+ intf.dw0 = pci_conf_read(pa->pa_pc, pa->pa_tag, OCE_INTF_REG_OFFSET);
+
+ if (intf.bits.sli_valid != OCE_INTF_VALID_SIG) {
+ printf(": invalid signature\n");
+ goto fail_1;
+ }
+
+ if (intf.bits.sli_rev != OCE_INTF_SLI_REV4) {
+ printf(": adapter doesnt support SLI revision %d\n",
+ intf.bits.sli_rev);
+ goto fail_1;
+ }
+
+ if (intf.bits.sli_if_type == OCE_INTF_IF_TYPE_1)
+ sc->flags |= OCE_FLAGS_MBOX_ENDIAN_RQD;
+
+ if (intf.bits.sli_hint1 == OCE_INTF_FUNC_RESET_REQD)
+ sc->flags |= OCE_FLAGS_FUNCRESET_RQD;
+
+ if (intf.bits.sli_func_type == OCE_INTF_VIRT_FUNC)
+ sc->flags |= OCE_FLAGS_VIRTUAL_PORT;
+
+ /* Lancer has one BAR (CFG) but BE3 has three (CFG, CSR, DB) */
+ if (IS_BE(sc)) {
+ /* set up CSR region */
+ reg = OCE_PCI_CSR_BAR;
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
+ if (pci_mapreg_map(pa, reg, memtype, 0, &sc->csr_btag,
+ &sc->csr_bhandle, NULL, &sc->csr_size, 0)) {
+ printf(": can't find csr mem space\n");
+ goto fail_1;
+ }
+
+ /* set up DB doorbell region */
+ reg = OCE_PCI_DB_BAR;
+ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, reg);
+ if (pci_mapreg_map(pa, reg, memtype, 0, &sc->db_btag,
+ &sc->db_bhandle, NULL, &sc->db_size, 0)) {
+ printf(": can't find csr mem space\n");
+ goto fail_2;
+ }
+ }
+
+ return 0;
+
+fail_2:
+ bus_space_unmap(sc->csr_btag, sc->csr_bhandle, sc->csr_size);
+fail_1:
+ bus_space_unmap(sc->cfg_btag, sc->cfg_bhandle, sc->cfg_size);
+ return ENXIO;
+}
+
+/**
+ * @brief Function for creating nw interface.
+ * @param sc software handle to the device
+ * @returns 0 on success, error otherwise
+ */
+int
+oce_create_nw_interface(struct oce_softc *sc)
+{
+ int rc;
+ uint32_t capab_flags;
+ uint32_t capab_en_flags;
+
+ /* interface capabilities to give device when creating interface */
+ capab_flags = OCE_CAPAB_FLAGS;
+
+ /* capabilities to enable by default (others set dynamically) */
+ capab_en_flags = OCE_CAPAB_ENABLE;
+
+ if (IS_XE201(sc)) {
+ /* LANCER A0 workaround */
+ capab_en_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR;
+ capab_flags &= ~MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR;
+ }
+
+ /* enable capabilities controlled via driver startup parameters */
+ if (sc->rss_enable)
+ capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;
+ else {
+ capab_en_flags &= ~MBX_RX_IFACE_FLAGS_RSS;
+ capab_flags &= ~MBX_RX_IFACE_FLAGS_RSS;
+ }
+
+ rc = oce_if_create(sc, capab_flags, capab_en_flags, 0,
+ &sc->macaddr.mac_addr[0], &sc->if_id);
+ if (rc)
+ return rc;
+
+ sc->nifs++;
+
+ sc->if_cap_flags = capab_en_flags;
+
+ /* Enable VLAN Promisc on HW */
+ rc = oce_config_vlan(sc, (uint8_t)sc->if_id, NULL, 0, 1, 1);
+ if (rc)
+ goto error;
+
+ /* set default flow control */
+ rc = oce_set_flow_control(sc, sc->flow_control);
+ if (rc)
+ goto error;
+
+ return rc;
+
+error:
+ oce_delete_nw_interface(sc);
+ return rc;
+}
+
+/**
+ * @brief Function to delete a nw interface.
+ * @param sc software handle to the device
+ */
+void
+oce_delete_nw_interface(struct oce_softc *sc)
+{
+ /* currently only single interface is implmeneted */
+ if (sc->nifs > 0) {
+ oce_if_del(sc, sc->if_id);
+ sc->nifs--;
+ }
+}
+
+/**
+ * @brief Function for hardware enable interupts.
+ * @param sc software handle to the device
+ */
+void
+oce_hw_intr_enable(struct oce_softc *sc)
+{
+ uint32_t reg;
+
+ reg = OCE_READ_REG32(sc, cfg, PCICFG_INTR_CTRL);
+ reg |= HOSTINTR_MASK;
+ OCE_WRITE_REG32(sc, cfg, PCICFG_INTR_CTRL, reg);
+}
+
+/**
+ * @brief Function for hardware disable interupts
+ * @param sc software handle to the device
+ */
+void
+oce_hw_intr_disable(struct oce_softc *sc)
+{
+ uint32_t reg;
+
+ reg = OCE_READ_REG32(sc, cfg, PCICFG_INTR_CTRL);
+ reg &= ~HOSTINTR_MASK;
+ OCE_WRITE_REG32(sc, cfg, PCICFG_INTR_CTRL, reg);
+}
+
+/**
+ * @brief Function for hardware update multicast filter
+ * @param sc software handle to the device
+ */
+int
+oce_hw_update_multicast(struct oce_softc *sc)
+{
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ struct mbx_set_common_iface_multicast *req = NULL;
+ struct oce_dma_mem dma;
+ int rc = 0;
+
+ /* Allocate DMA mem*/
+ if (oce_dma_alloc(sc, sizeof(struct mbx_set_common_iface_multicast),
+ &dma, 0))
+ return ENOMEM;
+
+ req = OCE_DMAPTR(&dma, struct mbx_set_common_iface_multicast);
+ bzero(req, sizeof(struct mbx_set_common_iface_multicast));
+
+ ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
+ while (enm != NULL) {
+ if (req->params.req.num_mac == OCE_MAX_MC_FILTER_SIZE) {
+ /*More multicast addresses than our hardware table
+ So Enable multicast promiscus in our hardware to
+ accept all multicat packets
+ */
+ req->params.req.promiscuous = 1;
+ break;
+ }
+ bcopy(enm->enm_addrlo,
+ &req->params.req.mac[req->params.req.num_mac],
+ ETH_ADDR_LEN);
+ req->params.req.num_mac = req->params.req.num_mac + 1;
+ ETHER_NEXT_MULTI(step, enm);
+ }
+
+ req->params.req.if_id = sc->if_id;
+ rc = oce_update_multicast(sc, &dma);
+ oce_dma_free(sc, &dma);
+ return rc;
+}
+
+/**
+ * @brief Reset (firmware) common function
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_reset_fun(struct oce_softc *sc)
+{
+ struct oce_mbx *mbx;
+ struct oce_bmbx *mb;
+ struct ioctl_common_function_reset *fwcmd;
+ int rc = 0;
+
+ if (sc->flags & OCE_FLAGS_FUNCRESET_RQD) {
+ mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ mbx = &mb->mbx;
+ bzero(mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct ioctl_common_function_reset *)&mbx->payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_FUNCTION_RESET,
+ 10, /* MBX_TIMEOUT_SEC */
+ sizeof(struct
+ ioctl_common_function_reset),
+ OCE_MBX_VER_V0);
+
+ mbx->u0.s.embedded = 1;
+ mbx->payload_length =
+ sizeof(struct ioctl_common_function_reset);
+
+ rc = oce_mbox_dispatch(sc, 2);
+ }
+
+ return rc;
+}
+
+/**
+ * @brief This funtions tells firmware we are
+ * done with commands.
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_fw_clean(struct oce_softc *sc)
+{
+ struct oce_bmbx *mbx;
+ uint8_t *ptr;
+ int ret = 0;
+
+ mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ ptr = (uint8_t *)&mbx->mbx;
+
+ /* Endian Signature */
+ *ptr++ = 0xff;
+ *ptr++ = 0xaa;
+ *ptr++ = 0xbb;
+ *ptr++ = 0xff;
+ *ptr++ = 0xff;
+ *ptr++ = 0xcc;
+ *ptr++ = 0xdd;
+ *ptr = 0xff;
+
+ ret = oce_mbox_dispatch(sc, 2);
+
+ return ret;
+}
+
+/**
+ * @brief Mailbox wait
+ * @param sc software handle to the device
+ * @param tmo_sec timeout in seconds
+ */
+int
+oce_mbox_wait(struct oce_softc *sc, uint32_t tmo_sec)
+{
+ tmo_sec *= 10000;
+ pd_mpu_mbox_db_t mbox_db;
+
+ for (;;) {
+ if (tmo_sec != 0) {
+ if (--tmo_sec == 0)
+ break;
+ }
+
+ mbox_db.dw0 = OCE_READ_REG32(sc, db, PD_MPU_MBOX_DB);
+
+ if (mbox_db.bits.ready)
+ return 0;
+
+ DELAY(100);
+ }
+
+ printf("%s: Mailbox timed out\n", sc->dev.dv_xname);
+
+ return ETIMEDOUT;
+}
+
/**
 * @brief Mailbox dispatch
 *
 * Notifies the adapter of the bootstrap mailbox address through the
 * MPU mailbox doorbell in two writes: first the high part of the
 * physical address (paddr >> 34) with the hi bit set, then the low
 * 30 bits of (paddr >> 4).  The doorbell ready bit is polled before
 * each write and after the second one.
 *
 * @param sc software handle to the device
 * @param tmo_sec timeout in seconds
 * @returns 0 on success, ETIMEDOUT if a ready poll times out
 */
int
oce_mbox_dispatch(struct oce_softc *sc, uint32_t tmo_sec)
{
	pd_mpu_mbox_db_t mbox_db;
	uint32_t pa;
	int rc;

	/* flush the mailbox contents to memory before ringing */
	oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_PREWRITE);
	/* first doorbell write carries address bits above bit 33 */
	pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 34);
	bzero(&mbox_db, sizeof(pd_mpu_mbox_db_t));
	mbox_db.bits.ready = 0;
	mbox_db.bits.hi = 1;
	mbox_db.bits.address = pa;

	rc = oce_mbox_wait(sc, tmo_sec);
	if (rc == 0) {
		OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0);

		/* second write carries bits 33..4 (16-byte aligned) */
		pa = (uint32_t) ((uint64_t) sc->bsmbx.paddr >> 4) & 0x3fffffff;
		mbox_db.bits.ready = 0;
		mbox_db.bits.hi = 0;
		mbox_db.bits.address = pa;

		rc = oce_mbox_wait(sc, tmo_sec);

		if (rc == 0) {
			OCE_WRITE_REG32(sc, db, PD_MPU_MBOX_DB, mbox_db.dw0);

			/* wait for the firmware to consume the command */
			rc = oce_mbox_wait(sc, tmo_sec);

			oce_dma_sync(&sc->bsmbx, BUS_DMASYNC_POSTWRITE);
		}
	}

	return rc;
}
+
+/**
+ * @brief Mailbox common request header initialization
+ * @param hdr mailbox header
+ * @param dom domain
+ * @param port port
+ * @param subsys subsystem
+ * @param opcode opcode
+ * @param timeout timeout
+ * @param payload_len payload length
+ */
+void
+mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port,
+ uint8_t subsys, uint8_t opcode, uint32_t timeout, uint32_t payload_len,
+ uint8_t version)
+{
+ hdr->u0.req.opcode = opcode;
+ hdr->u0.req.subsystem = subsys;
+ hdr->u0.req.port_number = port;
+ hdr->u0.req.domain = dom;
+
+ hdr->u0.req.timeout = timeout;
+ hdr->u0.req.request_length = payload_len - sizeof(struct mbx_hdr);
+ hdr->u0.req.version = version;
+}
+
+/**
+ * @brief Function to initialize the hw with host endian information
+ * @param sc software handle to the device
+ * @returns 0 on success, ETIMEDOUT on failure
+ */
+int
+oce_mbox_init(struct oce_softc *sc)
+{
+ struct oce_bmbx *mbx;
+ uint8_t *ptr;
+ int ret = 0;
+
+ if (sc->flags & OCE_FLAGS_MBOX_ENDIAN_RQD) {
+ mbx = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
+ ptr = (uint8_t *) &mbx->mbx;
+
+ /* Endian Signature */
+ *ptr++ = 0xff;
+ *ptr++ = 0x12;
+ *ptr++ = 0x34;
+ *ptr++ = 0xff;
+ *ptr++ = 0xff;
+ *ptr++ = 0x56;
+ *ptr++ = 0x78;
+ *ptr = 0xff;
+
+ ret = oce_mbox_dispatch(sc, 0);
+ }
+
+ return ret;
+}
+
+/**
+ * @brief Function to get the firmware version
+ * @param sc software handle to the device
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_get_fw_version(struct oce_softc *sc)
+{
+ struct oce_mbx mbx;
+ struct mbx_get_common_fw_version *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_get_common_fw_version *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_get_common_fw_version),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_get_common_fw_version);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ bcopy(fwcmd->params.rsp.fw_ver_str, sc->fw_version, 32);
+
+ return 0;
+}
+
+/**
+ * @brief Firmware will send gracious notifications during
+ * attach only after sending first mcc commnad. We
+ * use MCC queue only for getting async and mailbox
+ * for sending cmds. So to get gracious notifications
+ * atleast send one dummy command on mcc.
+ */
+int
+oce_first_mcc_cmd(struct oce_softc *sc)
+{
+ struct oce_mbx *mbx;
+ struct oce_mq *mq = sc->mq;
+ struct mbx_get_common_fw_version *fwcmd;
+ uint32_t reg_value;
+
+ mbx = RING_GET_PRODUCER_ITEM_VA(mq->ring, struct oce_mbx);
+ bzero(mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_get_common_fw_version *)&mbx->payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_FW_VERSION,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_get_common_fw_version),
+ OCE_MBX_VER_V0);
+ mbx->u0.s.embedded = 1;
+ mbx->payload_length = sizeof(struct mbx_get_common_fw_version);
+ oce_dma_sync(&mq->ring->dma, BUS_DMASYNC_PREREAD |
+ BUS_DMASYNC_PREWRITE);
+ RING_PUT(mq->ring, 1);
+ reg_value = (1 << 16) | mq->mq_id;
+ OCE_WRITE_REG32(sc, db, PD_MQ_DB, reg_value);
+
+ return 0;
+}
+
/**
 * @brief Function to post a MBX to the bootstrap mailbox
 * @param sc software handle to the device
 * @param mbx pointer to the MBX to send; tag[0] carries the dispatch
 *	  timeout (seconds) on entry and is cleared before sending
 * @param mbxctx pointer to the mbx context structure, may be NULL
 * @returns 0 on success, error on failure
 */
int
oce_mbox_post(struct oce_softc *sc, struct oce_mbx *mbx, struct oce_mbx_ctx *mbxctx)
{
	struct oce_mbx *mb_mbx = NULL;
	struct oce_mq_cqe *mb_cqe = NULL;
	struct oce_bmbx *mb = NULL;
	int rc = 0;
	uint32_t tmo = 0;
	uint32_t cstatus = 0;
	uint32_t xstatus = 0;

	mb = OCE_DMAPTR(&sc->bsmbx, struct oce_bmbx);
	mb_mbx = &mb->mbx;

	/* get the tmo */
	tmo = mbx->tag[0];
	mbx->tag[0] = 0;

	/* copy mbx into mbox */
	bcopy(mbx, mb_mbx, sizeof(struct oce_mbx));

	/* now dispatch */
	rc = oce_mbox_dispatch(sc, tmo);
	if (rc == 0) {
		/*
		 * the command completed successfully. Now get the
		 * completion queue entry
		 */
		mb_cqe = &mb->cqe;
		DW_SWAP(u32ptr(&mb_cqe->u0.dw[0]), sizeof(struct oce_mq_cqe));

		/* copy mbox mbx back */
		bcopy(mb_mbx, mbx, sizeof(struct oce_mbx));

		/* pick up the mailbox status */
		cstatus = mb_cqe->u0.s.completion_status;
		xstatus = mb_cqe->u0.s.extended_status;

		/*
		 * store the mbx context in the cqe tag section so that
		 * the upper layer handling the cqe can associate the mbx
		 * with the response
		 */
		if (cstatus == 0 && mbxctx) {
			/* save context */
			mbxctx->mbx = mb_mbx;
			/* copies the pointer VALUE (bytes of mbxctx itself,
			 * taken via its address) into the tag field */
			bcopy(&mbxctx, mb_cqe->u0.s.mq_tag,
			    sizeof(struct oce_mbx_ctx *));
		}
	}

	return rc;
}
+
+/**
+ * @brief Function to read the mac address associated with an interface
+ * @param sc software handle to the device
+ * @param if_id interface id to read the address from
+ * @param perm set to 1 if reading the factory mac address.
+ * In this case if_id is ignored
+ * @param type type of the mac address, whether network or storage
+ * @param[out] mac [OUTPUT] pointer to a buffer containing the
+ * mac address when the command succeeds.
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_read_mac_addr(struct oce_softc *sc, uint32_t if_id, uint8_t perm,
+ uint8_t type, struct mac_address_format *mac)
+{
+ struct oce_mbx mbx;
+ struct mbx_query_common_iface_mac *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_query_common_iface_mac *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_IFACE_MAC,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_query_common_iface_mac),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.permanent = perm;
+ if (!perm)
+ fwcmd->params.req.if_id = (uint16_t) if_id;
+ else
+ fwcmd->params.req.if_id = 0;
+
+ fwcmd->params.req.type = type;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_query_common_iface_mac);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ /* copy the mac addres in the output parameter */
+ mac->size_of_struct = fwcmd->params.rsp.mac.size_of_struct;
+ bcopy(&fwcmd->params.rsp.mac.mac_addr[0], &mac->mac_addr[0],
+ mac->size_of_struct);
+
+ return 0;
+}
+
+/**
+ * @brief Function to query the fw attributes from the hw
+ * @param sc software handle to the device
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_get_fw_config(struct oce_softc *sc)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_query_fw_config *fwcmd;
+ int ret = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_common_query_fw_config *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_query_fw_config),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_query_fw_config);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ ret = oce_mbox_post(sc, &mbx, NULL);
+ if (ret)
+ return ret;
+
+ DW_SWAP(u32ptr(fwcmd), sizeof(struct mbx_common_query_fw_config));
+
+ sc->config_number = fwcmd->params.rsp.config_number;
+ sc->asic_revision = fwcmd->params.rsp.asic_revision;
+ sc->port_id = fwcmd->params.rsp.port_id;
+ sc->function_mode = fwcmd->params.rsp.function_mode;
+ sc->function_caps = fwcmd->params.rsp.function_caps;
+
+ if (fwcmd->params.rsp.ulp[0].ulp_mode & ULP_NIC_MODE) {
+ sc->max_tx_rings = fwcmd->params.rsp.ulp[0].nic_wq_tot;
+ sc->max_rx_rings = fwcmd->params.rsp.ulp[0].lro_rqid_tot;
+ } else {
+ sc->max_tx_rings = fwcmd->params.rsp.ulp[1].nic_wq_tot;
+ sc->max_rx_rings = fwcmd->params.rsp.ulp[1].lro_rqid_tot;
+ }
+
+ return 0;
+
+}
+
/**
 *
 * @brief function to create a device interface
 * @param sc software handle to the device
 * @param cap_flags capability flags
 * @param en_flags enable capability flags
 * @param vlan_tag optional vlan tag to associate with the if
 * @param mac_addr pointer to a buffer containing the mac address,
 *	  may be NULL (the firmware is then told the mac is invalid)
 * @param[out] if_id [OUTPUT] pointer to an integer to hold the ID of the
 *	  interface created
 * @returns 0 on success, EIO on failure
 */
int
oce_if_create(struct oce_softc *sc, uint32_t cap_flags, uint32_t en_flags,
    uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id)
{
	struct oce_mbx mbx;
	struct mbx_create_common_iface *fwcmd;
	int rc = 0;

	bzero(&mbx, sizeof(struct oce_mbx));

	fwcmd = (struct mbx_create_common_iface *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_IFACE,
				MBX_TIMEOUT_SEC,
				sizeof(struct mbx_create_common_iface),
				OCE_MBX_VER_V0);
	/* the header is byte-swapped on its own here; payload fields
	 * below are converted explicitly with htole*() instead */
	DW_SWAP(u32ptr(&fwcmd->hdr), sizeof(struct mbx_hdr));

	fwcmd->params.req.version = 0;
	fwcmd->params.req.cap_flags = htole32(cap_flags);
	fwcmd->params.req.enable_flags = htole32(en_flags);
	if (mac_addr != NULL) {
		bcopy(mac_addr, &fwcmd->params.req.mac_addr[0], 6);
		fwcmd->params.req.vlan_tag.u0.normal.vtag = htole16(vlan_tag);
		fwcmd->params.req.mac_invalid = 0;
	} else {
		fwcmd->params.req.mac_invalid = 1;
		/* NOTE(review): attach-style message without a newline */
		printf(": invalid mac");
	}

	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_iface);
	/* unlike sibling commands, only the request header area is
	 * swapped here (payload was prepared in little endian above) */
	DW_SWAP(u32ptr(&mbx), OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	if (rc)
		return rc;

	*if_id = letoh32(fwcmd->params.rsp.if_id);

	if (mac_addr != NULL)
		sc->pmac_id = letoh32(fwcmd->params.rsp.pmac_id);

	return 0;
}
+
+/**
+ * @brief Function to delete an interface
+ * @param sc software handle to the device
+ * @param if_id ID of the interface to delete
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_if_del(struct oce_softc *sc, uint32_t if_id)
+{
+ struct oce_mbx mbx;
+ struct mbx_destroy_common_iface *fwcmd;
+ int rc = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_destroy_common_iface *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_DESTROY_IFACE,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_destroy_common_iface),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.if_id = if_id;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_destroy_common_iface);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ return rc;
+}
+
+/**
+ * @brief Function to send the mbx command to configure vlan
+ * @param sc software handle to the device
+ * @param if_id interface identifier index
+ * @param vtag_arr array of vlan tags
+ * @param vtag_cnt number of elements in array
+ * @param untagged boolean TRUE/FLASE
+ * @param enable_promisc flag to enable/disable VLAN promiscuous mode
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_config_vlan(struct oce_softc *sc, uint32_t if_id,
+ struct normal_vlan *vtag_arr, uint8_t vtag_cnt, uint32_t untagged,
+ uint32_t enable_promisc)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_config_vlan *fwcmd;
+ int rc;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+ fwcmd = (struct mbx_common_config_vlan *)&mbx.payload;
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_CONFIG_IFACE_VLAN,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_config_vlan),
+ OCE_MBX_VER_V0);
+
+ fwcmd->params.req.if_id = (uint8_t) if_id;
+ fwcmd->params.req.promisc = (uint8_t) enable_promisc;
+ fwcmd->params.req.untagged = (uint8_t) untagged;
+ fwcmd->params.req.num_vlans = vtag_cnt;
+
+ if (!enable_promisc) {
+ bcopy(vtag_arr, fwcmd->params.req.tags.normal_vlans,
+ vtag_cnt * sizeof(struct normal_vlan));
+ }
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_config_vlan);
+ DW_SWAP(u32ptr(&mbx), (OCE_BMBX_RHDR_SZ + mbx.payload_length));
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ return rc;
+}
+
+/**
+ * @brief Function to set flow control capability in the hardware
+ * @param sc software handle to the device
+ * @param flow_control flow control flags to set
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_set_flow_control(struct oce_softc *sc, uint32_t flow_control)
+{
+ struct oce_mbx mbx;
+ struct mbx_common_get_set_flow_control *fwcmd =
+ (struct mbx_common_get_set_flow_control *)&mbx.payload;
+ int rc;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FLOW_CONTROL,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_common_get_set_flow_control),
+ OCE_MBX_VER_V0);
+
+ if (flow_control & OCE_FC_TX)
+ fwcmd->tx_flow_control = 1;
+
+ if (flow_control & OCE_FC_RX)
+ fwcmd->rx_flow_control = 1;
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_common_get_set_flow_control);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ return rc;
+}
+
+/**
+ * @brief Initialize the RSS CPU indirection table
+ *
+ * The table is used to choose the queue to place the incomming packets.
+ * Incomming packets are hashed. The lowest bits in the hash result
+ * are used as the index into the CPU indirection table.
+ * Each entry in the table contains the RSS CPU-ID returned by the NIC
+ * create. Based on the CPU ID, the receive completion is routed to
+ * the corresponding RSS CQs. (Non-RSS packets are always completed
+ * on the default (0) CQ).
+ *
+ * @param sc software handle to the device
+ * @param *fwcmd pointer to the rss mbox command
+ * @returns none
+ */
+int
+oce_rss_itbl_init(struct oce_softc *sc, struct mbx_config_nic_rss *fwcmd)
+{
+ int i = 0, j = 0, rc = 0;
+ uint8_t *tbl = fwcmd->params.req.cputable;
+
+
+ for (j = 0; j < sc->nrqs; j++) {
+ if (sc->rq[j]->cfg.is_rss_queue) {
+ tbl[i] = sc->rq[j]->rss_cpuid;
+ i = i + 1;
+ }
+ }
+ if (i == 0) {
+ printf("%s: error: Invalid number of RSS RQ's\n",
+ sc->dev.dv_xname);
+ rc = ENXIO;
+
+ }
+
+ /* fill log2 value indicating the size of the CPU table */
+ if (rc == 0)
+ fwcmd->params.req.cpu_tbl_sz_log2 = htole16(OCE_LOG2(i));
+
+ return rc;
+}
+
+/**
+ * @brief Function to set flow control capability in the hardware
+ * @param sc software handle to the device
+ * @param if_id interface id to read the address from
+ * @param enable_rss 0=disable, RSS_ENABLE_xxx flags otherwise
+ * @returns 0 on success, EIO on failure
+ */
+int
+oce_config_nic_rss(struct oce_softc *sc, uint32_t if_id, uint16_t enable_rss)
+{
+ int rc;
+ struct oce_mbx mbx;
+ struct mbx_config_nic_rss *fwcmd =
+ (struct mbx_config_nic_rss *)&mbx.payload;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+ OPCODE_NIC_CONFIG_RSS,
+ MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_config_nic_rss),
+ OCE_MBX_VER_V0);
+ if (enable_rss)
+ fwcmd->params.req.enable_rss = (RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV4 |
+ RSS_ENABLE_IPV6 |
+ RSS_ENABLE_TCP_IPV6);
+ fwcmd->params.req.flush = OCE_FLUSH;
+ fwcmd->params.req.if_id = htole32(if_id);
+
+ arc4random_buf(fwcmd->params.req.hash, sizeof(fwcmd->params.req.hash));
+
+ rc = oce_rss_itbl_init(sc, fwcmd);
+ if (rc == 0) {
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_config_nic_rss);
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+
+ }
+
+ return rc;
+}
+
+/**
+ * @brief RXF function to enable/disable device promiscuous mode
+ * @param sc software handle to the device
+ * @param enable enable/disable flag
+ * @returns 0 on success, EIO on failure
+ * @note
+ * The OPCODE_NIC_CONFIG_PROMISCUOUS command deprecated for Lancer.
+ * This function uses the COMMON_SET_IFACE_RX_FILTER command instead.
+ */
+int
+oce_rxf_set_promiscuous(struct oce_softc *sc, uint32_t enable)
+{
+ struct mbx_set_common_iface_rx_filter *fwcmd;
+ int sz = sizeof(struct mbx_set_common_iface_rx_filter);
+ iface_rx_filter_ctx_t *req;
+ struct oce_dma_mem sgl;
+ int rc;
+
+ /* allocate mbx payload's dma scatter/gather memory */
+ rc = oce_dma_alloc(sc, sz, &sgl, 0);
+ if (rc)
+ return rc;
+
+ fwcmd = OCE_DMAPTR(&sgl, struct mbx_set_common_iface_rx_filter);
+
+ req = &fwcmd->params.req;
+ req->iface_flags_mask = MBX_RX_IFACE_FLAGS_PROMISCUOUS |
+ MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ if (enable) {
+ req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS |
+ MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ }
+ req->if_id = sc->if_id;
+
+ rc = oce_set_common_iface_rx_filter(sc, &sgl);
+ oce_dma_free(sc, &sgl);
+
+ return rc;
+}
+
+/**
+ * @brief Post a COMMON_SET_IFACE_RX_FILTER command by scatter/gather
+ * @param sc software handle to the device
+ * @param sgl DMA memory holding the prepared request/response payload
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_set_common_iface_rx_filter(struct oce_softc *sc, struct oce_dma_mem *sgl)
+{
+	struct mbx_set_common_iface_rx_filter *fwcmd;
+	struct oce_mbx mbx;
+	int len = sizeof(struct mbx_set_common_iface_rx_filter);
+
+	fwcmd = OCE_DMAPTR(sgl, struct mbx_set_common_iface_rx_filter);
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_SET_IFACE_RX_FILTER, MBX_TIMEOUT_SEC, len,
+	    OCE_MBX_VER_V0);
+
+	oce_dma_sync(sgl, BUS_DMASYNC_PREWRITE);
+
+	/* Payload is too large to embed: pass it by reference */
+	bzero(&mbx, sizeof(struct oce_mbx));
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(sgl->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(sgl->paddr);
+	mbx.payload.u0.u1.sgl[0].length = len;
+	mbx.payload_length = len;
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	return oce_mbox_post(sc, &mbx, NULL);
+}
+
+/**
+ * @brief Query the current link configuration from the firmware
+ *
+ * Updates sc->link_status, sc->link_speed, sc->duplex and
+ * sc->qos_link_speed from the response.
+ *
+ * @param sc software handle to the device
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_get_link_status(struct oce_softc *sc)
+{
+	struct mbx_query_common_link_config *fwcmd;
+	struct link_status link;
+	struct oce_mbx mbx;
+	int err;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_query_common_link_config *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_QUERY_LINK_CONFIG, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_query_common_link_config), OCE_MBX_VER_V0);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_query_common_link_config);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	err = oce_mbox_post(sc, &mbx, NULL);
+	if (err) {
+		printf("%s: Could not get link speed: %d\n",
+		    sc->dev.dv_xname, err);
+		return err;
+	}
+
+	/* Interpret the response */
+	bcopy(&fwcmd->params.rsp, &link, sizeof(struct link_status));
+	link.logical_link_status = letoh32(link.logical_link_status);
+	link.qos_link_speed = letoh16(link.qos_link_speed);
+
+	if (link.logical_link_status == NTWK_LOGICAL_LINK_UP)
+		sc->link_status = NTWK_LOGICAL_LINK_UP;
+	else
+		sc->link_status = NTWK_LOGICAL_LINK_DOWN;
+
+	/* Values outside of 1-4 are not valid speed designators */
+	if (link.mac_speed > 0 && link.mac_speed < 5)
+		sc->link_speed = link.mac_speed;
+	else
+		sc->link_speed = 0;
+
+	sc->duplex = link.mac_duplex;
+
+	sc->qos_link_speed = (uint32_t)link.qos_link_speed * 10;
+
+	return 0;
+}
+
+/**
+ * @brief Fetch NIC statistics with the V0 (BE2) command variant
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA region receiving the statistics response
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_mbox_get_nic_stats_v0(struct oce_softc *sc, struct oce_dma_mem *pstats_dma_mem)
+{
+	struct mbx_get_nic_stats_v0 *fwcmd;
+	struct oce_mbx mbx;
+	int len = sizeof(struct mbx_get_nic_stats_v0);
+	int err;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v0);
+	bzero(fwcmd, len);
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC,
+	    OPCODE_NIC_GET_STATS, MBX_TIMEOUT_SEC, len, OCE_MBX_VER_V0);
+
+	/* Response is too large to embed: pass a scatter/gather element */
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = len;
+
+	mbx.payload_length = len;
+
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	err = oce_mbox_post(sc, &mbx, NULL);
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+
+	if (err) {
+		printf("%s: Could not get nic statistics: %d\n",
+		    sc->dev.dv_xname, err);
+	}
+
+	return err;
+}
+
+/**
+ * @brief Fetch NIC statistics with the V1 (BE3) command variant
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA region receiving the statistics response
+ * @returns 0 on success, error code from the mailbox post on failure
+ * @note this command is deprecated on Lancer; physical port statistics
+ *	are used there instead
+ */
+int
+oce_mbox_get_nic_stats(struct oce_softc *sc, struct oce_dma_mem *pstats_dma_mem)
+{
+	struct mbx_get_nic_stats *fwcmd;
+	struct oce_mbx mbx;
+	int len = sizeof(struct mbx_get_nic_stats);
+	int err;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats);
+	bzero(fwcmd, len);
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC,
+	    OPCODE_NIC_GET_STATS, MBX_TIMEOUT_SEC, len, OCE_MBX_VER_V1);
+
+	/* Stats are too large for an embedded response: use an SG element */
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = len;
+
+	mbx.payload_length = len;
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	err = oce_mbox_post(sc, &mbx, NULL);
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+	if (err) {
+		printf("%s: Could not get nic statistics: %d\n",
+		    sc->dev.dv_xname, err);
+	}
+	return err;
+}
+
+/**
+ * @brief Fetch physical port statistics (Lancer) into DMA memory
+ * @param sc software handle to the device
+ * @param pstats_dma_mem DMA region receiving the statistics response
+ * @param reset_stats non-zero requests the counters be cleared on read
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_mbox_get_pport_stats(struct oce_softc *sc,
+    struct oce_dma_mem *pstats_dma_mem, uint32_t reset_stats)
+{
+	struct mbx_get_pport_stats *fwcmd;
+	struct oce_mbx mbx;
+	int len = sizeof(struct mbx_get_pport_stats);
+	int err;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+	fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_pport_stats);
+	bzero(fwcmd, len);
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_NIC,
+	    OPCODE_NIC_GET_PPORT_STATS, MBX_TIMEOUT_SEC, len,
+	    OCE_MBX_VER_V0);
+
+	fwcmd->params.req.reset_stats = reset_stats;
+	/*
+	 * NOTE(review): port_number is filled from sc->if_id while the
+	 * statistics copy routines index ports by sc->port_id -- verify
+	 * that the interface id is really the intended selector here.
+	 */
+	fwcmd->params.req.port_number = sc->if_id;
+
+	/* Stats are too large for an embedded response: use an SG element */
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
+	mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
+	mbx.payload.u0.u1.sgl[0].length = len;
+
+	mbx.payload_length = len;
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	err = oce_mbox_post(sc, &mbx, NULL);
+	oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
+
+	if (err != 0) {
+		printf("%s: Could not get physical port statistics: %d\n",
+		    sc->dev.dv_xname, err);
+	}
+
+	return err;
+}
+
+/**
+ * @brief Update the multicast filter with the addresses staged in DMA
+ *	memory by the caller
+ * @param sc software handle to the device
+ * @param pdma_mem DMA region holding the prepared command payload
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_update_multicast(struct oce_softc *sc, struct oce_dma_mem *pdma_mem)
+{
+	struct mbx_set_common_iface_multicast *req;
+	struct oce_mq_sge *sge;
+	struct oce_mbx mbx;
+	int len = sizeof(struct mbx_set_common_iface_multicast);
+
+	req = OCE_DMAPTR(pdma_mem, struct mbx_set_common_iface_multicast);
+	mbx_common_req_hdr_init(&req->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_SET_IFACE_MULTICAST, MBX_TIMEOUT_SEC, len,
+	    OCE_MBX_VER_V0);
+
+	/* The payload lives in DMA memory: hand it over by reference */
+	bzero(&mbx, sizeof(struct oce_mbx));
+	mbx.u0.s.embedded = 0;
+	mbx.u0.s.sge_count = 1;
+	mbx.payload_length = len;
+	sge = &mbx.payload.u0.u1.sgl[0];
+	sge->pa_hi = htole32(upper_32_bits(pdma_mem->paddr));
+	sge->pa_lo = htole32((pdma_mem->paddr) & 0xFFFFFFFF);
+	sge->length = htole32(mbx.payload_length);
+
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	return oce_mbox_post(sc, &mbx, NULL);
+}
+
+/**
+ * @brief Program an additional MAC address on an interface
+ * @param sc software handle to the device
+ * @param mac_addr 6 byte MAC address to add
+ * @param if_id interface to attach the address to
+ * @param[out] pmac_id firmware handle for the programmed address,
+ *	needed later to delete it again
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_mbox_macaddr_add(struct oce_softc *sc, uint8_t *mac_addr, uint32_t if_id,
+    uint32_t *pmac_id)
+{
+	struct mbx_add_common_iface_mac *fwcmd;
+	struct oce_mbx mbx;
+	int err;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_add_common_iface_mac *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_ADD_IFACE_MAC, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_add_common_iface_mac), OCE_MBX_VER_V0);
+
+	fwcmd->params.req.if_id = (uint16_t)if_id;
+	bcopy(mac_addr, fwcmd->params.req.mac_address, 6);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_add_common_iface_mac);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	err = oce_mbox_post(sc, &mbx, NULL);
+	if (err)
+		return err;
+
+	*pmac_id = fwcmd->params.rsp.pmac_id;
+
+	return 0;
+}
+
+/**
+ * @brief Remove a previously programmed MAC address from an interface
+ * @param sc software handle to the device
+ * @param if_id interface the address was attached to
+ * @param pmac_id firmware handle returned by oce_mbox_macaddr_add()
+ * @returns 0 on success, error code from the mailbox post on failure
+ */
+int
+oce_mbox_macaddr_del(struct oce_softc *sc, uint32_t if_id, uint32_t pmac_id)
+{
+	struct mbx_del_common_iface_mac *fwcmd;
+	struct oce_mbx mbx;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_del_common_iface_mac *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_DEL_IFACE_MAC, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_del_common_iface_mac), OCE_MBX_VER_V0);
+
+	fwcmd->params.req.if_id = (uint16_t)if_id;
+	fwcmd->params.req.pmac_id = pmac_id;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_del_common_iface_mac);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	return oce_mbox_post(sc, &mbx, NULL);
+}
+
+/**
+ * @brief Check for BE3 native ERX API support
+ *
+ * Asks the firmware to enable the native receive API and records
+ * whether it was granted in sc->be3_native.  The command is allowed
+ * to fail (e.g. in legacy mode), so a failure is logged but never
+ * propagated to the caller.
+ *
+ * @param sc software handle to the device
+ * @returns always 0
+ */
+int
+oce_mbox_check_native_mode(struct oce_softc *sc)
+{
+	struct oce_mbx mbx;
+	struct mbx_common_set_function_cap *fwcmd;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_common_set_function_cap *)&mbx.payload;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_SET_FUNCTIONAL_CAPS,
+	    MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_common_set_function_cap),
+	    OCE_MBX_VER_V0);
+
+	fwcmd->params.req.valid_capability_flags = CAP_SW_TIMESTAMPS |
+	    CAP_BE3_NATIVE_ERX_API;
+
+	fwcmd->params.req.capability_flags = CAP_BE3_NATIVE_ERX_API;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_common_set_function_cap);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc != 0)
+		printf("%s: set functional caps mailbox failed: %d\n",
+		    sc->dev.dv_xname, rc);
+
+	sc->be3_native = fwcmd->params.rsp.capability_flags &
+	    CAP_BE3_NATIVE_ERX_API;
+
+	return 0;
+}
+
+/**
+ * @brief Create a receive queue in the firmware
+ * @param rq pointer to the receive queue structure
+ * @returns 0 on success, EIO or mailbox error code on failure
+ */
+int
+oce_mbox_create_rq(struct oce_rq *rq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_nic_rq *fwcmd;
+	struct oce_softc *sc = rq->parent;
+	int num_pages, version, rc = 0;
+
+	if (rq->qstate == QCREATED)
+		return 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
+	/* XE201 (Lancer) requires the V1 variant of the command */
+	if (IS_XE201(sc))
+		version = OCE_MBX_VER_V1;
+	else
+		version = OCE_MBX_VER_V0;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_NIC,
+	    OPCODE_NIC_CREATE_RQ, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_create_nic_rq),
+	    version);
+
+	num_pages = oce_page_list(sc, rq->ring, &fwcmd->params.req.pages[0],
+	    nitems(fwcmd->params.req.pages));
+	if (!num_pages) {
+		printf("%s: failed to load the rq ring\n", __func__);
+		rc = EIO;	/* don't report success to the caller */
+		goto out;
+	}
+
+	if (version == OCE_MBX_VER_V1) {
+		/* V1 expects the fragment size in units of 2KB */
+		fwcmd->params.req.frag_size = rq->cfg.frag_size / 2048;
+		fwcmd->params.req.page_size = 1;
+	} else
+		fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
+	fwcmd->params.req.num_pages = num_pages;
+	fwcmd->params.req.cq_id = rq->cq->cq_id;
+	fwcmd->params.req.if_id = sc->if_id;
+	fwcmd->params.req.max_frame_size = rq->cfg.mtu;
+	fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_nic_rq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto out;
+
+	rq->rq_id = letoh16(fwcmd->params.rsp.rq_id);
+	rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid;
+
+out:
+	return rc;
+}
+
+/**
+ * @brief Create a work (transmit) queue in the firmware
+ * @param wq pointer to the work queue structure
+ * @returns 0 on success, EIO or mailbox error code on failure
+ */
+int
+oce_mbox_create_wq(struct oce_wq *wq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_nic_wq *fwcmd;
+	struct oce_softc *sc = wq->parent;
+	int num_pages, version, rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
+	/* XE201 (Lancer) requires the V1 variant of the command */
+	if (IS_XE201(sc)) {
+		version = OCE_MBX_VER_V1;
+		fwcmd->params.req.if_id = sc->if_id;
+	} else
+		version = OCE_MBX_VER_V0;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_NIC,
+	    OPCODE_NIC_CREATE_WQ, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_create_nic_wq),
+	    version);
+
+	num_pages = oce_page_list(sc, wq->ring, &fwcmd->params.req.pages[0],
+	    nitems(fwcmd->params.req.pages));
+	if (!num_pages) {
+		printf("%s: failed to load the wq ring\n", __func__);
+		rc = EIO;	/* don't report success to the caller */
+		goto out;
+	}
+
+	fwcmd->params.req.nic_wq_type = wq->cfg.wq_type;
+	fwcmd->params.req.num_pages = num_pages;
+	fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
+	fwcmd->params.req.cq_id = htole16(wq->cq->cq_id);
+	fwcmd->params.req.ulp_num = 1;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_nic_wq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto out;
+
+	wq->wq_id = letoh16(fwcmd->params.rsp.wq_id);
+
+out:
+	/* propagate errors instead of unconditionally returning success */
+	return rc;
+}
+
+/**
+ * @brief Create a mailbox (management) queue in the firmware
+ * @param mq pointer to the mailbox queue structure
+ * @returns 0 on success, EIO or mailbox error code on failure
+ */
+int
+oce_mbox_create_mq(struct oce_mq *mq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_mq_ex *fwcmd = NULL;
+	struct oce_softc *sc = mq->parent;
+	oce_mq_ext_ctx_t *ctx;
+	int num_pages, version, rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
+	version = OCE_MBX_VER_V0;
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_CREATE_MQ_EXT,
+	    MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_create_common_mq_ex),
+	    version);
+
+	num_pages = oce_page_list(sc, mq->ring, &fwcmd->params.req.pages[0],
+	    nitems(fwcmd->params.req.pages));
+	if (!num_pages) {
+		printf("%s: failed to load the mq ring\n", __func__);
+		rc = EIO;	/* don't report success to the caller */
+		goto out;
+	}
+
+	ctx = &fwcmd->params.req.context;
+	ctx->v0.num_pages = num_pages;
+	ctx->v0.cq_id = mq->cq->cq_id;
+	ctx->v0.ring_size = OCE_LOG2(mq->cfg.q_len) + 1;
+	ctx->v0.valid = 1;
+	/* Subscribe to all async events the firmware can deliver */
+	ctx->v0.async_evt_bitmap = 0xffffffff;
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
+	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto out;
+
+	mq->mq_id = letoh16(fwcmd->params.rsp.mq_id);
+
+out:
+	return rc;
+}
+
+/**
+ * @brief Create an event queue in the firmware
+ * @param eq pointer to the event queue structure
+ * @returns 0 on success, EIO or mailbox error code on failure
+ */
+int
+oce_mbox_create_eq(struct oce_eq *eq)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_eq *fwcmd;
+	struct oce_softc *sc = eq->parent;
+	int rc = 0;
+	uint32_t num_pages;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_CREATE_EQ, MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_create_common_eq),
+	    OCE_MBX_VER_V0);
+
+	num_pages = oce_page_list(sc, eq->ring, &fwcmd->params.req.pages[0],
+	    nitems(fwcmd->params.req.pages));
+	if (!num_pages) {
+		printf("%s: failed to load the eq ring\n", __func__);
+		rc = EIO;	/* don't report success to the caller */
+		goto out;
+	}
+
+	fwcmd->params.req.ctx.num_pages = htole16(num_pages);
+	fwcmd->params.req.ctx.valid = 1;
+	/* 0 encodes 4 byte EQ entries, 1 encodes larger entries */
+	fwcmd->params.req.ctx.size = (eq->eq_cfg.item_size == 4) ? 0 : 1;
+	fwcmd->params.req.ctx.count = OCE_LOG2(eq->eq_cfg.q_len / 256);
+	fwcmd->params.req.ctx.armed = 0;
+	fwcmd->params.req.ctx.delay_mult = htole32(eq->eq_cfg.cur_eqd);
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_eq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto out;
+
+	eq->eq_id = letoh16(fwcmd->params.rsp.eq_id);
+
+out:
+	return rc;
+}
+
+/**
+ * @brief Create a completion queue in the firmware
+ * @param cq pointer to the completion queue structure
+ * @param ncoalesce coalescing watermark written into the CQ context
+ * @param is_eventable whether the CQ raises events on its EQ
+ * @returns 0 on success, EIO or mailbox error code on failure
+ */
+int
+oce_mbox_create_cq(struct oce_cq *cq, uint32_t ncoalesce,
+    uint32_t is_eventable)
+{
+	struct oce_mbx mbx;
+	struct mbx_create_common_cq *fwcmd;
+	struct oce_softc *sc = cq->parent;
+	uint8_t version;
+	oce_cq_ctx_t *ctx;
+	uint32_t num_pages, page_size;
+	int rc = 0;
+
+	bzero(&mbx, sizeof(struct oce_mbx));
+
+	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
+
+	/* XE201 (Lancer) requires the V2 variant of the command */
+	if (IS_XE201(sc))
+		version = OCE_MBX_VER_V2;
+	else
+		version = OCE_MBX_VER_V0;
+
+	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+	    MBX_SUBSYSTEM_COMMON,
+	    OPCODE_COMMON_CREATE_CQ,
+	    MBX_TIMEOUT_SEC,
+	    sizeof(struct mbx_create_common_cq),
+	    version);
+
+	num_pages = oce_page_list(sc, cq->ring, &fwcmd->params.req.pages[0],
+	    nitems(fwcmd->params.req.pages));
+	if (!num_pages) {
+		printf("%s: failed to load the cq ring\n", __func__);
+		rc = EIO;	/* don't report success to the caller */
+		goto out;
+	}
+
+	page_size = 1; /* 1 for 4K */
+
+	ctx = &fwcmd->params.req.cq_ctx;
+
+	if (version == OCE_MBX_VER_V2) {
+		ctx->v2.num_pages = htole16(num_pages);
+		ctx->v2.page_size = page_size;
+		ctx->v2.eventable = is_eventable;
+		ctx->v2.valid = 1;
+		ctx->v2.count = OCE_LOG2(cq->cq_cfg.q_len / 256);
+		ctx->v2.nodelay = cq->cq_cfg.nodelay;
+		ctx->v2.coalesce_wm = ncoalesce;
+		ctx->v2.armed = 0;
+		ctx->v2.eq_id = cq->eq->eq_id;
+		if (ctx->v2.count == 3) {
+			/* cap the CQE count at the 4K ring maximum */
+			if (cq->cq_cfg.q_len > (4*1024)-1)
+				ctx->v2.cqe_count = (4*1024)-1;
+			else
+				ctx->v2.cqe_count = cq->cq_cfg.q_len;
+		}
+	} else {
+		ctx->v0.num_pages = htole16(num_pages);
+		ctx->v0.eventable = is_eventable;
+		ctx->v0.valid = 1;
+		ctx->v0.count = OCE_LOG2(cq->cq_cfg.q_len / 256);
+		ctx->v0.nodelay = cq->cq_cfg.nodelay;
+		ctx->v0.coalesce_wm = ncoalesce;
+		ctx->v0.armed = 0;
+		ctx->v0.eq_id = cq->eq->eq_id;
+	}
+
+	mbx.u0.s.embedded = 1;
+	mbx.payload_length = sizeof(struct mbx_create_common_cq);
+
+	rc = oce_mbox_post(sc, &mbx, NULL);
+	if (rc)
+		goto out;
+
+	cq->cq_id = letoh16(fwcmd->params.rsp.cq_id);
+
+out:
+	return rc;
+}
+
+/**
+ * @brief Accumulate per-queue RX/TX counters into the driver totals
+ * @param sc software handle to the device
+ */
+void
+oce_refresh_queue_stats(struct oce_softc *sc)
+{
+	struct oce_drv_stats *stats = &sc->oce_stats_info;
+	struct oce_rq *rq;
+	struct oce_wq *wq;
+	int i;
+
+	/* Sum up receive counters over all receive queues */
+	for (i = 0; i < sc->nrqs; i++) {
+		rq = sc->rq[i];
+		stats->rx.t_rx_pkts += rq->rx_stats.rx_pkts;
+		stats->rx.t_rx_bytes += rq->rx_stats.rx_bytes;
+		stats->rx.t_rx_frags += rq->rx_stats.rx_frags;
+		stats->rx.t_rx_mcast_pkts += rq->rx_stats.rx_mcast_pkts;
+		stats->rx.t_rx_ucast_pkts += rq->rx_stats.rx_ucast_pkts;
+		stats->rx.t_rxcp_errs += rq->rx_stats.rxcp_err;
+	}
+
+	/* Sum up transmit counters over all work queues */
+	for (i = 0; i < sc->nwqs; i++) {
+		wq = sc->wq[i];
+		stats->tx.t_tx_reqs += wq->tx_stats.tx_reqs;
+		stats->tx.t_tx_stops += wq->tx_stats.tx_stops;
+		stats->tx.t_tx_wrbs += wq->tx_stats.tx_wrbs;
+		stats->tx.t_tx_compl += wq->tx_stats.tx_compl;
+		stats->tx.t_tx_bytes += wq->tx_stats.tx_bytes;
+		stats->tx.t_tx_pkts += wq->tx_stats.tx_pkts;
+		stats->tx.t_ipv6_ext_hdr_tx_drop +=
+		    wq->tx_stats.ipv6_ext_hdr_tx_drop;
+	}
+}
+
+/**
+ * @brief Copy XE201 (Lancer) physical port statistics from the DMA
+ *	response buffer into the softc statistics block
+ * @param sc software handle to the device
+ */
+void
+copy_stats_to_sc_xe201(struct oce_softc *sc)
+{
+	struct oce_xe201_stats *adapter_stats;
+	struct mbx_get_pport_stats *nic_mbx;
+	struct pport_stats *port_stats;
+
+	/* The statistics response was DMAed into sc->stats_mem */
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_pport_stats);
+	port_stats = &nic_mbx->params.rsp.pps;
+	adapter_stats = &sc->oce_stats_info.u0.xe201;
+
+	/* Transmit counters */
+	adapter_stats->tx_pkts = port_stats->tx_pkts;
+	adapter_stats->tx_unicast_pkts = port_stats->tx_unicast_pkts;
+	adapter_stats->tx_multicast_pkts = port_stats->tx_multicast_pkts;
+	adapter_stats->tx_broadcast_pkts = port_stats->tx_broadcast_pkts;
+	adapter_stats->tx_bytes = port_stats->tx_bytes;
+	adapter_stats->tx_unicast_bytes = port_stats->tx_unicast_bytes;
+	adapter_stats->tx_multicast_bytes = port_stats->tx_multicast_bytes;
+	adapter_stats->tx_broadcast_bytes = port_stats->tx_broadcast_bytes;
+	adapter_stats->tx_discards = port_stats->tx_discards;
+	adapter_stats->tx_errors = port_stats->tx_errors;
+	adapter_stats->tx_pause_frames = port_stats->tx_pause_frames;
+	adapter_stats->tx_pause_on_frames = port_stats->tx_pause_on_frames;
+	adapter_stats->tx_pause_off_frames = port_stats->tx_pause_off_frames;
+	adapter_stats->tx_internal_mac_errors =
+	    port_stats->tx_internal_mac_errors;
+	adapter_stats->tx_control_frames = port_stats->tx_control_frames;
+	adapter_stats->tx_pkts_64_bytes = port_stats->tx_pkts_64_bytes;
+	adapter_stats->tx_pkts_65_to_127_bytes =
+	    port_stats->tx_pkts_65_to_127_bytes;
+	adapter_stats->tx_pkts_128_to_255_bytes =
+	    port_stats->tx_pkts_128_to_255_bytes;
+	adapter_stats->tx_pkts_256_to_511_bytes =
+	    port_stats->tx_pkts_256_to_511_bytes;
+	adapter_stats->tx_pkts_512_to_1023_bytes =
+	    port_stats->tx_pkts_512_to_1023_bytes;
+	adapter_stats->tx_pkts_1024_to_1518_bytes =
+	    port_stats->tx_pkts_1024_to_1518_bytes;
+	adapter_stats->tx_pkts_1519_to_2047_bytes =
+	    port_stats->tx_pkts_1519_to_2047_bytes;
+	adapter_stats->tx_pkts_2048_to_4095_bytes =
+	    port_stats->tx_pkts_2048_to_4095_bytes;
+	adapter_stats->tx_pkts_4096_to_8191_bytes =
+	    port_stats->tx_pkts_4096_to_8191_bytes;
+	adapter_stats->tx_pkts_8192_to_9216_bytes =
+	    port_stats->tx_pkts_8192_to_9216_bytes;
+	adapter_stats->tx_lso_pkts = port_stats->tx_lso_pkts;
+	/* Receive counters */
+	adapter_stats->rx_pkts = port_stats->rx_pkts;
+	adapter_stats->rx_unicast_pkts = port_stats->rx_unicast_pkts;
+	adapter_stats->rx_multicast_pkts = port_stats->rx_multicast_pkts;
+	adapter_stats->rx_broadcast_pkts = port_stats->rx_broadcast_pkts;
+	adapter_stats->rx_bytes = port_stats->rx_bytes;
+	adapter_stats->rx_unicast_bytes = port_stats->rx_unicast_bytes;
+	adapter_stats->rx_multicast_bytes = port_stats->rx_multicast_bytes;
+	adapter_stats->rx_broadcast_bytes = port_stats->rx_broadcast_bytes;
+	adapter_stats->rx_unknown_protos = port_stats->rx_unknown_protos;
+	adapter_stats->rx_discards = port_stats->rx_discards;
+	adapter_stats->rx_errors = port_stats->rx_errors;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_alignment_errors = port_stats->rx_alignment_errors;
+	adapter_stats->rx_symbol_errors = port_stats->rx_symbol_errors;
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_pause_on_frames = port_stats->rx_pause_on_frames;
+	adapter_stats->rx_pause_off_frames = port_stats->rx_pause_off_frames;
+	adapter_stats->rx_frames_too_long = port_stats->rx_frames_too_long;
+	adapter_stats->rx_internal_mac_errors =
+	    port_stats->rx_internal_mac_errors;
+	adapter_stats->rx_undersize_pkts = port_stats->rx_undersize_pkts;
+	adapter_stats->rx_oversize_pkts = port_stats->rx_oversize_pkts;
+	adapter_stats->rx_fragment_pkts = port_stats->rx_fragment_pkts;
+	adapter_stats->rx_jabbers = port_stats->rx_jabbers;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_control_frames_unknown_opcode =
+	    port_stats->rx_control_frames_unknown_opcode;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_out_of_range_errors =
+	    port_stats->rx_out_of_range_errors;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_vlan_mismatch_errors =
+	    port_stats->rx_vlan_mismatch_errors;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_dropped_invalid_tcp_length =
+	    port_stats->rx_dropped_invalid_tcp_length;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errors =
+	    port_stats->rx_ip_checksum_errors;
+	adapter_stats->rx_tcp_checksum_errors =
+	    port_stats->rx_tcp_checksum_errors;
+	adapter_stats->rx_udp_checksum_errors =
+	    port_stats->rx_udp_checksum_errors;
+	adapter_stats->rx_non_rss_pkts = port_stats->rx_non_rss_pkts;
+	adapter_stats->rx_ipv4_pkts = port_stats->rx_ipv4_pkts;
+	adapter_stats->rx_ipv6_pkts = port_stats->rx_ipv6_pkts;
+	adapter_stats->rx_ipv4_bytes = port_stats->rx_ipv4_bytes;
+	adapter_stats->rx_ipv6_bytes = port_stats->rx_ipv6_bytes;
+	adapter_stats->rx_nic_pkts = port_stats->rx_nic_pkts;
+	adapter_stats->rx_tcp_pkts = port_stats->rx_tcp_pkts;
+	adapter_stats->rx_iscsi_pkts = port_stats->rx_iscsi_pkts;
+	adapter_stats->rx_management_pkts = port_stats->rx_management_pkts;
+	adapter_stats->rx_switched_unicast_pkts =
+	    port_stats->rx_switched_unicast_pkts;
+	adapter_stats->rx_switched_multicast_pkts =
+	    port_stats->rx_switched_multicast_pkts;
+	adapter_stats->rx_switched_broadcast_pkts =
+	    port_stats->rx_switched_broadcast_pkts;
+	adapter_stats->num_forwards = port_stats->num_forwards;
+	adapter_stats->rx_fifo_overflow = port_stats->rx_fifo_overflow;
+	adapter_stats->rx_input_fifo_overflow =
+	    port_stats->rx_input_fifo_overflow;
+	adapter_stats->rx_drops_too_many_frags =
+	    port_stats->rx_drops_too_many_frags;
+	adapter_stats->rx_drops_invalid_queue =
+	    port_stats->rx_drops_invalid_queue;
+	adapter_stats->rx_drops_mtu = port_stats->rx_drops_mtu;
+	adapter_stats->rx_pkts_64_bytes = port_stats->rx_pkts_64_bytes;
+	adapter_stats->rx_pkts_65_to_127_bytes =
+	    port_stats->rx_pkts_65_to_127_bytes;
+	adapter_stats->rx_pkts_128_to_255_bytes =
+	    port_stats->rx_pkts_128_to_255_bytes;
+	adapter_stats->rx_pkts_256_to_511_bytes =
+	    port_stats->rx_pkts_256_to_511_bytes;
+	adapter_stats->rx_pkts_512_to_1023_bytes =
+	    port_stats->rx_pkts_512_to_1023_bytes;
+	adapter_stats->rx_pkts_1024_to_1518_bytes =
+	    port_stats->rx_pkts_1024_to_1518_bytes;
+	adapter_stats->rx_pkts_1519_to_2047_bytes =
+	    port_stats->rx_pkts_1519_to_2047_bytes;
+	adapter_stats->rx_pkts_2048_to_4095_bytes =
+	    port_stats->rx_pkts_2048_to_4095_bytes;
+	adapter_stats->rx_pkts_4096_to_8191_bytes =
+	    port_stats->rx_pkts_4096_to_8191_bytes;
+	adapter_stats->rx_pkts_8192_to_9216_bytes =
+	    port_stats->rx_pkts_8192_to_9216_bytes;
+}
+
+/**
+ * @brief Copy BE2 NIC statistics (V0 response layout) from the DMA
+ *	response buffer into the softc statistics block
+ * @param sc software handle to the device
+ */
+void
+copy_stats_to_sc_be2(struct oce_softc *sc)
+{
+	struct oce_be_stats *adapter_stats;
+	struct oce_pmem_stats *pmem;
+	struct oce_rxf_stats_v0 *rxf_stats;
+	struct oce_port_rxf_stats_v0 *port_stats;
+	struct mbx_get_nic_stats_v0 *nic_mbx;
+	uint32_t port = sc->port_id;
+
+	/* The statistics response was DMAed into sc->stats_mem */
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v0);
+	pmem = &nic_mbx->params.rsp.stats.pmem;
+	rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+	port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+	adapter_stats = &sc->oce_stats_info.u0.be;
+
+	/* Update stats */
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	adapter_stats->rxpp_fifo_overflow_drop =
+	    port_stats->rxpp_fifo_overflow_drop;
+	adapter_stats->rx_dropped_tcp_length =
+	    port_stats->rx_dropped_tcp_length;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_input_fifo_overflow_drop =
+	    port_stats->rx_input_fifo_overflow_drop;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_alignment_symbol_errors =
+	    port_stats->rx_alignment_symbol_errors;
+	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+
+	/*
+	 * NOTE(review): the port 0/1 selection below keys off sc->if_id
+	 * while the port[] indexing above uses sc->port_id -- verify that
+	 * the interface id is really the intended selector here.
+	 */
+	if (sc->if_id)
+		adapter_stats->jabber_events = rxf_stats->port1_jabber_events;
+	else
+		adapter_stats->jabber_events = rxf_stats->port0_jabber_events;
+
+	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	adapter_stats->rx_drops_no_tpre_descr =
+	    rxf_stats->rx_drops_no_tpre_descr;
+	adapter_stats->rx_drops_too_many_frags =
+	    rxf_stats->rx_drops_too_many_frags;
+	adapter_stats->eth_red_drops = pmem->eth_red_drops;
+}
+
+/**
+ * @brief Copy BE3 NIC statistics (V1 response layout) from the DMA
+ *	response buffer into the softc statistics block
+ * @param sc software handle to the device
+ */
+void
+copy_stats_to_sc_be3(struct oce_softc *sc)
+{
+	struct oce_be_stats *adapter_stats;
+	struct oce_pmem_stats *pmem;
+	struct oce_rxf_stats_v1 *rxf_stats;
+	struct oce_port_rxf_stats_v1 *port_stats;
+	struct mbx_get_nic_stats *nic_mbx;
+	uint32_t port = sc->port_id;
+
+	/* The statistics response was DMAed into sc->stats_mem */
+	nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats);
+	pmem = &nic_mbx->params.rsp.stats.pmem;
+	rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+	port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+	adapter_stats = &sc->oce_stats_info.u0.be;
+
+	/* Update stats */
+	adapter_stats->pmem_fifo_overflow_drop =
+	    port_stats->pmem_fifo_overflow_drop;
+	adapter_stats->rx_priority_pause_frames =
+	    port_stats->rx_priority_pause_frames;
+	adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+	adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+	adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+	adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+	adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+	adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+	adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+	adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+	adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+	adapter_stats->rx_dropped_tcp_length =
+	    port_stats->rx_dropped_tcp_length;
+	adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+	adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+	adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+	adapter_stats->rx_dropped_header_too_small =
+	    port_stats->rx_dropped_header_too_small;
+	adapter_stats->rx_input_fifo_overflow_drop =
+	    port_stats->rx_input_fifo_overflow_drop;
+	adapter_stats->rx_address_match_errors =
+	    port_stats->rx_address_match_errors;
+	adapter_stats->rx_alignment_symbol_errors =
+	    port_stats->rx_alignment_symbol_errors;
+	adapter_stats->rxpp_fifo_overflow_drop =
+	    port_stats->rxpp_fifo_overflow_drop;
+	adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+	adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+	adapter_stats->jabber_events = port_stats->jabber_events;
+
+	adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+	adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+	adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+	adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+	adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+	adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+	adapter_stats->rx_drops_no_tpre_descr =
+	    rxf_stats->rx_drops_no_tpre_descr;
+	adapter_stats->rx_drops_too_many_frags =
+	    rxf_stats->rx_drops_too_many_frags;
+
+	adapter_stats->eth_red_drops = pmem->eth_red_drops;
+}
+
+/**
+ * @brief Allocate DMA memory sized for this chip's statistics command
+ * @param sc software handle to the device
+ * @returns 0 on success, error code from oce_dma_alloc on failure
+ */
+int
+oce_stats_init(struct oce_softc *sc)
+{
+	int sz;
+
+	/* Pick the response layout matching the chip generation */
+	if (!IS_BE(sc))
+		sz = sizeof(struct mbx_get_pport_stats);
+	else if (sc->flags & OCE_FLAGS_BE2)
+		sz = sizeof(struct mbx_get_nic_stats_v0);
+	else
+		sz = sizeof(struct mbx_get_nic_stats);
+
+	return oce_dma_alloc(sc, sz, &sc->stats_mem, 0);
+}
+
+/**
+ * @brief Release the statistics DMA memory allocated by oce_stats_init()
+ * @param sc software handle to the device
+ */
+void
+oce_stats_free(struct oce_softc *sc)
+{
+	oce_dma_free(sc, &sc->stats_mem);
+}
+
+/**
+ * @brief Fetch fresh hardware statistics and copy them into the softc
+ * @param sc software handle to the device
+ * @returns 0 on success, error code from the statistics mailbox on failure
+ */
+int
+oce_refresh_nic_stats(struct oce_softc *sc)
+{
+	int err;
+
+	if (!IS_BE(sc)) {
+		/* Lancer reports statistics per physical port */
+		err = oce_mbox_get_pport_stats(sc, &sc->stats_mem, 0);
+		if (err == 0)
+			copy_stats_to_sc_xe201(sc);
+	} else if (sc->flags & OCE_FLAGS_BE2) {
+		err = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
+		if (err == 0)
+			copy_stats_to_sc_be2(sc);
+	} else {
+		err = oce_mbox_get_nic_stats(sc, &sc->stats_mem);
+		if (err == 0)
+			copy_stats_to_sc_be3(sc);
+	}
+
+	return err;
+}
diff --git a/sys/dev/pci/ocereg.h b/sys/dev/pci/ocereg.h
new file mode 100644
index 00000000000..629f075bd1b
--- /dev/null
+++ b/sys/dev/pci/ocereg.h
@@ -0,0 +1,3409 @@
+/* $OpenBSD: ocereg.h,v 1.1 2012/08/02 17:35:52 mikeb Exp $ */
+
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#undef _BIG_ENDIAN /* TODO */
+
+#define OC_CNA_GEN2 0x2
+#define OC_CNA_GEN3 0x3
+#define DEVID_TIGERSHARK 0x700
+#define DEVID_TOMCAT 0x710
+
+#define OCE_DEV_BE2_CFG_BAR 0x14
+#define OCE_DEV_CFG_BAR 0x10
+#define OCE_PCI_CSR_BAR 0x18
+#define OCE_PCI_DB_BAR 0x20
+
+/* PCI CSR offsets */
+#define PCICFG_F1_CSR 0x0 /* F1 for NIC */
+#define PCICFG_SEMAPHORE 0xbc
+#define PCICFG_SOFT_RESET 0x5c
+#define PCICFG_UE_STATUS_HI_MASK 0xac
+#define PCICFG_UE_STATUS_LO_MASK 0xa8
+#define PCICFG_ONLINE0 0xb0
+#define PCICFG_ONLINE1 0xb4
+#define INTR_EN 0x20000000
+#define IMAGE_TRANSFER_SIZE (32 * 1024) /* 32K at a time */
+
+/* CSR register offsets */
+#define MPU_EP_CONTROL 0
+#define MPU_EP_SEMAPHORE_BE3 0xac
+#define MPU_EP_SEMAPHORE_XE201 0x400
+#define MPU_EP_SEMAPHORE(sc) \
+ ((IS_BE(sc)) ? MPU_EP_SEMAPHORE_BE3 : MPU_EP_SEMAPHORE_XE201)
+#define PCICFG_INTR_CTRL 0xfc
+#define HOSTINTR_MASK (1 << 29)
+#define HOSTINTR_PFUNC_SHIFT 26
+#define HOSTINTR_PFUNC_MASK 7
+
+/* POST status reg struct */
+#define POST_STAGE_POWER_ON_RESET 0x00
+#define POST_STAGE_AWAITING_HOST_RDY 0x01
+#define POST_STAGE_HOST_RDY 0x02
+#define POST_STAGE_CHIP_RESET 0x03
+#define POST_STAGE_ARMFW_READY 0xc000
+#define POST_STAGE_ARMFW_UE 0xf000
+
+/* DOORBELL registers */
+#define PD_RXULP_DB 0x0100
+#define PD_TXULP_DB 0x0060
+#define DB_RQ_ID_MASK 0x3FF
+
+#define PD_CQ_DB 0x0120
+#define PD_EQ_DB PD_CQ_DB
+#define PD_MPU_MBOX_DB 0x0160
+#define PD_MQ_DB 0x0140
+
+/* EQE completion types */
+#define EQ_MINOR_CODE_COMPLETION 0x00
+#define EQ_MINOR_CODE_OTHER 0x01
+#define EQ_MAJOR_CODE_COMPLETION 0x00
+
+/* Link Status field values */
+#define PHY_LINK_FAULT_NONE 0x0
+#define PHY_LINK_FAULT_LOCAL 0x01
+#define PHY_LINK_FAULT_REMOTE 0x02
+
+#define PHY_LINK_SPEED_ZERO 0x0 /* No link */
+#define PHY_LINK_SPEED_10MBPS 0x1 /* (10 Mbps) */
+#define PHY_LINK_SPEED_100MBPS 0x2 /* (100 Mbps) */
+#define PHY_LINK_SPEED_1GBPS 0x3 /* (1 Gbps) */
+#define PHY_LINK_SPEED_10GBPS 0x4 /* (10 Gbps) */
+
+#define PHY_LINK_DUPLEX_NONE 0x0
+#define PHY_LINK_DUPLEX_HALF 0x1
+#define PHY_LINK_DUPLEX_FULL 0x2
+
+#define NTWK_PORT_A 0x0 /* (Port A) */
+#define NTWK_PORT_B 0x1 /* (Port B) */
+
/*
 * Duplicate PHY_LINK_SPEED_* definitions removed; the canonical
 * definitions appear above, next to the other link-status values.
 */
+
+/* Hardware Address types */
+#define MAC_ADDRESS_TYPE_STORAGE 0x0 /* (Storage MAC Address) */
+#define MAC_ADDRESS_TYPE_NETWORK 0x1 /* (Network MAC Address) */
+#define MAC_ADDRESS_TYPE_PD 0x2 /* (Protection Domain MAC Addr) */
+#define MAC_ADDRESS_TYPE_MANAGEMENT 0x3 /* (Management MAC Address) */
+#define MAC_ADDRESS_TYPE_FCOE 0x4 /* (FCoE MAC Address) */
+
+/* CREATE_IFACE capability and cap_en flags */
+#define MBX_RX_IFACE_FLAGS_RSS 0x4
+#define MBX_RX_IFACE_FLAGS_PROMISCUOUS 0x8
+#define MBX_RX_IFACE_FLAGS_BROADCAST 0x10
+#define MBX_RX_IFACE_FLAGS_UNTAGGED 0x20
+#define MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS 0x80
+#define MBX_RX_IFACE_FLAGS_VLAN 0x100
+#define MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS 0x200
+#define MBX_RX_IFACE_FLAGS_PASS_L2_ERR 0x400
+#define MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR 0x800
+#define MBX_RX_IFACE_FLAGS_MULTICAST 0x1000
+#define MBX_RX_IFACE_RX_FILTER_IF_MULTICAST_HASH 0x2000
+#define MBX_RX_IFACE_FLAGS_HDS 0x4000
+#define MBX_RX_IFACE_FLAGS_DIRECTED 0x8000
+#define MBX_RX_IFACE_FLAGS_VMQ 0x10000
+#define MBX_RX_IFACE_FLAGS_NETQ 0x20000
+#define MBX_RX_IFACE_FLAGS_QGROUPS 0x40000
+#define MBX_RX_IFACE_FLAGS_LSO 0x80000
+#define MBX_RX_IFACE_FLAGS_LRO 0x100000
+
+#define MQ_RING_CONTEXT_SIZE_16 0x5 /* (16 entries) */
+#define MQ_RING_CONTEXT_SIZE_32 0x6 /* (32 entries) */
+#define MQ_RING_CONTEXT_SIZE_64 0x7 /* (64 entries) */
+#define MQ_RING_CONTEXT_SIZE_128 0x8 /* (128 entries) */
+
+#define MBX_DB_READY_BIT 0x1
+#define MBX_DB_HI_BIT 0x2
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+#define ASYNC_EVENT_LINK_UP 0x1
+#define ASYNC_EVENT_LINK_DOWN 0x0
+#define ASYNC_EVENT_GRP5 0x5
+#define ASYNC_EVENT_PVID_STATE 0x3
+#define VLAN_VID_MASK 0x0FFF
+
+/* port link_status */
+#define ASYNC_EVENT_LOGICAL 0x02
+
+/* Logical Link Status */
+#define NTWK_LOGICAL_LINK_DOWN 0
+#define NTWK_LOGICAL_LINK_UP 1
+
+/* Rx filter bits */
+#define NTWK_RX_FILTER_IP_CKSUM 0x1
+#define NTWK_RX_FILTER_TCP_CKSUM 0x2
+#define NTWK_RX_FILTER_UDP_CKSUM 0x4
+#define NTWK_RX_FILTER_STRIP_CRC 0x8
+
+/* max SGE per mbx */
+#define MAX_MBX_SGE 19
+
+/* Max multicast filter size*/
+#define OCE_MAX_MC_FILTER_SIZE 64
+
+/* PCI SLI (Service Level Interface) capabilities register */
+#define OCE_INTF_REG_OFFSET 0x58
+#define OCE_INTF_VALID_SIG 6 /* register's signature */
+#define OCE_INTF_FUNC_RESET_REQD 1
+#define OCE_INTF_HINT1_NOHINT 0
+#define OCE_INTF_HINT1_SEMAINIT 1
+#define OCE_INTF_HINT1_STATCTRL 2
+#define OCE_INTF_IF_TYPE_0 0
+#define OCE_INTF_IF_TYPE_1 1
+#define OCE_INTF_IF_TYPE_2 2
+#define OCE_INTF_IF_TYPE_3 3
+#define OCE_INTF_SLI_REV3 3 /* not supported by driver */
+#define OCE_INTF_SLI_REV4 4 /* driver supports SLI-4 */
+#define OCE_INTF_PHYS_FUNC 0
+#define OCE_INTF_VIRT_FUNC 1
+#define OCE_INTF_FAMILY_BE2 0 /* not supported by driver */
+#define OCE_INTF_FAMILY_BE3 1 /* driver supports BE3 */
+#define OCE_INTF_FAMILY_A0_CHIP 0xA /* Lancer A0 chip (supported) */
+#define OCE_INTF_FAMILY_B0_CHIP 0xB /* Lancer B0 chip (future) */
+
+#define NIC_WQE_SIZE 16
+#define NIC_UNICAST 0x00
+#define NIC_MULTICAST 0x01
+#define NIC_BROADCAST 0x02
+
+#define NIC_HDS_NO_SPLIT 0x00
+#define NIC_HDS_SPLIT_L3PL 0x01
+#define NIC_HDS_SPLIT_L4PL 0x02
+
+#define NIC_WQ_TYPE_FORWARDING 0x01
+#define NIC_WQ_TYPE_STANDARD 0x02
+#define NIC_WQ_TYPE_LOW_LATENCY 0x04
+
+#define OCE_RESET_STATS 1
+#define OCE_RETAIN_STATS 0
+#define OCE_TXP_SW_SZ 48
+
/*
 * PCI SLI interface capability register (at OCE_INTF_REG_OFFSET in
 * config space).  Bit-field order flips with host endianness so the
 * overlaid dw0 matches the hardware layout.
 */
typedef union pci_sli_intf_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t sli_valid:3;
		uint32_t sli_hint2:5;
		uint32_t sli_hint1:8;
		uint32_t sli_if_type:4;
		uint32_t sli_family:4;
		uint32_t sli_rev:4;
		uint32_t rsv0:3;
		uint32_t sli_func_type:1;
#else
		uint32_t sli_func_type:1;
		uint32_t rsv0:3;
		uint32_t sli_rev:4;
		uint32_t sli_family:4;
		uint32_t sli_if_type:4;
		uint32_t sli_hint1:8;
		uint32_t sli_hint2:5;
		uint32_t sli_valid:3;
#endif
	} bits;
} __packed pci_sli_intf_t;
+
/*
 * Physical address as the firmware expects it in mailbox commands:
 * low dword first, then high dword.
 */
struct phys_addr {
	/* dw0 */
	uint32_t lo;
	/* dw1 */
	uint32_t hi;
} __packed;
+
/* PCICFG_INTR_CTRL register: host interrupt enable and BAR window select. */
typedef union pcicfg_intr_ctl_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t winselect:2;
		uint32_t hostintr:1;
		uint32_t pfnum:3;
		uint32_t vf_cev_int_line_en:1;
		uint32_t winaddr:23;
		uint32_t membarwinen:1;
#else
		uint32_t membarwinen:1;
		uint32_t winaddr:23;
		uint32_t vf_cev_int_line_en:1;
		uint32_t pfnum:3;
		uint32_t hostintr:1;
		uint32_t winselect:2;
#endif
	} bits;
} __packed pcicfg_intr_ctl_t;
+
/* PCICFG_SEMAPHORE register: single hardware lock bit. */
typedef union pcicfg_semaphore_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t rsvd:31;
		uint32_t lock:1;
#else
		uint32_t lock:1;
		uint32_t rsvd:31;
#endif
	} bits;
} __packed pcicfg_semaphore_t;
+
/* PCICFG_SOFT_RESET register: chip soft-reset control. */
typedef union pcicfg_soft_reset_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t nec_ll_rcvdetect:8;
		uint32_t dbg_all_reqs_62_49:14;
		uint32_t scratchpad0:1;
		uint32_t exception_oe:1;
		uint32_t soft_reset:1;
		uint32_t rsvd0:7;
#else
		uint32_t rsvd0:7;
		uint32_t soft_reset:1;
		uint32_t exception_oe:1;
		uint32_t scratchpad0:1;
		uint32_t dbg_all_reqs_62_49:14;
		uint32_t nec_ll_rcvdetect:8;
#endif
	} bits;
} __packed pcicfg_soft_reset_t;
+
/*
 * PCICFG_ONLINE1 register: per-hardware-block "online" status bits,
 * used to diagnose unrecoverable errors (see PCICFG_UE_STATUS_*).
 */
typedef union pcicfg_online1_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t host8_online:1;
		uint32_t host7_online:1;
		uint32_t host6_online:1;
		uint32_t host5_online:1;
		uint32_t host4_online:1;
		uint32_t host3_online:1;
		uint32_t host2_online:1;
		uint32_t ipc_online:1;
		uint32_t arm_online:1;
		uint32_t txp_online:1;
		uint32_t xaui_online:1;
		uint32_t rxpp_online:1;
		uint32_t txpb_online:1;
		uint32_t rr_online:1;
		uint32_t pmem_online:1;
		uint32_t pctl1_online:1;
		uint32_t pctl0_online:1;
		uint32_t pcs1online_online:1;
		uint32_t mpu_iram_online:1;
		uint32_t pcs0online_online:1;
		uint32_t mgmt_mac_online:1;
		uint32_t lpcmemhost_online:1;
#else
		uint32_t lpcmemhost_online:1;
		uint32_t mgmt_mac_online:1;
		uint32_t pcs0online_online:1;
		uint32_t mpu_iram_online:1;
		uint32_t pcs1online_online:1;
		uint32_t pctl0_online:1;
		uint32_t pctl1_online:1;
		uint32_t pmem_online:1;
		uint32_t rr_online:1;
		uint32_t txpb_online:1;
		uint32_t rxpp_online:1;
		uint32_t xaui_online:1;
		uint32_t txp_online:1;
		uint32_t arm_online:1;
		uint32_t ipc_online:1;
		uint32_t host2_online:1;
		uint32_t host3_online:1;
		uint32_t host4_online:1;
		uint32_t host5_online:1;
		uint32_t host6_online:1;
		uint32_t host7_online:1;
		uint32_t host8_online:1;
#endif
	} bits;
} __packed pcicfg_online1_t;
+
/*
 * MPU endpoint semaphore register: reports the firmware POST stage
 * (see POST_STAGE_*) and error/status flags.
 */
typedef union mpu_ep_semaphore_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t error:1;
		uint32_t backup_fw:1;
		uint32_t iscsi_no_ip:1;
		uint32_t iscsi_ip_conflict:1;
		uint32_t option_rom_installed:1;
		uint32_t iscsi_drv_loaded:1;
		uint32_t rsvd0:10;
		uint32_t stage:16;
#else
		uint32_t stage:16;
		uint32_t rsvd0:10;
		uint32_t iscsi_drv_loaded:1;
		uint32_t option_rom_installed:1;
		uint32_t iscsi_ip_conflict:1;
		uint32_t iscsi_no_ip:1;
		uint32_t backup_fw:1;
		uint32_t error:1;
#endif
	} bits;
} __packed mpu_ep_semaphore_t;
+
/* MPU endpoint control register: CPU reset and RAM init status. */
typedef union mpu_ep_control_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t cpu_reset:1;
		uint32_t rsvd1:15;
		uint32_t ep_ram_init_status:1;
		uint32_t rsvd0:12;
		uint32_t m2_rxpbuf:1;
		uint32_t m1_rxpbuf:1;
		uint32_t m0_rxpbuf:1;
#else
		uint32_t m0_rxpbuf:1;
		uint32_t m1_rxpbuf:1;
		uint32_t m2_rxpbuf:1;
		uint32_t rsvd0:12;
		uint32_t ep_ram_init_status:1;
		uint32_t rsvd1:15;
		uint32_t cpu_reset:1;
#endif
	} bits;
} __packed mpu_ep_control_t;
+
/* RX doorbell (PD_RXULP_DB): posts num_posted RQ buffers to queue qid. */
typedef union pd_rxulp_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t num_posted:8;
		uint32_t invalidate:1;
		uint32_t rsvd1:13;
		uint32_t qid:10;
#else
		uint32_t qid:10;
		uint32_t rsvd1:13;
		uint32_t invalidate:1;
		uint32_t num_posted:8;
#endif
	} bits;
} __packed pd_rxulp_db_t;
+
/* TX doorbell (PD_TXULP_DB): posts num_posted WQ entries to queue qid. */
typedef union pd_txulp_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t rsvd1:2;
		uint32_t num_posted:14;
		uint32_t rsvd0:6;
		uint32_t qid:10;
#else
		uint32_t qid:10;
		uint32_t rsvd0:6;
		uint32_t num_posted:14;
		uint32_t rsvd1:2;
#endif
	} bits;
} __packed pd_txulp_db_t;
+
/* CQ doorbell (PD_CQ_DB): acknowledges num_popped CQEs, optionally rearms. */
typedef union cq_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t rsvd1:2;
		uint32_t rearm:1;
		uint32_t num_popped:13;
		uint32_t rsvd0:5;
		uint32_t event:1;
		uint32_t qid:10;
#else
		uint32_t qid:10;
		uint32_t event:1;
		uint32_t rsvd0:5;
		uint32_t num_popped:13;
		uint32_t rearm:1;
		uint32_t rsvd1:2;
#endif
	} bits;
} __packed cq_db_t;
+
/*
 * EQ doorbell (shares the PD_CQ_DB offset; event=1 selects the EQ):
 * acknowledges num_popped EQEs, can clear the interrupt and rearm.
 */
typedef union eq_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t rsvd1:2;
		uint32_t rearm:1;
		uint32_t num_popped:13;
		uint32_t rsvd0:5;
		uint32_t event:1;
		uint32_t clrint:1;
		uint32_t qid:9;
#else
		uint32_t qid:9;
		uint32_t clrint:1;
		uint32_t event:1;
		uint32_t rsvd0:5;
		uint32_t num_popped:13;
		uint32_t rearm:1;
		uint32_t rsvd1:2;
#endif
	} bits;
} __packed eq_db_t;
+
/*
 * Bootstrap mailbox doorbell (PD_MPU_MBOX_DB): the mailbox physical
 * address is written in two halves selected by the hi bit; ready is
 * polled to detect completion.
 */
typedef union pd_mpu_mbox_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t address:30;
		uint32_t hi:1;
		uint32_t ready:1;
#else
		uint32_t ready:1;
		uint32_t hi:1;
		uint32_t address:30;
#endif
	} bits;
} __packed pd_mpu_mbox_db_t;
+
/* MQ ring doorbell (PD_MQ_DB): posts num_posted entries to mailbox queue mq_id. */
typedef union pd_mq_db_u {
	uint32_t dw0;
	struct {
#ifdef _BIG_ENDIAN
		uint32_t rsvd1:2;
		uint32_t num_posted:14;
		uint32_t rsvd0:5;
		uint32_t mq_id:11;
#else
		uint32_t mq_id:11;
		uint32_t rsvd0:5;
		uint32_t num_posted:14;
		uint32_t rsvd1:2;
#endif
	} bits;
} __packed pd_mq_db_t;
+
/*
 * Event Queue Entry: a single valid/completion dword written by the
 * hardware into the EQ ring.
 */
struct oce_eqe {
	uint32_t evnt;
} __packed;
+
/* MQ scatter gather entry. Array of these make an SGL */
struct oce_mq_sge {
	uint32_t pa_lo;		/* physical address, low dword */
	uint32_t pa_hi;		/* physical address, high dword */
	uint32_t length;	/* segment length in bytes */
} __packed;
+
/*
 * Mailbox payload: either an SGL of up to MAX_MBX_SGE entries or an
 * embedded command of up to 59 dwords, overlaid on the same storage.
 */
struct oce_mbx_payload {
	union {
		union {
			struct oce_mq_sge sgl[MAX_MBX_SGE];
			uint32_t embedded[59];
		} u1;
		uint32_t dw[59];
	} u0;
} __packed;
+
/*
 * MQ mailbox entry: flags dword (embedded vs. SGE-based payload),
 * payload length, caller tag and the payload itself.
 */
struct oce_mbx {
	union {
		struct {
#ifdef _BIG_ENDIAN
			uint32_t special:8;
			uint32_t rsvd1:16;
			uint32_t sge_count:5;
			uint32_t rsvd0:2;
			uint32_t embedded:1;
#else
			uint32_t embedded:1;
			uint32_t rsvd0:2;
			uint32_t sge_count:5;
			uint32_t rsvd1:16;
			uint32_t special:8;
#endif
		} s;
		uint32_t dw0;
	} u0;

	uint32_t payload_length;
	uint32_t tag[2];
	uint32_t rsvd2[1];
	struct oce_mbx_payload payload;
} __packed;
+
/*
 * Completion queue entry for the MQ: completion/extended status, the
 * tag the command was posted with, and valid/async flags in dw3.
 */
struct oce_mq_cqe {
	union {
		struct {
#ifdef _BIG_ENDIAN
			/* dw0 */
			uint32_t extended_status:16;
			uint32_t completion_status:16;
			/* dw1 dw2 */
			uint32_t mq_tag[2];
			/* dw3 */
			uint32_t valid:1;
			uint32_t async_event:1;
			uint32_t hpi_buffer_cmpl:1;
			uint32_t completed:1;
			uint32_t consumed:1;
			uint32_t rsvd0:3;
			uint32_t async_type:8;
			uint32_t event_type:8;
			uint32_t rsvd1:8;
#else
			/* dw0 */
			uint32_t completion_status:16;
			uint32_t extended_status:16;
			/* dw1 dw2 */
			uint32_t mq_tag[2];
			/* dw3 */
			uint32_t rsvd1:8;
			uint32_t event_type:8;
			uint32_t async_type:8;
			uint32_t rsvd0:3;
			uint32_t consumed:1;
			uint32_t completed:1;
			uint32_t hpi_buffer_cmpl:1;
			uint32_t async_event:1;
			uint32_t valid:1;
#endif
		} s;
		uint32_t dw[4];
	} u0;
} __packed;
+
/* Mailbox Completion Status Codes (oce_mq_cqe completion_status field). */
enum MBX_COMPLETION_STATUS {
	MBX_CQE_STATUS_SUCCESS = 0x00,
	MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 0x01,
	MBX_CQE_STATUS_INVALID_PARAMETER = 0x02,
	MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 0x03,
	MBX_CQE_STATUS_QUEUE_FLUSHING = 0x04,
	MBX_CQE_STATUS_DMA_FAILED = 0x05
};
+
/*
 * Asynchronous link-state CQE (event code ASYNC_EVENT_CODE_LINK_STATE):
 * reports port, link status, speed and duplex (PHY_LINK_* values).
 */
struct oce_async_cqe_link_state {
	union {
		struct {
#ifdef _BIG_ENDIAN
			/* dw0 */
			uint8_t speed;
			uint8_t duplex;
			uint8_t link_status;
			uint8_t phy_port;
			/* dw1 */
			uint16_t qos_link_speed;
			uint8_t rsvd0;
			uint8_t fault;
			/* dw2 */
			uint32_t event_tag;
			/* dw3 */
			uint32_t valid:1;
			uint32_t async_event:1;
			uint32_t rsvd2:6;
			uint32_t event_type:8;
			uint32_t event_code:8;
			uint32_t rsvd1:8;
#else
			/* dw0 */
			uint8_t phy_port;
			uint8_t link_status;
			uint8_t duplex;
			uint8_t speed;
			/* dw1 */
			uint8_t fault;
			uint8_t rsvd0;
			uint16_t qos_link_speed;
			/* dw2 */
			uint32_t event_tag;
			/* dw3 */
			uint32_t rsvd1:8;
			uint32_t event_code:8;
			uint32_t event_type:8;
			uint32_t rsvd2:6;
			uint32_t async_event:1;
			uint32_t valid:1;
#endif
		} s;
		uint32_t dw[4];
	} u0;
} __packed;
+
/* PVID async event (group 5, ASYNC_EVENT_PVID_STATE). */
struct oce_async_event_grp5_pvid_state {
	uint8_t enabled;	/* nonzero when a port VLAN id is active */
	uint8_t rsvd0;
	uint16_t tag;		/* the PVID tag itself */
	uint32_t event_tag;
	uint32_t rsvd1;
	uint32_t code;
} __packed;
+
/*
 * Extended MQ context for OPCODE_COMMON_CREATE_MQ_EXT: adds an async
 * event bitmap and an optional separate async CQ to the basic context.
 */
typedef union oce_mq_ext_ctx_u {
	uint32_t dw[6];
	struct {
	#ifdef _BIG_ENDIAN
		/* dw0 */
		uint32_t dw4rsvd1:16;
		uint32_t num_pages:16;
		/* dw1 */
		uint32_t async_evt_bitmap;
		/* dw2 */
		uint32_t cq_id:10;
		uint32_t dw5rsvd2:2;
		uint32_t ring_size:4;
		uint32_t dw5rsvd1:16;
		/* dw3 */
		uint32_t valid:1;
		uint32_t dw6rsvd1:31;
		/* dw4 */
		uint32_t dw7rsvd1:21;
		uint32_t async_cq_id:10;
		uint32_t async_cq_valid:1;
	#else
		/* dw0 */
		uint32_t num_pages:16;
		uint32_t dw4rsvd1:16;
		/* dw1 */
		uint32_t async_evt_bitmap;
		/* dw2 */
		uint32_t dw5rsvd1:16;
		uint32_t ring_size:4;
		uint32_t dw5rsvd2:2;
		uint32_t cq_id:10;
		/* dw3 */
		uint32_t dw6rsvd1:31;
		uint32_t valid:1;
		/* dw4 */
		uint32_t async_cq_valid:1;
		uint32_t async_cq_id:10;
		uint32_t dw7rsvd1:21;
	#endif
		/* dw5 */
		uint32_t dw8rsvd1;
	} v0;
} __packed oce_mq_ext_ctx_t;
+
/*
 * Bootstrap mailbox: one command slot followed by the CQE the firmware
 * writes back; its physical address is rung via PD_MPU_MBOX_DB.
 */
struct oce_bmbx {
	struct oce_mbx mbx;
	struct oce_mq_cqe cqe;
} __packed;
+
/* ---[ MBXs start here ]---------------------------------------------- */
/* Mailbox subsystem codes (mbx_hdr subsystem field). */
enum MBX_SUBSYSTEM_CODES {
	MBX_SUBSYSTEM_RSVD = 0,
	MBX_SUBSYSTEM_COMMON = 1,
	MBX_SUBSYSTEM_COMMON_ISCSI = 2,
	MBX_SUBSYSTEM_NIC = 3,
	MBX_SUBSYSTEM_TOE = 4,
	MBX_SUBSYSTEM_PXE_UNDI = 5,
	MBX_SUBSYSTEM_ISCSI_INI = 6,
	MBX_SUBSYSTEM_ISCSI_TGT = 7,
	MBX_SUBSYSTEM_MILI_PTL = 8,
	MBX_SUBSYSTEM_MILI_TMD = 9,
	MBX_SUBSYSTEM_RDMA = 10,
	MBX_SUBSYSTEM_LOWLEVEL = 11,
	MBX_SUBSYSTEM_LRO = 13,
	IOCBMBX_SUBSYSTEM_DCBX = 15,
	IOCBMBX_SUBSYSTEM_DIAG = 16,
	IOCBMBX_SUBSYSTEM_VENDOR = 17
};
+
/* common ioctl opcodes */
enum COMMON_SUBSYSTEM_OPCODES {
/* These opcodes are common to both networking and storage PCI functions
 * They are used to reserve resources and configure CNA. These opcodes
 * all use the MBX_SUBSYSTEM_COMMON subsystem code.
 */
	OPCODE_COMMON_QUERY_IFACE_MAC = 1,
	OPCODE_COMMON_SET_IFACE_MAC = 2,
	OPCODE_COMMON_SET_IFACE_MULTICAST = 3,
	OPCODE_COMMON_CONFIG_IFACE_VLAN = 4,
	OPCODE_COMMON_QUERY_LINK_CONFIG = 5,
	OPCODE_COMMON_READ_FLASHROM = 6,
	OPCODE_COMMON_WRITE_FLASHROM = 7,
	OPCODE_COMMON_QUERY_MAX_MBX_BUFFER_SIZE = 8,
	OPCODE_COMMON_CREATE_CQ = 12,
	OPCODE_COMMON_CREATE_EQ = 13,
	OPCODE_COMMON_CREATE_MQ = 21,
	OPCODE_COMMON_GET_QOS = 27,
	OPCODE_COMMON_SET_QOS = 28,
	OPCODE_COMMON_READ_EPROM = 30,
	OPCODE_COMMON_GET_CNTL_ATTRIBUTES = 32,
	OPCODE_COMMON_NOP = 33,
	OPCODE_COMMON_SET_IFACE_RX_FILTER = 34,
	OPCODE_COMMON_GET_FW_VERSION = 35,
	OPCODE_COMMON_SET_FLOW_CONTROL = 36,
	OPCODE_COMMON_GET_FLOW_CONTROL = 37,
	OPCODE_COMMON_SET_FRAME_SIZE = 39,
	OPCODE_COMMON_MODIFY_EQ_DELAY = 41,
	OPCODE_COMMON_CREATE_IFACE = 50,
	OPCODE_COMMON_DESTROY_IFACE = 51,
	OPCODE_COMMON_MODIFY_MSI_MESSAGES = 52,
	OPCODE_COMMON_DESTROY_MQ = 53,
	OPCODE_COMMON_DESTROY_CQ = 54,
	OPCODE_COMMON_DESTROY_EQ = 55,
	OPCODE_COMMON_UPLOAD_TCP = 56,
	OPCODE_COMMON_SET_NTWK_LINK_SPEED = 57,
	OPCODE_COMMON_QUERY_FIRMWARE_CONFIG = 58,
	OPCODE_COMMON_ADD_IFACE_MAC = 59,
	OPCODE_COMMON_DEL_IFACE_MAC = 60,
	OPCODE_COMMON_FUNCTION_RESET = 61,
	OPCODE_COMMON_SET_PHYSICAL_LINK_CONFIG = 62,
	OPCODE_COMMON_GET_BOOT_CONFIG = 66,
	/* NOTE(review): "OPCPDE" typo inherited from the vendor header;
	 * kept as-is since renaming would break any existing references. */
	OPCPDE_COMMON_SET_BOOT_CONFIG = 67,
	OPCODE_COMMON_SET_BEACON_CONFIG = 69,
	OPCODE_COMMON_GET_BEACON_CONFIG = 70,
	OPCODE_COMMON_GET_PHYSICAL_LINK_CONFIG = 71,
	OPCODE_COMMON_GET_OEM_ATTRIBUTES = 76,
	OPCODE_COMMON_GET_PORT_NAME = 77,
	OPCODE_COMMON_GET_CONFIG_SIGNATURE = 78,
	OPCODE_COMMON_SET_CONFIG_SIGNATURE = 79,
	OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG = 80,
	OPCODE_COMMON_GET_BE_CONFIGURATION_RESOURCES = 81,
	OPCODE_COMMON_SET_BE_CONFIGURATION_RESOURCES = 82,
	OPCODE_COMMON_GET_RESET_NEEDED = 84,
	OPCODE_COMMON_GET_SERIAL_NUMBER = 85,
	OPCODE_COMMON_GET_NCSI_CONFIG = 86,
	OPCODE_COMMON_SET_NCSI_CONFIG = 87,
	OPCODE_COMMON_CREATE_MQ_EXT = 90,
	OPCODE_COMMON_SET_FUNCTION_PRIVILEGES = 100,
	OPCODE_COMMON_SET_VF_PORT_TYPE = 101,
	OPCODE_COMMON_GET_PHY_CONFIG = 102,
	OPCODE_COMMON_SET_FUNCTIONAL_CAPS = 103,
	OPCODE_COMMON_GET_ADAPTER_ID = 110,
	OPCODE_COMMON_GET_UPGRADE_FEATURES = 111,
	OPCODE_COMMON_GET_INSTALLED_FEATURES = 112,
	OPCODE_COMMON_GET_AVAIL_PERSONALITIES = 113,
	OPCODE_COMMON_GET_CONFIG_PERSONALITIES = 114,
	OPCODE_COMMON_SEND_ACTIVATION = 115,
	OPCODE_COMMON_RESET_LICENSES = 116,
	OPCODE_COMMON_GET_CNTL_ADDL_ATTRIBUTES = 121,
	OPCODE_COMMON_QUERY_TCB = 144,
	OPCODE_COMMON_ADD_IFACE_QUEUE_FILTER = 145,
	OPCODE_COMMON_DEL_IFACE_QUEUE_FILTER = 146,
	OPCODE_COMMON_GET_IFACE_MAC_LIST = 147,
	OPCODE_COMMON_SET_IFACE_MAC_LIST = 148,
	OPCODE_COMMON_MODIFY_CQ = 149,
	OPCODE_COMMON_GET_IFACE_VLAN_LIST = 150,
	OPCODE_COMMON_SET_IFACE_VLAN_LIST = 151,
	OPCODE_COMMON_GET_HSW_CONFIG = 152,
	OPCODE_COMMON_SET_HSW_CONFIG = 153,
	OPCODE_COMMON_GET_RESOURCE_EXTENT_INFO = 154,
	OPCODE_COMMON_GET_ALLOCATED_RESOURCE_EXTENTS = 155,
	OPCODE_COMMON_ALLOC_RESOURCE_EXTENTS = 156,
	OPCODE_COMMON_DEALLOC_RESOURCE_EXTENTS = 157,
	OPCODE_COMMON_SET_DIAG_REGISTERS = 158,
	OPCODE_COMMON_GET_FUNCTION_CONFIG = 160,
	OPCODE_COMMON_GET_PROFILE_CAPACITIES = 161,
	OPCODE_COMMON_GET_MR_PROFILE_CAPACITIES = 162,
	OPCODE_COMMON_SET_MR_PROFILE_CAPACITIES = 163,
	OPCODE_COMMON_GET_PROFILE_CONFIG = 164,
	OPCODE_COMMON_SET_PROFILE_CONFIG = 165,
	OPCODE_COMMON_GET_PROFILE_LIST = 166,
	OPCODE_COMMON_GET_ACTIVE_PROFILE = 167,
	OPCODE_COMMON_SET_ACTIVE_PROFILE = 168,
	OPCODE_COMMON_GET_FUNCTION_PRIVILEGES = 170,
	OPCODE_COMMON_READ_OBJECT = 171,
	OPCODE_COMMON_WRITE_OBJECT = 172
};
+
/* common ioctl header */
#define OCE_MBX_VER_V2	0x0002		/* Version V2 mailbox command */
#define OCE_MBX_VER_V1	0x0001		/* Version V1 mailbox command */
#define OCE_MBX_VER_V0	0x0000		/* Version V0 mailbox command */
/*
 * Mailbox command header: the request view carries opcode/subsystem/
 * version; the firmware overwrites the same dwords with the response
 * view (status and lengths).
 */
struct mbx_hdr {
	union {
		uint32_t dw[4];
		struct {
		#ifdef _BIG_ENDIAN
			/* dw 0 */
			uint32_t domain:8;
			uint32_t port_number:8;
			uint32_t subsystem:8;
			uint32_t opcode:8;
			/* dw 1 */
			uint32_t timeout;
			/* dw 2 */
			uint32_t request_length;
			/* dw 3 */
			uint32_t rsvd0:24;
			uint32_t version:8;
		#else
			/* dw 0 */
			uint32_t opcode:8;
			uint32_t subsystem:8;
			uint32_t port_number:8;
			uint32_t domain:8;
			/* dw 1 */
			uint32_t timeout;
			/* dw 2 */
			uint32_t request_length;
			/* dw 3 */
			uint32_t version:8;
			uint32_t rsvd0:24;
		#endif
		} req;
		struct {
		#ifdef _BIG_ENDIAN
			/* dw 0 */
			uint32_t domain:8;
			uint32_t rsvd0:8;
			uint32_t subsystem:8;
			uint32_t opcode:8;
			/* dw 1 */
			uint32_t rsvd1:16;
			uint32_t additional_status:8;
			uint32_t status:8;
		#else
			/* dw 0 */
			uint32_t opcode:8;
			uint32_t subsystem:8;
			uint32_t rsvd0:8;
			uint32_t domain:8;
			/* dw 1 */
			uint32_t status:8;
			uint32_t additional_status:8;
			uint32_t rsvd1:16;
		#endif
			uint32_t rsp_length;
			uint32_t actual_rsp_length;
		} rsp;
	} u0;
} __packed;

#define OCE_BMBX_RHDR_SZ		20
#define OCE_MBX_RRHDR_SZ		sizeof (struct mbx_hdr)
#define OCE_MBX_ADDL_STATUS(_MHDR)	((_MHDR)->u0.rsp.additional_status)
#define OCE_MBX_STATUS(_MHDR)		((_MHDR)->u0.rsp.status)
+
/* [05] OPCODE_COMMON_QUERY_LINK_CONFIG: query link speed/duplex/fault. */
struct mbx_query_common_link_config {
	struct mbx_hdr hdr;
	union {
		struct {
			uint32_t rsvd0;
		} req;

		struct {
			/* dw 0 */
			uint8_t physical_port;
			uint8_t mac_duplex;
			uint8_t mac_speed;
			uint8_t mac_fault;
			/* dw 1 */
			uint8_t mgmt_mac_duplex;
			uint8_t mgmt_mac_speed;
			uint16_t qos_link_speed;
			uint32_t logical_link_status;
		} rsp;
	} params;
} __packed;
+
/* [57] OPCODE_COMMON_SET_NTWK_LINK_SPEED: force the port link speed. */
struct mbx_set_common_link_speed {
	struct mbx_hdr hdr;
	union {
		struct {
#ifdef _BIG_ENDIAN
			uint8_t rsvd0;
			uint8_t mac_speed;
			uint8_t virtual_port;
			uint8_t physical_port;
#else
			uint8_t physical_port;
			uint8_t virtual_port;
			uint8_t mac_speed;
			uint8_t rsvd0;
#endif
		} req;

		struct {
			uint32_t rsvd0;
		} rsp;

		uint32_t dw;
	} params;
} __packed;
+
/* MAC address as carried in mailbox commands (size prefix + 6 bytes). */
struct mac_address_format {
	uint16_t size_of_struct;
	uint8_t mac_addr[6];
} __packed;
+
/* [01] OPCODE_COMMON_QUERY_IFACE_MAC: read an interface MAC address. */
struct mbx_query_common_iface_mac {
	struct mbx_hdr hdr;
	union {
		struct {
#ifdef _BIG_ENDIAN
			uint16_t if_id;
			uint8_t permanent;
			uint8_t type;
#else
			uint8_t type;
			uint8_t permanent;
			uint16_t if_id;
#endif

		} req;

		struct {
			struct mac_address_format mac;
		} rsp;
	} params;
} __packed;
+
/* [02] OPCODE_COMMON_SET_IFACE_MAC: set or invalidate an interface MAC. */
struct mbx_set_common_iface_mac {
	struct mbx_hdr hdr;
	union {
		struct {
#ifdef _BIG_ENDIAN
			/* dw 0 */
			uint16_t if_id;
			uint8_t invalidate;
			uint8_t type;
#else
			/* dw 0 */
			uint8_t type;
			uint8_t invalidate;
			uint16_t if_id;
#endif
			/* dw 1 */
			struct mac_address_format mac;
		} req;

		struct {
			uint32_t rsvd0;
		} rsp;

		uint32_t dw[2];
	} params;
} __packed;
+
/*
 * [03] OPCODE_COMMON_SET_IFACE_MULTICAST: program up to 32 multicast
 * addresses, or enable multicast-promiscuous mode.
 */
struct mbx_set_common_iface_multicast {
	struct mbx_hdr hdr;
	union {
		struct {
			/* dw 0 */
			uint16_t num_mac;
			uint8_t promiscuous;
			uint8_t if_id;
			/* dw 1-48 */
			struct {
				uint8_t byte[6];
			} mac[32];

		} req;

		struct {
			uint32_t rsvd0;
		} rsp;

		uint32_t dw[49];
	} params;
} __packed;
+
/* 802.1ad QinQ tag pair (outer + inner VLAN tag). */
struct qinq_vlan {
#ifdef _BIG_ENDIAN
	uint16_t inner;
	uint16_t outer;
#else
	uint16_t outer;
	uint16_t inner;
#endif
} __packed;
+
/* Single 802.1Q VLAN tag. */
struct normal_vlan {
	uint16_t vtag;
} __packed;
+
/* VLAN tag in either single-tag or QinQ form, overlaid. */
struct ntwk_if_vlan_tag {
	union {
		struct normal_vlan normal;
		struct qinq_vlan qinq;
	} u0;
} __packed;
+
/*
 * [50] OPCODE_COMMON_CREATE_IFACE: create a network interface with the
 * given capability/enable flags (MBX_RX_IFACE_FLAGS_*); returns if_id
 * and pmac_id.
 */
struct mbx_create_common_iface {
	struct mbx_hdr hdr;
	union {
		struct {
			uint32_t version;
			uint32_t cap_flags;
			uint32_t enable_flags;
			uint8_t mac_addr[6];
			uint8_t rsvd0;
			uint8_t mac_invalid;
			struct ntwk_if_vlan_tag vlan_tag;
		} req;

		struct {
			uint32_t if_id;
			uint32_t pmac_id;
		} rsp;
		uint32_t dw[4];
	} params;
} __packed;
+
/* [51] OPCODE_COMMON_DESTROY_IFACE: tear down an interface by if_id. */
struct mbx_destroy_common_iface {
	struct mbx_hdr hdr;
	union {
		struct {
			uint32_t if_id;
		} req;

		struct {
			uint32_t rsvd0;
		} rsp;

		uint32_t dw;
	} params;
} __packed;
+
/* event queue context structure (body of OPCODE_COMMON_CREATE_EQ) */
struct oce_eq_ctx {
#ifdef _BIG_ENDIAN
	uint32_t dw4rsvd1:16;
	uint32_t num_pages:16;

	uint32_t size:1;
	uint32_t dw5rsvd2:1;
	uint32_t valid:1;
	uint32_t dw5rsvd1:29;

	uint32_t armed:1;
	uint32_t dw6rsvd2:2;
	uint32_t count:3;
	uint32_t dw6rsvd1:26;

	uint32_t dw7rsvd2:9;
	uint32_t delay_mult:10;
	uint32_t dw7rsvd1:13;

	uint32_t dw8rsvd1;
#else
	uint32_t num_pages:16;
	uint32_t dw4rsvd1:16;

	uint32_t dw5rsvd1:29;
	uint32_t valid:1;
	uint32_t dw5rsvd2:1;
	uint32_t size:1;

	uint32_t dw6rsvd1:26;
	uint32_t count:3;
	uint32_t dw6rsvd2:2;
	uint32_t armed:1;

	uint32_t dw7rsvd1:13;
	uint32_t delay_mult:10;
	uint32_t dw7rsvd2:9;

	uint32_t dw8rsvd1;
#endif
} __packed;
+
/* [13] OPCODE_COMMON_CREATE_EQ: create an event queue; returns eq_id. */
struct mbx_create_common_eq {
	struct mbx_hdr hdr;
	union {
		struct {
			struct oce_eq_ctx ctx;
			struct phys_addr pages[8];
		} req;

		struct {
			uint16_t eq_id;
			uint16_t rsvd0;
		} rsp;
	} params;
} __packed;
+
/* [55] OPCODE_COMMON_DESTROY_EQ: destroy an event queue by id. */
struct mbx_destroy_common_eq {
	struct mbx_hdr hdr;
	union {
		struct {
#ifdef _BIG_ENDIAN
			uint16_t rsvd0;
			uint16_t id;
#else
			uint16_t id;
			uint16_t rsvd0;
#endif
		} req;

		struct {
			uint32_t rsvd0;
		} rsp;
	} params;
} __packed;
+
+/* SLI-4 CQ context - use version V0 for B3, version V2 for Lancer */
+typedef union oce_cq_ctx_u {
+ uint32_t dw[5];
+ struct {
+ #ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint32_t dw4rsvd1:16;
+ uint32_t num_pages:16;
+ /* dw5 */
+ uint32_t eventable:1;
+ uint32_t dw5rsvd3:1;
+ uint32_t valid:1;
+ uint32_t count:2;
+ uint32_t dw5rsvd2:12;
+ uint32_t nodelay:1;
+ uint32_t coalesce_wm:2;
+ uint32_t dw5rsvd1:12;
+ /* dw6 */
+ uint32_t armed:1;
+ uint32_t dw6rsvd2:1;
+ uint32_t eq_id:8;
+ uint32_t dw6rsvd1:22;
+ #else
+ /* dw4 */
+ uint32_t num_pages:16;
+ uint32_t dw4rsvd1:16;
+ /* dw5 */
+ uint32_t dw5rsvd1:12;
+ uint32_t coalesce_wm:2;
+ uint32_t nodelay:1;
+ uint32_t dw5rsvd2:12;
+ uint32_t count:2;
+ uint32_t valid:1;
+ uint32_t dw5rsvd3:1;
+ uint32_t eventable:1;
+ /* dw6 */
+ uint32_t dw6rsvd1:22;
+ uint32_t eq_id:8;
+ uint32_t dw6rsvd2:1;
+ uint32_t armed:1;
+ #endif
+ /* dw7 */
+ uint32_t dw7rsvd1;
+ /* dw8 */
+ uint32_t dw8rsvd1;
+ } v0;
+ struct {
+ #ifdef _BIG_ENDIAN
+ /* dw4 */
+ uint32_t dw4rsvd1:8;
+ uint32_t page_size:8;
+ uint32_t num_pages:16;
+ /* dw5 */
+ uint32_t eventable:1;
+ uint32_t dw5rsvd3:1;
+ uint32_t valid:1;
+ uint32_t count:2;
+ uint32_t dw5rsvd2:11;
+ uint32_t autovalid:1;
+ uint32_t nodelay:1;
+ uint32_t coalesce_wm:2;
+ uint32_t dw5rsvd1:12;
+ /* dw6 */
+ uint32_t armed:1;
+ uint32_t dw6rsvd1:15;
+ uint32_t eq_id:16;
+ /* dw7 */
+ uint32_t dw7rsvd1:16;
+ uint32_t cqe_count:16;
+ #else
+ /* dw4 */
+ uint32_t num_pages:16;
+ uint32_t page_size:8;
+ uint32_t dw4rsvd1:8;
+ /* dw5 */
+ uint32_t dw5rsvd1:12;
+ uint32_t coalesce_wm:2;
+ uint32_t nodelay:1;
+ uint32_t autovalid:1;
+ uint32_t dw5rsvd2:11;
+ uint32_t count:2;
+ uint32_t valid:1;
+ uint32_t dw5rsvd3:1;
+ uint32_t eventable:1;
+ /* dw6 */
+ uint32_t eq_id:8;
+ uint32_t dw6rsvd1:15;
+ uint32_t armed:1;
+ /* dw7 */
+ uint32_t cqe_count:16;
+ uint32_t dw7rsvd1:16;
+ #endif
+ /* dw8 */
+ uint32_t dw8rsvd1;
+ } v2;
+} __packed oce_cq_ctx_t;
+
/* [12] OPCODE_COMMON_CREATE_CQ: create a completion queue; returns cq_id. */
struct mbx_create_common_cq {
	struct mbx_hdr hdr;
	union {
		struct {
			oce_cq_ctx_t cq_ctx;
			struct phys_addr pages[4];
		} req;

		struct {
			uint16_t cq_id;
			uint16_t rsvd0;
		} rsp;
	} params;
} __packed;
+
/* [54] OPCODE_COMMON_DESTROY_CQ: destroy a completion queue by id. */
struct mbx_destroy_common_cq {
	struct mbx_hdr hdr;
	union {
		struct {
#ifdef _BIG_ENDIAN
			uint16_t rsvd0;
			uint16_t id;
#else
			uint16_t id;
			uint16_t rsvd0;
#endif
		} req;

		struct {
			uint32_t rsvd0;
		} rsp;
	} params;
} __packed;
+
/* MQ context (body of OPCODE_COMMON_CREATE_MQ). */
typedef union oce_mq_ctx_u {
	uint32_t dw[5];
	struct {
	#ifdef _BIG_ENDIAN
		/* dw4 */
		uint32_t dw4rsvd1:16;
		uint32_t num_pages:16;
		/* dw5 */
		uint32_t cq_id:10;
		uint32_t dw5rsvd2:2;
		uint32_t ring_size:4;
		uint32_t dw5rsvd1:16;
		/* dw6 */
		uint32_t valid:1;
		uint32_t dw6rsvd1:31;
		/* dw7 */
		uint32_t dw7rsvd1:21;
		uint32_t async_cq_id:10;
		uint32_t async_cq_valid:1;
	#else
		/* dw4 */
		uint32_t num_pages:16;
		uint32_t dw4rsvd1:16;
		/* dw5 */
		uint32_t dw5rsvd1:16;
		uint32_t ring_size:4;
		uint32_t dw5rsvd2:2;
		uint32_t cq_id:10;
		/* dw6 */
		uint32_t dw6rsvd1:31;
		uint32_t valid:1;
		/* dw7 */
		uint32_t async_cq_valid:1;
		uint32_t async_cq_id:10;
		uint32_t dw7rsvd1:21;
	#endif
		/* dw8 */
		uint32_t dw8rsvd1;
	} v0;
} __packed oce_mq_ctx_t;
+
/**
 * @brief [21] OPCODE_COMMON_CREATE_MQ
 * A MQ must be at least 16 entries deep (corresponding to 1 page) and
 * at most 128 entries deep (corresponding to 8 pages).
 */
struct mbx_create_common_mq {
	struct mbx_hdr hdr;
	union {
		struct {
			oce_mq_ctx_t context;
			struct phys_addr pages[8];
		} req;

		struct {
			uint32_t mq_id:16;
			uint32_t rsvd0:16;
		} rsp;
	} params;
} __packed;
+
+struct mbx_create_common_mq_ex {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ oce_mq_ext_ctx_t context;
+ struct phys_addr pages[8];
+ } req;
+
+ struct {
+ uint32_t mq_id:16;
+ uint32_t rsvd0:16;
+ } rsp;
+ } params;
+} __packed;
+
+/* [53] OPCODE_COMMON_DESTROY_MQ */
+/* Mailbox command: tear down the mailbox queue identified by req.id. */
+struct mbx_destroy_common_mq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t rsvd0;
+			uint16_t id;
+#else
+			uint16_t id;
+			uint16_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [35] OPCODE_COMMON_GET_FW_VERSION */
+/* Returns the running and on-flash firmware version strings. */
+struct mbx_get_common_fw_version {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0;
+		} req;
+
+		struct {
+			uint8_t fw_ver_str[32];
+			uint8_t fw_on_flash_ver_str[32];
+		} rsp;
+	} params;
+} __packed;
+
+/* [52] OPCODE_COMMON_CEV_MODIFY_MSI_MESSAGES */
+/* Sets the number of MSI messages the controller may generate. */
+struct mbx_common_cev_modify_msi_messages {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t num_msi_msgs;
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [36] OPCODE_COMMON_SET_FLOW_CONTROL */
+/* [37] OPCODE_COMMON_GET_FLOW_CONTROL */
+/* Shared request/response body: non-zero enables pause in that direction. */
+struct mbx_common_get_set_flow_control {
+	struct mbx_hdr hdr;
+#ifdef _BIG_ENDIAN
+	uint16_t tx_flow_control;
+	uint16_t rx_flow_control;
+#else
+	uint16_t rx_flow_control;
+	uint16_t tx_flow_control;
+#endif
+} __packed;
+
+/* Flash operation selector for the READ/WRITE_FLASHROM commands below. */
+enum e_flash_opcode {
+	MGMT_FLASHROM_OPCODE_FLASH = 1,
+	MGMT_FLASHROM_OPCODE_SAVE = 2
+};
+
+/* [06] OPCODE_READ_COMMON_FLASHROM */
+/* [07] OPCODE_WRITE_COMMON_FLASHROM */
+/* data_buffer is a flexible payload; the 4 bytes declared here are
+ * extended by IMAGE_TRANSFER_SIZE when the command buffer is allocated. */
+struct mbx_common_read_write_flashrom {
+	struct mbx_hdr hdr;
+	uint32_t flash_op_code;
+	uint32_t flash_op_type;
+	uint32_t data_buffer_size;
+	uint32_t data_offset;
+	uint8_t data_buffer[4];	/* + IMAGE_TRANSFER_SIZE */
+} __packed;
+
+/* PHY description returned by the GET_PHY_DETAILS command. */
+struct oce_phy_info {
+	uint16_t phy_type;
+	uint16_t interface_type;
+	uint32_t misc_params;
+	uint16_t ext_phy_details;
+	uint16_t rsvd;
+	uint16_t auto_speeds_supported;
+	uint16_t fixed_speeds_supported;
+	uint32_t future_use[2];
+} __packed;
+
+/* Mailbox wrapper: request is padding, response carries oce_phy_info. */
+struct mbx_common_phy_info {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0[4];
+		} req;
+		struct {
+			struct oce_phy_info phy_info;
+		} rsp;
+	} params;
+} __packed;
+
+/* Lancer firmware */
+
+/* Lancer WRITE_OBJECT command: streams a named object (e.g. a firmware
+ * image) to the controller in chunks addressed by write_offset. */
+struct mbx_lancer_common_write_object {
+	union {
+		struct {
+			struct mbx_hdr hdr;
+			uint32_t write_length: 24;
+			uint32_t rsvd: 7;
+			uint32_t eof: 1;
+			uint32_t write_offset;
+			uint8_t object_name[104];
+			uint32_t descriptor_count;
+			uint32_t buffer_length;
+			uint32_t address_lower;
+			uint32_t address_upper;
+		} req;
+		struct {
+			uint8_t opcode;
+			uint8_t subsystem;
+			uint8_t rsvd1[2];
+			uint8_t status;
+			uint8_t additional_status;
+			uint8_t rsvd2[2];
+			uint32_t response_length;
+			uint32_t actual_response_length;
+			uint32_t actual_write_length;
+		} rsp;
+	} params;
+} __packed;
+
+/**
+ * @brief MBX Common Query Firmware Config
+ * This command retrieves firmware configuration parameters and adapter
+ * resources available to the driver originating the request. The firmware
+ * configuration defines supported protocols by the installed adapter firmware.
+ * This includes which ULP processors support the specified protocols and
+ * the number of TCP connections allowed for that protocol.
+ */
+struct mbx_common_query_fw_config {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0[30];
+		} req;
+
+		struct {
+			uint32_t config_number;
+			uint32_t asic_revision;
+			uint32_t port_id;	/* used for stats retrieval */
+			uint32_t function_mode;
+			/* per-ULP-processor queue id ranges */
+			struct {
+
+				uint32_t ulp_mode;
+				uint32_t nic_wqid_base;
+				uint32_t nic_wq_tot;
+				uint32_t toe_wqid_base;
+				uint32_t toe_wq_tot;
+				uint32_t toe_rqid_base;
+				uint32_t toe_rqid_tot;
+				uint32_t toe_defrqid_base;
+				uint32_t toe_defrqid_count;
+				uint32_t lro_rqid_base;
+				uint32_t lro_rqid_tot;
+				uint32_t iscsi_icd_base;
+				uint32_t iscsi_icd_count;
+			} ulp[2];
+			uint32_t function_caps;
+			uint32_t cqid_base;
+			uint32_t cqid_tot;
+			uint32_t eqid_base;
+			uint32_t eqid_tot;
+		} rsp;
+	} params;
+} __packed;
+
+/* Values returned in mbx_common_query_fw_config rsp.config_number. */
+enum CQFW_CONFIG_NUMBER {
+	FCN_NIC_ISCSI_Initiator = 0x0,
+	FCN_ISCSI_Target = 0x3,
+	FCN_FCoE = 0x7,
+	FCN_ISCSI_Initiator_Target = 0x9,
+	FCN_NIC_RDMA_TOE = 0xA,
+	FCN_NIC_RDMA_FCoE = 0xB,
+	FCN_NIC_RDMA_iSCSI = 0xC,
+	FCN_NIC_iSCSI_FCoE = 0xD
+};
+
+/**
+ * @brief Function Capabilities
+ * This field contains the flags indicating the capabilities of
+ * the SLI Host's PCI function.
+ */
+enum CQFW_FUNCTION_CAPABILITIES {
+	FNC_UNCLASSIFIED_STATS = 0x1,
+	FNC_RSS = 0x2,
+	FNC_PROMISCUOUS = 0x4,
+	FNC_LEGACY_MODE = 0x8,
+	FNC_HDS = 0x4000,
+	FNC_VMQ = 0x10000,
+	FNC_NETQ = 0x20000,
+	FNC_QGROUPS = 0x40000,
+	FNC_LRO = 0x100000,
+	FNC_VLAN_OFFLOAD = 0x800000
+};
+
+/* Per-ULP mode bits reported in the query_fw_config ulp_mode field. */
+enum CQFW_ULP_MODES_SUPPORTED {
+	ULP_TOE_MODE = 0x1,
+	ULP_NIC_MODE = 0x2,
+	ULP_RDMA_MODE = 0x4,
+	ULP_ISCSI_INI_MODE = 0x10,
+	ULP_ISCSI_TGT_MODE = 0x20,
+	ULP_FCOE_INI_MODE = 0x40,
+	ULP_FCOE_TGT_MODE = 0x80,
+	ULP_DAL_MODE = 0x100,
+	ULP_LRO_MODE = 0x200
+};
+
+/**
+ * @brief Function Modes Supported
+ * Valid function modes (or protocol-types) supported on the SLI-Host's
+ * PCIe function. This field is a logical OR of the following values:
+ */
+enum CQFW_FUNCTION_MODES_SUPPORTED {
+	FNM_TOE_MODE = 0x1,		/* TCP offload supported */
+	FNM_NIC_MODE = 0x2,		/* Raw Ethernet supported */
+	FNM_RDMA_MODE = 0x4,		/* RDMA protocol supported */
+	FNM_VM_MODE = 0x8,		/* Virtual Machines supported */
+	FNM_ISCSI_INI_MODE = 0x10,	/* iSCSI initiator supported */
+	FNM_ISCSI_TGT_MODE = 0x20,	/* iSCSI target plus initiator */
+	FNM_FCOE_INI_MODE = 0x40,	/* FCoE Initiator supported */
+	FNM_FCOE_TGT_MODE = 0x80,	/* FCoE target supported */
+	FNM_DAL_MODE = 0x100,		/* DAL supported */
+	FNM_LRO_MODE = 0x200,		/* LRO supported */
+	FNM_FLEX10_MODE = 0x400,	/* QinQ, FLEX-10 or VNIC */
+	FNM_NCSI_MODE = 0x800,		/* NCSI supported */
+	FNM_IPV6_MODE = 0x1000,		/* IPV6 stack enabled */
+	FNM_BE2_COMPAT_MODE = 0x2000,	/* BE2 compatibility (BE3 disable)*/
+	FNM_INVALID_MODE = 0x8000,	/* Invalid */
+	FNM_BE3_COMPAT_MODE = 0x10000,	/* BE3 features */
+	FNM_VNIC_MODE = 0x20000,	/* Set when IBM vNIC mode is set */
+	FNM_VNTAG_MODE = 0x40000,	/* Set when VNTAG mode is set */
+	FNM_UMC_MODE = 0x1000000,	/* Set when UMC mode is set */
+	FNM_UMC_DEF_EN = 0x100000,	/* Set when UMC Default is set */
+	FNM_ONE_GB_EN = 0x200000,	/* Set when 1GB Default is set */
+	FNM_VNIC_DEF_VALID = 0x400000,	/* Set when VNIC_DEF_EN is valid */
+	FNM_VNIC_DEF_EN = 0x800000	/* Set when VNIC Default enabled */
+};
+
+/* Mailbox command: program the VLAN filter table for an interface;
+ * tags are either plain VLANs or QinQ (inner/outer) pairs. */
+struct mbx_common_config_vlan {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint8_t num_vlans;
+			uint8_t untagged;
+			uint8_t promisc;
+			uint8_t if_id;
+#else
+			uint8_t if_id;
+			uint8_t promisc;
+			uint8_t untagged;
+			uint8_t num_vlans;
+#endif
+			union {
+				struct normal_vlan normal_vlans[64];
+				struct qinq_vlan qinq_vlans[32];
+			} tags;
+		} req;
+
+		struct {
+			uint32_t rsvd;
+		} rsp;
+	} params;
+} __packed;
+
+/* Receive-filter programming context: flag masks select which bits of
+ * the corresponding flags words are updated; mac[] holds the multicast
+ * address list (num_mcast entries). */
+typedef struct iface_rx_filter_ctx {
+	uint32_t global_flags_mask;
+	uint32_t global_flags;
+	uint32_t iface_flags_mask;
+	uint32_t iface_flags;
+	uint32_t if_id;
+	#define IFACE_RX_NUM_MCAST_MAX		64
+	uint32_t num_mcast;
+	struct mbx_mcast_addr {
+		uint8_t byte[6];
+	} mac[IFACE_RX_NUM_MCAST_MAX];
+} __packed iface_rx_filter_ctx_t;
+
+/* [34] OPCODE_COMMON_SET_IFACE_RX_FILTER */
+struct mbx_set_common_iface_rx_filter {
+	struct mbx_hdr hdr;
+	union {
+		iface_rx_filter_ctx_t req;
+		iface_rx_filter_ctx_t rsp;
+	} params;
+} __packed;
+
+/* [41] OPCODE_COMMON_MODIFY_EQ_DELAY */
+/* Adjusts interrupt-delay parameters on up to 8 event queues at once. */
+struct mbx_modify_common_eq_delay {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t num_eq;
+			struct {
+				uint32_t eq_id;
+				uint32_t phase;
+				uint32_t dm;
+			} delay[8];
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [59] OPCODE_ADD_COMMON_IFACE_MAC */
+/* Adds a unicast MAC to an interface; the returned pmac_id is used to
+ * delete it again with the command below. */
+struct mbx_add_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t if_id;
+			uint8_t mac_address[6];
+			uint8_t rsvd0[2];
+		} req;
+		struct {
+			uint32_t pmac_id;
+		} rsp;
+	} params;
+} __packed;
+
+/* [60] OPCODE_DEL_COMMON_IFACE_MAC */
+struct mbx_del_common_iface_mac {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t if_id;
+			uint32_t pmac_id;
+		} req;
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [8] OPCODE_QUERY_COMMON_MAX_MBX_BUFFER_SIZE */
+/* Response-only body: largest mailbox payload the firmware accepts. */
+struct mbx_query_common_max_mbx_buffer_size {
+	struct mbx_hdr hdr;
+	struct {
+		uint32_t max_ioctl_bufsz;
+	} rsp;
+} __packed;
+
+/* [61] OPCODE_COMMON_FUNCTION_RESET */
+/* Header-only command: resets this PCI function. */
+struct ioctl_common_function_reset {
+	struct mbx_hdr hdr;
+} __packed;
+
+/* [80] OPCODE_COMMON_FUNCTION_LINK_CONFIG */
+/* Enables or disables the link for this function. */
+struct mbx_common_func_link_cfg {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t enable;
+		} req;
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [103] OPCODE_COMMON_SET_FUNCTIONAL_CAPS */
+#define CAP_SW_TIMESTAMPS	2
+#define CAP_BE3_NATIVE_ERX_API	4
+
+/* Negotiates optional capabilities: valid_capability_flags masks which
+ * bits of capability_flags the firmware should consider. */
+struct mbx_common_set_function_cap {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t valid_capability_flags;
+			uint32_t capability_flags;
+			uint8_t sbz[212];
+		} req;
+		struct {
+			uint32_t valid_capability_flags;
+			uint32_t capability_flags;
+			uint8_t sbz[212];
+		} rsp;
+	} params;
+} __packed;
+/* OPCODE_LOWLEVEL_TEST_LOOPBACK: run a loopback pattern test between
+ * two ports and report transfer/miscompare counters. */
+struct mbx_lowlevel_test_loopback_mode {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t loopback_type;
+			uint32_t num_pkts;
+			uint64_t pattern;
+			uint32_t src_port;
+			uint32_t dest_port;
+			uint32_t pkt_size;
+		}req;
+		struct {
+			uint32_t status;
+			uint32_t num_txfer;
+			uint32_t num_rx;
+			uint32_t miscomp_off;
+			uint32_t ticks_compl;
+		}rsp;
+	} params;
+} __packed;
+
+/* OPCODE_LOWLEVEL_SET_LOOPBACK_MODE: switch a port in or out of a
+ * given loopback mode. */
+struct mbx_lowlevel_set_loopback_mode {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint8_t src_port;
+			uint8_t dest_port;
+			uint8_t loopback_type;
+			uint8_t loopback_state;
+		} req;
+		struct {
+			uint8_t rsvd0[4];
+		} rsp;
+	} params;
+} __packed;
+
+/* UFI firmware image file header (start of a flash image file). */
+struct flash_file_hdr {
+	uint8_t sign[52];
+	uint8_t ufi_version[4];
+	uint32_t file_len;
+	uint32_t cksum;
+	uint32_t antidote;
+	uint32_t num_imgs;
+	uint8_t build[24];
+	uint8_t rsvd[32];
+} __packed;
+
+/* Per-image descriptor inside a UFI file (num_imgs of these). */
+struct image_hdr {
+	uint32_t imageid;
+	uint32_t imageoffset;
+	uint32_t imagelength;
+	uint32_t image_checksum;
+	uint8_t image_version[32];
+} __packed;
+
+/* Header of a flash section directory. */
+struct flash_section_hdr {
+	uint32_t format_rev;
+	uint32_t cksum;
+	uint32_t antidote;
+	uint32_t num_images;
+	uint8_t id_string[128];
+	uint32_t rsvd[4];
+} __packed;
+
+/* One entry in the flash section directory. */
+struct flash_section_entry {
+	uint32_t type;
+	uint32_t offset;
+	uint32_t pad_size;
+	uint32_t image_size;
+	uint32_t cksum;
+	uint32_t entry_point;
+	uint32_t rsvd0;
+	uint32_t rsvd1;
+	uint8_t ver_data[32];
+} __packed;
+
+/* Complete flash section info: cookie + header + entry table. */
+struct flash_sec_info {
+	uint8_t cookie[32];
+	struct flash_section_hdr fsec_hdr;
+	struct flash_section_entry fsec_entry[32];
+} __packed;
+
+enum LOWLEVEL_SUBSYSTEM_OPCODES {
+/* Opcodes used for lowlevel functions common to many subsystems.
+ * Some of these opcodes are used for diagnostic functions only.
+ * These opcodes use the MBX_SUBSYSTEM_LOWLEVEL subsystem code.
+ */
+	OPCODE_LOWLEVEL_TEST_LOOPBACK = 18,
+	OPCODE_LOWLEVEL_SET_LOOPBACK_MODE = 19,
+	OPCODE_LOWLEVEL_GET_LOOPBACK_MODE = 20
+};
+
+enum LLDP_SUBSYSTEM_OPCODES {
+/* Opcodes used for LLDP subsystem for configuring the LLDP state machines. */
+	OPCODE_LLDP_GET_CFG = 1,
+	OPCODE_LLDP_SET_CFG = 2,
+	OPCODE_LLDP_GET_STATS = 3
+};
+
+enum DCBX_SUBSYSTEM_OPCODES {
+/* Opcodes used for DCBX. */
+	OPCODE_DCBX_GET_CFG = 1,
+	OPCODE_DCBX_SET_CFG = 2,
+	OPCODE_DCBX_GET_MIB_INFO = 3,
+	OPCODE_DCBX_GET_DCBX_MODE = 4,
+	OPCODE_DCBX_SET_MODE = 5
+};
+
+enum DMTF_SUBSYSTEM_OPCODES {
+/* Opcodes used for DMTF subsystem. */
+	OPCODE_DMTF_EXEC_CLP_CMD = 1
+};
+
+enum DIAG_SUBSYSTEM_OPCODES {
+/* Opcodes used for diag functions common to many subsystems. */
+	OPCODE_DIAG_RUN_DMA_TEST = 1,
+	OPCODE_DIAG_RUN_MDIO_TEST = 2,
+	OPCODE_DIAG_RUN_NLB_TEST = 3,
+	OPCODE_DIAG_RUN_ARM_TIMER_TEST = 4,
+	OPCODE_DIAG_GET_MAC = 5
+};
+
+enum VENDOR_SUBSYSTEM_OPCODES {
+/* Opcodes used for Vendor subsystem. */
+	OPCODE_VENDOR_SLI = 1
+};
+
+/* Management Status Codes */
+/* Primary status returned in a mailbox completion (0 = success). */
+enum MGMT_STATUS_SUCCESS {
+	MGMT_SUCCESS = 0,
+	MGMT_FAILED = 1,
+	MGMT_ILLEGAL_REQUEST = 2,
+	MGMT_ILLEGAL_FIELD = 3,
+	MGMT_INSUFFICIENT_BUFFER = 4,
+	MGMT_UNAUTHORIZED_REQUEST = 5,
+	MGMT_INVALID_ISNS_ADDRESS = 10,
+	MGMT_INVALID_IPADDR = 11,
+	MGMT_INVALID_GATEWAY = 12,
+	MGMT_INVALID_SUBNETMASK = 13,
+	MGMT_INVALID_TARGET_IPADDR = 16,
+	MGMT_TGTTBL_FULL = 20,
+	MGMT_FLASHROM_SAVE_FAILED = 23,
+	MGMT_IOCTLHANDLE_ALLOC_FAILED = 27,
+	MGMT_INVALID_SESSION = 31,
+	MGMT_INVALID_CONNECTION = 32,
+	MGMT_BTL_PATH_EXCEEDS_OSM_LIMIT = 33,
+	MGMT_BTL_TGTID_EXCEEDS_OSM_LIMIT = 34,
+	MGMT_BTL_PATH_TGTID_OCCUPIED = 35,
+	MGMT_BTL_NO_FREE_SLOT_PATH = 36,
+	MGMT_BTL_NO_FREE_SLOT_TGTID = 37,
+	MGMT_POLL_IOCTL_TIMEOUT = 40,
+	MGMT_ERROR_ACITISCSI = 41,
+	MGMT_BUFFER_SIZE_EXCEED_OSM_OR_OS_LIMIT = 43,
+	MGMT_REBOOT_REQUIRED = 44,
+	MGMT_INSUFFICIENT_TIMEOUT = 45,
+	MGMT_IPADDR_NOT_SET = 46,
+	MGMT_IPADDR_DUP_DETECTED = 47,
+	MGMT_CANT_REMOVE_LAST_CONNECTION = 48,
+	MGMT_TARGET_BUSY = 49,
+	MGMT_TGT_ERR_LISTEN_SOCKET = 50,
+	MGMT_TGT_ERR_BIND_SOCKET = 51,
+	MGMT_TGT_ERR_NO_SOCKET = 52,
+	MGMT_TGT_ERR_ISNS_COMM_FAILED = 55,
+	MGMT_CANNOT_DELETE_BOOT_TARGET = 56,
+	MGMT_TGT_PORTAL_MODE_IN_LISTEN = 57,
+	MGMT_FCF_IN_USE = 58,
+	MGMT_NO_CQE = 59,
+	MGMT_TARGET_NOT_FOUND = 65,
+	MGMT_NOT_SUPPORTED = 66,
+	MGMT_NO_FCF_RECORDS = 67,
+	MGMT_FEATURE_NOT_SUPPORTED = 68,
+	MGMT_VPD_FUNCTION_OUT_OF_RANGE = 69,
+	MGMT_VPD_FUNCTION_TYPE_INCORRECT = 70,
+	MGMT_INVALID_NON_EMBEDDED_WRB = 71,
+	MGMT_OOR = 100,
+	MGMT_INVALID_PD = 101,
+	MGMT_STATUS_PD_INUSE = 102,
+	MGMT_INVALID_CQ = 103,
+	MGMT_INVALID_QP = 104,
+	MGMT_INVALID_STAG = 105,
+	MGMT_ORD_EXCEEDS = 106,
+	MGMT_IRD_EXCEEDS = 107,
+	MGMT_SENDQ_WQE_EXCEEDS = 108,
+	MGMT_RECVQ_RQE_EXCEEDS = 109,
+	MGMT_SGE_SEND_EXCEEDS = 110,
+	MGMT_SGE_WRITE_EXCEEDS = 111,
+	MGMT_SGE_RECV_EXCEEDS = 112,
+	MGMT_INVALID_STATE_CHANGE = 113,
+	MGMT_MW_BOUND = 114,
+	MGMT_INVALID_VA = 115,
+	MGMT_INVALID_LENGTH = 116,
+	MGMT_INVALID_FBO = 117,
+	MGMT_INVALID_ACC_RIGHTS = 118,
+	MGMT_INVALID_PBE_SIZE = 119,
+	MGMT_INVALID_PBL_ENTRY = 120,
+	MGMT_INVALID_PBL_OFFSET = 121,
+	MGMT_ADDR_NON_EXIST = 122,
+	MGMT_INVALID_VLANID = 123,
+	MGMT_INVALID_MTU = 124,
+	MGMT_INVALID_BACKLOG = 125,
+	MGMT_CONNECTION_INPROGRESS = 126,
+	MGMT_INVALID_RQE_SIZE = 127,
+	MGMT_INVALID_RQE_ENTRY = 128
+};
+
+/* Additional Management Status Codes */
+/* Secondary (additional) status qualifying the code above. */
+enum MGMT_ADDI_STATUS {
+	MGMT_ADDI_NO_STATUS = 0,
+	MGMT_ADDI_INVALID_IPTYPE = 1,
+	MGMT_ADDI_TARGET_HANDLE_NOT_FOUND = 9,
+	MGMT_ADDI_SESSION_HANDLE_NOT_FOUND = 10,
+	MGMT_ADDI_CONNECTION_HANDLE_NOT_FOUND = 11,
+	MGMT_ADDI_ACTIVE_SESSIONS_PRESENT = 16,
+	MGMT_ADDI_SESSION_ALREADY_OPENED = 17,
+	MGMT_ADDI_SESSION_ALREADY_CLOSED = 18,
+	MGMT_ADDI_DEST_HOST_UNREACHABLE = 19,
+	MGMT_ADDI_LOGIN_IN_PROGRESS = 20,
+	MGMT_ADDI_TCP_CONNECT_FAILED = 21,
+	MGMT_ADDI_INSUFFICIENT_RESOURCES = 22,
+	MGMT_ADDI_LINK_DOWN = 23,
+	MGMT_ADDI_DHCP_ERROR = 24,
+	MGMT_ADDI_CONNECTION_OFFLOADED = 25,
+	MGMT_ADDI_CONNECTION_NOT_OFFLOADED = 26,
+	MGMT_ADDI_CONNECTION_UPLOAD_IN_PROGRESS = 27,
+	MGMT_ADDI_REQUEST_REJECTED = 28,
+	MGMT_ADDI_INVALID_SUBSYSTEM = 29,
+	MGMT_ADDI_INVALID_OPCODE = 30,
+	MGMT_ADDI_INVALID_MAXCONNECTION_PARAM = 31,
+	MGMT_ADDI_INVALID_KEY = 32,
+	MGMT_ADDI_INVALID_DOMAIN = 35,
+	MGMT_ADDI_LOGIN_INITIATOR_ERROR = 43,
+	MGMT_ADDI_LOGIN_AUTHENTICATION_ERROR = 44,
+	MGMT_ADDI_LOGIN_AUTHORIZATION_ERROR = 45,
+	MGMT_ADDI_LOGIN_NOT_FOUND = 46,
+	MGMT_ADDI_LOGIN_TARGET_REMOVED = 47,
+	MGMT_ADDI_LOGIN_UNSUPPORTED_VERSION = 48,
+	MGMT_ADDI_LOGIN_TOO_MANY_CONNECTIONS = 49,
+	MGMT_ADDI_LOGIN_MISSING_PARAMETER = 50,
+	MGMT_ADDI_LOGIN_NO_SESSION_SPANNING = 51,
+	MGMT_ADDI_LOGIN_SESSION_TYPE_NOT_SUPPORTED = 52,
+	MGMT_ADDI_LOGIN_SESSION_DOES_NOT_EXIST = 53,
+	MGMT_ADDI_LOGIN_INVALID_DURING_LOGIN = 54,
+	MGMT_ADDI_LOGIN_TARGET_ERROR = 55,
+	MGMT_ADDI_LOGIN_SERVICE_UNAVAILABLE = 56,
+	MGMT_ADDI_LOGIN_OUT_OF_RESOURCES = 57,
+	MGMT_ADDI_SAME_CHAP_SECRET = 58,
+	MGMT_ADDI_INVALID_SECRET_LENGTH = 59,
+	MGMT_ADDI_DUPLICATE_ENTRY = 60,
+	MGMT_ADDI_SETTINGS_MODIFIED_REBOOT_REQD = 63,
+	MGMT_ADDI_INVALID_EXTENDED_TIMEOUT = 64,
+	MGMT_ADDI_INVALID_INTERFACE_HANDLE = 65,
+	MGMT_ADDI_ERR_VLAN_ON_DEF_INTERFACE = 66,
+	MGMT_ADDI_INTERFACE_DOES_NOT_EXIST = 67,
+	MGMT_ADDI_INTERFACE_ALREADY_EXISTS = 68,
+	MGMT_ADDI_INVALID_VLAN_RANGE = 69,
+	MGMT_ADDI_ERR_SET_VLAN = 70,
+	MGMT_ADDI_ERR_DEL_VLAN = 71,
+	MGMT_ADDI_CANNOT_DEL_DEF_INTERFACE = 72,
+	MGMT_ADDI_DHCP_REQ_ALREADY_PENDING = 73,
+	MGMT_ADDI_TOO_MANY_INTERFACES = 74,
+	MGMT_ADDI_INVALID_REQUEST = 75
+};
+
+enum NIC_SUBSYSTEM_OPCODES {
+/**
+ * @brief NIC Subsystem Opcodes (see Network SLI-4 manual >= Rev4, v21-2)
+ * These opcodes are used for configuring the Ethernet interfaces.
+ * These opcodes all use the MBX_SUBSYSTEM_NIC subsystem code.
+ */
+	OPCODE_NIC_CONFIG_RSS = 1,
+	OPCODE_NIC_CONFIG_ACPI = 2,
+	OPCODE_NIC_CONFIG_PROMISCUOUS = 3,
+	OPCODE_NIC_GET_STATS = 4,
+	OPCODE_NIC_CREATE_WQ = 7,
+	OPCODE_NIC_CREATE_RQ = 8,
+	OPCODE_NIC_DELETE_WQ = 9,
+	OPCODE_NIC_DELETE_RQ = 10,
+	OPCODE_NIC_CONFIG_ACPI_WOL_MAGIC = 12,
+	OPCODE_NIC_GET_NETWORK_STATS = 13,
+	OPCODE_NIC_CREATE_HDS_RQ = 16,
+	OPCODE_NIC_DELETE_HDS_RQ = 17,
+	OPCODE_NIC_GET_PPORT_STATS = 18,
+	OPCODE_NIC_GET_VPORT_STATS = 19,
+	OPCODE_NIC_GET_QUEUE_STATS = 20
+};
+
+/* Hash option flags for RSS enable */
+enum RSS_ENABLE_FLAGS {
+	RSS_ENABLE_NONE = 0x0,		/* (No RSS) */
+	RSS_ENABLE_IPV4 = 0x1,		/* (IPV4 HASH enabled) */
+	RSS_ENABLE_TCP_IPV4 = 0x2,	/* (TCP IPV4 Hash enabled) */
+	RSS_ENABLE_IPV6 = 0x4,		/* (IPV6 HASH enabled) */
+	RSS_ENABLE_TCP_IPV6 = 0x8	/* (TCP IPV6 HASH enabled) */
+};
+#define RSS_ENABLE (RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4)
+#define RSS_DISABLE RSS_ENABLE_NONE
+
+/* NIC header WQE */
+/* First WQE of every transmit: carries offload flags (checksum, LSO,
+ * VLAN insertion) and total frame length for the fragment WQEs that
+ * follow it on the work queue. */
+struct oce_nic_hdr_wqe {
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw0 */
+			uint32_t rsvd0;
+
+			/* dw1 */
+			uint32_t last_seg_udp_len:14;
+			uint32_t rsvd1:18;
+
+			/* dw2 */
+			uint32_t lso_mss:14;
+			uint32_t num_wqe:5;
+			uint32_t rsvd4:2;
+			uint32_t vlan:1;
+			uint32_t lso:1;
+			uint32_t tcpcs:1;
+			uint32_t udpcs:1;
+			uint32_t ipcs:1;
+			uint32_t rsvd3:1;
+			uint32_t rsvd2:1;
+			uint32_t forward:1;
+			uint32_t crc:1;
+			uint32_t event:1;
+			uint32_t complete:1;
+
+			/* dw3 */
+			uint32_t vlan_tag:16;
+			uint32_t total_length:16;
+#else
+			/* dw0 */
+			uint32_t rsvd0;
+
+			/* dw1 */
+			uint32_t rsvd1:18;
+			uint32_t last_seg_udp_len:14;
+
+			/* dw2 */
+			uint32_t complete:1;
+			uint32_t event:1;
+			uint32_t crc:1;
+			uint32_t forward:1;
+			uint32_t rsvd2:1;
+			uint32_t rsvd3:1;
+			uint32_t ipcs:1;
+			uint32_t udpcs:1;
+			uint32_t tcpcs:1;
+			uint32_t lso:1;
+			uint32_t vlan:1;
+			uint32_t rsvd4:2;
+			uint32_t num_wqe:5;
+			uint32_t lso_mss:14;
+
+			/* dw3 */
+			uint32_t total_length:16;
+			uint32_t vlan_tag:16;
+#endif
+		} s;
+		uint32_t dw[4];
+	} u0;
+} __packed;
+
+/* NIC fragment WQE */
+/* One DMA fragment of a transmit: 64-bit bus address plus length. */
+struct oce_nic_frag_wqe {
+	union {
+		struct {
+			/* dw0 */
+			uint32_t frag_pa_hi;
+			/* dw1 */
+			uint32_t frag_pa_lo;
+			/* dw2 */
+			uint32_t rsvd0;
+			/* dw3 */
+			uint32_t frag_len;
+		} s;
+		uint32_t dw[4];
+	} u0;
+} __packed;
+
+/* Ethernet Tx Completion Descriptor */
+/* Completion queue entry for a transmit; valid flips when the entry is
+ * written by hardware and is cleared again by the macros below. */
+struct oce_nic_tx_cqe {
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint32_t status:4;
+			uint32_t rsvd0:8;
+			uint32_t port:2;
+			uint32_t ct:2;
+			uint32_t wqe_index:16;
+			/* dw 1 */
+			uint32_t rsvd1:5;
+			uint32_t cast_enc:2;
+			uint32_t lso:1;
+			uint32_t nwh_bytes:8;
+			uint32_t user_bytes:16;
+			/* dw 2 */
+			uint32_t rsvd2;
+			/* dw 3 */
+			uint32_t valid:1;
+			uint32_t rsvd3:4;
+			uint32_t wq_id:11;
+			uint32_t num_pkts:16;
+#else
+			/* dw 0 */
+			uint32_t wqe_index:16;
+			uint32_t ct:2;
+			uint32_t port:2;
+			uint32_t rsvd0:8;
+			uint32_t status:4;
+			/* dw 1 */
+			uint32_t user_bytes:16;
+			uint32_t nwh_bytes:8;
+			uint32_t lso:1;
+			uint32_t cast_enc:2;
+			uint32_t rsvd1:5;
+			/* dw 2 */
+			uint32_t rsvd2;
+			/* dw 3 */
+			uint32_t num_pkts:16;
+			uint32_t wq_id:11;
+			uint32_t rsvd3:4;
+			uint32_t valid:1;
+#endif
+		} s;
+		uint32_t dw[4];
+	} u0;
+} __packed;
+/* dw3 holds the valid bit; zeroing the whole dword invalidates the CQE. */
+#define WQ_CQE_VALID(_cqe)	(_cqe->u0.dw[3])
+#define WQ_CQE_INVALIDATE(_cqe)	(_cqe->u0.dw[3] = 0)
+
+/* Receive Queue Entry (RQE) */
+/* Posted receive buffer: 64-bit bus address of one RX fragment. */
+struct oce_nic_rqe {
+	union {
+		struct {
+			uint32_t frag_pa_hi;
+			uint32_t frag_pa_lo;
+		} s;
+		uint32_t dw[2];
+	} u0;
+} __packed;
+
+/* NIC Receive CQE */
+/* Completion queue entry for a received frame (pre-v1 / BE2 layout). */
+struct oce_nic_rx_cqe {
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint32_t ip_options:1;
+			uint32_t port:1;
+			uint32_t pkt_size:14;
+			uint32_t vlan_tag:16;
+			/* dw 1 */
+			uint32_t num_fragments:3;
+			uint32_t switched:1;
+			uint32_t ct:2;
+			uint32_t frag_index:10;
+			uint32_t rsvd0:1;
+			uint32_t vlan_tag_present:1;
+			uint32_t mac_dst:6;
+			uint32_t ip_ver:1;
+			uint32_t l4_cksum_pass:1;
+			uint32_t ip_cksum_pass:1;
+			uint32_t udpframe:1;
+			uint32_t tcpframe:1;
+			uint32_t ipframe:1;
+			uint32_t rss_hp:1;
+			uint32_t error:1;
+			/* dw 2 */
+			uint32_t valid:1;
+			uint32_t hds_type:2;
+			uint32_t lro_pkt:1;
+			uint32_t rsvd4:1;
+			uint32_t hds_hdr_size:12;
+			uint32_t hds_hdr_frag_index:10;
+			uint32_t rss_bank:1;
+			uint32_t qnq:1;
+			uint32_t pkt_type:2;
+			uint32_t rss_flush:1;
+			/* dw 3 */
+			uint32_t rss_hash_value;
+#else
+			/* dw 0 */
+			uint32_t vlan_tag:16;
+			uint32_t pkt_size:14;
+			uint32_t port:1;
+			uint32_t ip_options:1;
+			/* dw 1 */
+			uint32_t error:1;
+			uint32_t rss_hp:1;
+			uint32_t ipframe:1;
+			uint32_t tcpframe:1;
+			uint32_t udpframe:1;
+			uint32_t ip_cksum_pass:1;
+			uint32_t l4_cksum_pass:1;
+			uint32_t ip_ver:1;
+			uint32_t mac_dst:6;
+			uint32_t vlan_tag_present:1;
+			uint32_t rsvd0:1;
+			uint32_t frag_index:10;
+			uint32_t ct:2;
+			uint32_t switched:1;
+			uint32_t num_fragments:3;
+			/* dw 2 */
+			uint32_t rss_flush:1;
+			uint32_t pkt_type:2;
+			uint32_t qnq:1;
+			uint32_t rss_bank:1;
+			uint32_t hds_hdr_frag_index:10;
+			uint32_t hds_hdr_size:12;
+			uint32_t rsvd4:1;
+			uint32_t lro_pkt:1;
+			uint32_t hds_type:2;
+			uint32_t valid:1;
+			/* dw 3 */
+			uint32_t rss_hash_value;
+#endif
+		} s;
+		uint32_t dw[4];
+	} u0;
+} __packed;
+
+/* NIC Receive CQE_v1 */
+/* Completion queue entry for a received frame, v1 (BE3 native) layout.
+ * Both endian branches must describe the same 4-dword wire format. */
+struct oce_nic_rx_cqe_v1 {
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw 0 */
+			uint32_t ip_options:1;
+			uint32_t vlan_tag_present:1;
+			uint32_t pkt_size:14;
+			uint32_t vlan_tag:16;
+			/* dw 1 */
+			uint32_t num_fragments:3;
+			uint32_t switched:1;
+			uint32_t ct:2;
+			uint32_t frag_index:10;
+			uint32_t rsvd0:1;
+			uint32_t mac_dst:7;
+			uint32_t ip_ver:1;
+			uint32_t l4_cksum_pass:1;
+			uint32_t ip_cksum_pass:1;
+			uint32_t udpframe:1;
+			uint32_t tcpframe:1;
+			uint32_t ipframe:1;
+			uint32_t rss_hp:1;
+			uint32_t error:1;
+			/* dw 2 */
+			uint32_t valid:1;
+			uint32_t rsvd4:13;
+			/* was "hds_hdr_size:" with no width/terminator; 2 bits
+			 * to mirror the little-endian layout below and make
+			 * the dword sum to 32 bits */
+			uint32_t hds_hdr_size:2;
+			uint32_t hds_hdr_frag_index:8;
+			uint32_t vlantag:1;
+			uint32_t port:2;
+			uint32_t rss_bank:1;
+			uint32_t qnq:1;
+			uint32_t pkt_type:2;
+			uint32_t rss_flush:1;
+			/* dw 3 */
+			uint32_t rss_hash_value;
+#else
+			/* dw 0 */
+			uint32_t vlan_tag:16;
+			uint32_t pkt_size:14;
+			uint32_t vlan_tag_present:1;
+			uint32_t ip_options:1;
+			/* dw 1 */
+			uint32_t error:1;
+			uint32_t rss_hp:1;
+			uint32_t ipframe:1;
+			uint32_t tcpframe:1;
+			uint32_t udpframe:1;
+			uint32_t ip_cksum_pass:1;
+			uint32_t l4_cksum_pass:1;
+			uint32_t ip_ver:1;
+			uint32_t mac_dst:7;
+			uint32_t rsvd0:1;
+			uint32_t frag_index:10;
+			uint32_t ct:2;
+			uint32_t switched:1;
+			uint32_t num_fragments:3;
+			/* dw 2 */
+			uint32_t rss_flush:1;
+			uint32_t pkt_type:2;
+			uint32_t qnq:1;
+			uint32_t rss_bank:1;
+			uint32_t port:2;
+			uint32_t vlantag:1;
+			uint32_t hds_hdr_frag_index:8;
+			uint32_t hds_hdr_size:2;
+			uint32_t rsvd4:13;
+			uint32_t valid:1;
+			/* dw 3 */
+			uint32_t rss_hash_value;
+#endif
+		} s;
+		uint32_t dw[4];
+	} u0;
+} __packed;
+
+/* RX CQE valid bit lives in dw2; zeroing the dword invalidates the CQE. */
+#define RQ_CQE_VALID_MASK	0x80
+#define RQ_CQE_VALID(_cqe)	(_cqe->u0.dw[2])
+#define RQ_CQE_INVALIDATE(_cqe)	(_cqe->u0.dw[2] = 0)
+
+/* [03] OPCODE_NIC_CONFIG_PROMISCUOUS */
+/* Enables/disables promiscuous reception per physical port. */
+struct mbx_config_nic_promiscuous {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint16_t rsvd0;
+			uint8_t port1_promisc;
+			uint8_t port0_promisc;
+#else
+			uint8_t port0_promisc;
+			uint8_t port1_promisc;
+			uint16_t rsvd0;
+#endif
+		} req;
+
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* Work-queue creation context (dwords 4-20 of CREATE_WQ); v0 is the
+ * legacy layout, v1 adds an interface id (Lancer). */
+typedef union oce_wq_ctx_u {
+	uint32_t dw[17];
+	struct {
+#ifdef _BIG_ENDIAN
+		/* dw4 */
+		uint32_t dw4rsvd2:8;
+		uint32_t nic_wq_type:8;
+		uint32_t dw4rsvd1:8;
+		uint32_t num_pages:8;
+		/* dw5 */
+		uint32_t dw5rsvd2:12;
+		uint32_t wq_size:4;
+		uint32_t dw5rsvd1:16;
+		/* dw6 */
+		uint32_t valid:1;
+		uint32_t dw6rsvd1:31;
+		/* dw7 */
+		uint32_t dw7rsvd1:16;
+		uint32_t cq_id:16;
+#else
+		/* dw4 */
+		uint32_t num_pages:8;
+#if 0
+		uint32_t dw4rsvd1:8;
+#else
+/* PSP: this workaround is not documented: fill 0x01 for ulp_mask */
+		uint32_t ulp_mask:8;
+#endif
+		uint32_t nic_wq_type:8;
+		uint32_t dw4rsvd2:8;
+		/* dw5 */
+		uint32_t dw5rsvd1:16;
+		uint32_t wq_size:4;
+		uint32_t dw5rsvd2:12;
+		/* dw6 */
+		uint32_t dw6rsvd1:31;
+		uint32_t valid:1;
+		/* dw7 */
+		uint32_t cq_id:16;
+		uint32_t dw7rsvd1:16;
+#endif
+		/* dw8 - dw20 */
+		uint32_t dw8_20rsvd1[13];
+	} v0;
+	struct {
+#ifdef _BIG_ENDIAN
+		/* dw4 */
+		uint32_t dw4rsvd2:8;
+		uint32_t nic_wq_type:8;
+		uint32_t dw4rsvd1:8;
+		uint32_t num_pages:8;
+		/* dw5 */
+		uint32_t dw5rsvd2:12;
+		uint32_t wq_size:4;
+		uint32_t iface_id:16;
+		/* dw6 */
+		uint32_t valid:1;
+		uint32_t dw6rsvd1:31;
+		/* dw7 */
+		uint32_t dw7rsvd1:16;
+		uint32_t cq_id:16;
+#else
+		/* dw4 */
+		uint32_t num_pages:8;
+		uint32_t dw4rsvd1:8;
+		uint32_t nic_wq_type:8;
+		uint32_t dw4rsvd2:8;
+		/* dw5 */
+		uint32_t iface_id:16;
+		uint32_t wq_size:4;
+		uint32_t dw5rsvd2:12;
+		/* dw6 */
+		uint32_t dw6rsvd1:31;
+		uint32_t valid:1;
+		/* dw7 */
+		uint32_t cq_id:16;
+		uint32_t dw7rsvd1:16;
+#endif
+		/* dw8 - dw20 */
+		uint32_t dw8_20rsvd1[13];
+	} v1;
+} __packed oce_wq_ctx_t;
+
+/**
+ * @brief [07] NIC_CREATE_WQ
+ * @note
+ * Lancer requires an InterfaceID to be specified with every WQ. This
+ * is the basis for NIC IOV where the Interface maps to a vPort and maps
+ * to both Tx and Rx sides.
+ */
+#define OCE_WQ_TYPE_FORWARDING	0x1	/* wq forwards pkts to TOE */
+#define OCE_WQ_TYPE_STANDARD	0x2	/* wq sends network pkts */
+struct mbx_create_nic_wq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint8_t num_pages;
+			uint8_t ulp_num;
+			uint16_t nic_wq_type;
+			uint16_t if_id;
+			uint8_t wq_size;
+			uint8_t rsvd1;
+			uint32_t rsvd2;
+			uint16_t cq_id;
+			uint16_t rsvd3;
+			uint32_t rsvd4[13];
+			struct phys_addr pages[8];
+
+		} req;
+
+		struct {
+			uint16_t wq_id;
+			uint16_t rid;
+			uint32_t db_offset;
+			uint8_t tc_id;
+			uint8_t rsvd0[3];
+		} rsp;
+	} params;
+} __packed;
+
+/* [09] NIC_DELETE_WQ */
+/* Tears down the work queue identified by req.wq_id. */
+struct mbx_delete_nic_wq {
+	/* dw0 - dw3 */
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw4 */
+			uint16_t rsvd0;
+			uint16_t wq_id;
+#else
+			/* dw4 */
+			uint16_t wq_id;
+			uint16_t rsvd0;
+#endif
+		} req;
+		struct {
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [08] NIC_CREATE_RQ */
+/* Creates a receive queue; response returns the RQ id and, for RSS
+ * queues, the CPU the queue is bound to. */
+struct mbx_create_nic_rq {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint16_t cq_id;
+			uint8_t frag_size;
+			uint8_t num_pages;
+			struct phys_addr pages[2];
+			uint32_t if_id;
+			uint16_t max_frame_size;
+			uint16_t page_size;
+			uint32_t is_rss_queue;
+		} req;
+
+		struct {
+			uint16_t rq_id;
+			uint8_t rss_cpuid;
+			uint8_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* [10] NIC_DELETE_RQ */
+/* Tears down a receive queue; bypass_flush skips draining pending
+ * buffers before deletion. */
+struct mbx_delete_nic_rq {
+	/* dw0 - dw3 */
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			/* dw4 */
+			uint16_t bypass_flush;
+			uint16_t rq_id;
+#else
+			/* dw4 */
+			uint16_t rq_id;
+			uint16_t bypass_flush;
+#endif
+		} req;
+
+		struct {
+			/* dw4 */
+			uint32_t rsvd0;
+		} rsp;
+	} params;
+} __packed;
+
+/* Per-port RX/TX MAC counter block, stats layout v0 (BE2). */
+struct oce_port_rxf_stats_v0 {
+	uint32_t rx_bytes_lsd;			/* dword 0*/
+	uint32_t rx_bytes_msd;			/* dword 1*/
+	uint32_t rx_total_frames;		/* dword 2*/
+	uint32_t rx_unicast_frames;		/* dword 3*/
+	uint32_t rx_multicast_frames;		/* dword 4*/
+	uint32_t rx_broadcast_frames;		/* dword 5*/
+	uint32_t rx_crc_errors;			/* dword 6*/
+	uint32_t rx_alignment_symbol_errors;	/* dword 7*/
+	uint32_t rx_pause_frames;		/* dword 8*/
+	uint32_t rx_control_frames;		/* dword 9*/
+	uint32_t rx_in_range_errors;		/* dword 10*/
+	uint32_t rx_out_range_errors;		/* dword 11*/
+	uint32_t rx_frame_too_long;		/* dword 12*/
+	uint32_t rx_address_match_errors;	/* dword 13*/
+	uint32_t rx_vlan_mismatch;		/* dword 14*/
+	uint32_t rx_dropped_too_small;		/* dword 15*/
+	uint32_t rx_dropped_too_short;		/* dword 16*/
+	uint32_t rx_dropped_header_too_small;	/* dword 17*/
+	uint32_t rx_dropped_tcp_length;		/* dword 18*/
+	uint32_t rx_dropped_runt;		/* dword 19*/
+	uint32_t rx_64_byte_packets;		/* dword 20*/
+	uint32_t rx_65_127_byte_packets;	/* dword 21*/
+	uint32_t rx_128_256_byte_packets;	/* dword 22*/
+	uint32_t rx_256_511_byte_packets;	/* dword 23*/
+	uint32_t rx_512_1023_byte_packets;	/* dword 24*/
+	uint32_t rx_1024_1518_byte_packets;	/* dword 25*/
+	uint32_t rx_1519_2047_byte_packets;	/* dword 26*/
+	uint32_t rx_2048_4095_byte_packets;	/* dword 27*/
+	uint32_t rx_4096_8191_byte_packets;	/* dword 28*/
+	uint32_t rx_8192_9216_byte_packets;	/* dword 29*/
+	uint32_t rx_ip_checksum_errs;		/* dword 30*/
+	uint32_t rx_tcp_checksum_errs;		/* dword 31*/
+	uint32_t rx_udp_checksum_errs;		/* dword 32*/
+	uint32_t rx_non_rss_packets;		/* dword 33*/
+	uint32_t rx_ipv4_packets;		/* dword 34*/
+	uint32_t rx_ipv6_packets;		/* dword 35*/
+	uint32_t rx_ipv4_bytes_lsd;		/* dword 36*/
+	uint32_t rx_ipv4_bytes_msd;		/* dword 37*/
+	uint32_t rx_ipv6_bytes_lsd;		/* dword 38*/
+	uint32_t rx_ipv6_bytes_msd;		/* dword 39*/
+	uint32_t rx_chute1_packets;		/* dword 40*/
+	uint32_t rx_chute2_packets;		/* dword 41*/
+	uint32_t rx_chute3_packets;		/* dword 42*/
+	uint32_t rx_management_packets;		/* dword 43*/
+	uint32_t rx_switched_unicast_packets;	/* dword 44*/
+	uint32_t rx_switched_multicast_packets;	/* dword 45*/
+	uint32_t rx_switched_broadcast_packets;	/* dword 46*/
+	uint32_t tx_bytes_lsd;			/* dword 47*/
+	uint32_t tx_bytes_msd;			/* dword 48*/
+	uint32_t tx_unicastframes;		/* dword 49*/
+	uint32_t tx_multicastframes;		/* dword 50*/
+	uint32_t tx_broadcastframes;		/* dword 51*/
+	uint32_t tx_pauseframes;		/* dword 52*/
+	uint32_t tx_controlframes;		/* dword 53*/
+	uint32_t tx_64_byte_packets;		/* dword 54*/
+	uint32_t tx_65_127_byte_packets;	/* dword 55*/
+	uint32_t tx_128_256_byte_packets;	/* dword 56*/
+	uint32_t tx_256_511_byte_packets;	/* dword 57*/
+	uint32_t tx_512_1023_byte_packets;	/* dword 58*/
+	uint32_t tx_1024_1518_byte_packets;	/* dword 59*/
+	uint32_t tx_1519_2047_byte_packets;	/* dword 60*/
+	uint32_t tx_2048_4095_byte_packets;	/* dword 61*/
+	uint32_t tx_4096_8191_byte_packets;	/* dword 62*/
+	uint32_t tx_8192_9216_byte_packets;	/* dword 63*/
+	uint32_t rxpp_fifo_overflow_drop;	/* dword 64*/
+	uint32_t rx_input_fifo_overflow_drop;	/* dword 65*/
+} __packed;
+
+/* RX filter statistics block, v0 layout: two ports plus globals. */
+struct oce_rxf_stats_v0 {
+	struct oce_port_rxf_stats_v0 port[2];
+	uint32_t rx_drops_no_pbuf;		/* dword 132*/
+	uint32_t rx_drops_no_txpb;		/* dword 133*/
+	uint32_t rx_drops_no_erx_descr;		/* dword 134*/
+	uint32_t rx_drops_no_tpre_descr;	/* dword 135*/
+	uint32_t management_rx_port_packets;	/* dword 136*/
+	uint32_t management_rx_port_bytes;	/* dword 137*/
+	uint32_t management_rx_port_pause_frames;/* dword 138*/
+	uint32_t management_rx_port_errors;	/* dword 139*/
+	uint32_t management_tx_port_packets;	/* dword 140*/
+	uint32_t management_tx_port_bytes;	/* dword 141*/
+	uint32_t management_tx_port_pause;	/* dword 142*/
+	uint32_t management_rx_port_rxfifo_overflow;	/* dword 143*/
+	uint32_t rx_drops_too_many_frags;	/* dword 144*/
+	uint32_t rx_drops_invalid_ring;		/* dword 145*/
+	uint32_t forwarded_packets;		/* dword 146*/
+	uint32_t rx_drops_mtu;			/* dword 147*/
+	uint32_t rsvd0[7];
+	uint32_t port0_jabber_events;
+	uint32_t port1_jabber_events;
+	uint32_t rsvd1[6];
+} __packed;
+
+/* Per-port RX/TX MAC counter block, stats layout v1 (BE3). */
+struct oce_port_rxf_stats_v1 {
+	uint32_t rsvd0[12];
+	uint32_t rx_crc_errors;
+	uint32_t rx_alignment_symbol_errors;
+	uint32_t rx_pause_frames;
+	uint32_t rx_priority_pause_frames;
+	uint32_t rx_control_frames;
+	uint32_t rx_in_range_errors;
+	uint32_t rx_out_range_errors;
+	uint32_t rx_frame_too_long;
+	uint32_t rx_address_match_errors;
+	uint32_t rx_dropped_too_small;
+	uint32_t rx_dropped_too_short;
+	uint32_t rx_dropped_header_too_small;
+	uint32_t rx_dropped_tcp_length;
+	uint32_t rx_dropped_runt;
+	uint32_t rsvd1[10];
+	uint32_t rx_ip_checksum_errs;
+	uint32_t rx_tcp_checksum_errs;
+	uint32_t rx_udp_checksum_errs;
+	uint32_t rsvd2[7];
+	uint32_t rx_switched_unicast_packets;
+	uint32_t rx_switched_multicast_packets;
+	uint32_t rx_switched_broadcast_packets;
+	uint32_t rsvd3[3];
+	uint32_t tx_pauseframes;
+	uint32_t tx_priority_pauseframes;
+	uint32_t tx_controlframes;
+	uint32_t rsvd4[10];
+	uint32_t rxpp_fifo_overflow_drop;
+	uint32_t rx_input_fifo_overflow_drop;
+	uint32_t pmem_fifo_overflow_drop;
+	uint32_t jabber_events;
+	uint32_t rsvd5[3];
+} __packed;
+
+struct oce_rxf_stats_v1 {
+ struct oce_port_rxf_stats_v1 port[4];
+ uint32_t rsvd0[2];
+ uint32_t rx_drops_no_pbuf;
+ uint32_t rx_drops_no_txpb;
+ uint32_t rx_drops_no_erx_descr;
+ uint32_t rx_drops_no_tpre_descr;
+ uint32_t rsvd1[6];
+ uint32_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_ring;
+ uint32_t forwarded_packets;
+ uint32_t rx_drops_mtu;
+ uint32_t rsvd2[14];
+} __packed;
+
+struct oce_erx_stats_v1 {
+ uint32_t rx_drops_no_fragments[68];
+ uint32_t rsvd[4];
+} __packed;
+
+
+struct oce_erx_stats_v0 {
+ uint32_t rx_drops_no_fragments[44];
+ uint32_t rsvd[4];
+} __packed;
+
+struct oce_pmem_stats {
+ uint32_t eth_red_drops;
+ uint32_t rsvd[5];
+} __packed;
+
+struct oce_hw_stats_v1 {
+ struct oce_rxf_stats_v1 rxf;
+ uint32_t rsvd0[OCE_TXP_SW_SZ];
+ struct oce_erx_stats_v1 erx;
+ struct oce_pmem_stats pmem;
+ uint32_t rsvd1[18];
+} __packed;
+
+struct oce_hw_stats_v0 {
+ struct oce_rxf_stats_v0 rxf;
+ uint32_t rsvd[48];
+ struct oce_erx_stats_v0 erx;
+ struct oce_pmem_stats pmem;
+} __packed;
+
+/*
+ * NIC_GET_STATS mailbox command/response.  The request carries no
+ * parameters; the response is the full hardware stats block in either
+ * the v0 (BE2) or v1 (BE3/Lancer) layout.
+ */
+struct mbx_get_nic_stats_v0 {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0;
+		} req;
+
+		union {
+			struct oce_hw_stats_v0 stats;
+		} rsp;
+	} params;
+} __packed;
+
+struct mbx_get_nic_stats {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			uint32_t rsvd0;
+		} req;
+
+		struct {
+			struct oce_hw_stats_v1 stats;
+		} rsp;
+	} params;
+} __packed;
+
+/* [18(0x12)] NIC_GET_PPORT_STATS */
+/*
+ * Physical-port statistics as returned by the firmware.  The structure
+ * is __packed to match the mailbox response: each uint64_t counter
+ * occupies two consecutive response dwords (see enum OCE_PPORT_STATS,
+ * whose values step by 2 for 64-bit counters).
+ */
+struct pport_stats {
+	uint64_t tx_pkts;
+	uint64_t tx_unicast_pkts;
+	uint64_t tx_multicast_pkts;
+	uint64_t tx_broadcast_pkts;
+	uint64_t tx_bytes;
+	uint64_t tx_unicast_bytes;
+	uint64_t tx_multicast_bytes;
+	uint64_t tx_broadcast_bytes;
+	uint64_t tx_discards;
+	uint64_t tx_errors;
+	uint64_t tx_pause_frames;
+	uint64_t tx_pause_on_frames;
+	uint64_t tx_pause_off_frames;
+	uint64_t tx_internal_mac_errors;
+	uint64_t tx_control_frames;
+	uint64_t tx_pkts_64_bytes;
+	uint64_t tx_pkts_65_to_127_bytes;
+	uint64_t tx_pkts_128_to_255_bytes;
+	uint64_t tx_pkts_256_to_511_bytes;
+	uint64_t tx_pkts_512_to_1023_bytes;
+	uint64_t tx_pkts_1024_to_1518_bytes;
+	uint64_t tx_pkts_1519_to_2047_bytes;
+	uint64_t tx_pkts_2048_to_4095_bytes;
+	uint64_t tx_pkts_4096_to_8191_bytes;
+	uint64_t tx_pkts_8192_to_9216_bytes;
+	uint64_t tx_lso_pkts;
+	uint64_t rx_pkts;
+	uint64_t rx_unicast_pkts;
+	uint64_t rx_multicast_pkts;
+	uint64_t rx_broadcast_pkts;
+	uint64_t rx_bytes;
+	uint64_t rx_unicast_bytes;
+	uint64_t rx_multicast_bytes;
+	uint64_t rx_broadcast_bytes;
+	uint32_t rx_unknown_protos;
+	uint32_t reserved_word69;
+	uint64_t rx_discards;
+	uint64_t rx_errors;
+	uint64_t rx_crc_errors;
+	uint64_t rx_alignment_errors;
+	uint64_t rx_symbol_errors;
+	uint64_t rx_pause_frames;
+	uint64_t rx_pause_on_frames;
+	uint64_t rx_pause_off_frames;
+	uint64_t rx_frames_too_long;
+	uint64_t rx_internal_mac_errors;
+	uint32_t rx_undersize_pkts;
+	uint32_t rx_oversize_pkts;
+	uint32_t rx_fragment_pkts;
+	uint32_t rx_jabbers;
+	uint64_t rx_control_frames;
+	uint64_t rx_control_frames_unknown_opcode;
+	uint32_t rx_in_range_errors;
+	uint32_t rx_out_of_range_errors;
+	uint32_t rx_address_match_errors;
+	uint32_t rx_vlan_mismatch_errors;
+	uint32_t rx_dropped_too_small;
+	uint32_t rx_dropped_too_short;
+	uint32_t rx_dropped_header_too_small;
+	uint32_t rx_dropped_invalid_tcp_length;
+	uint32_t rx_dropped_runt;
+	uint32_t rx_ip_checksum_errors;
+	uint32_t rx_tcp_checksum_errors;
+	uint32_t rx_udp_checksum_errors;
+	uint32_t rx_non_rss_pkts;
+	uint64_t reserved_word111;
+	uint64_t rx_ipv4_pkts;
+	uint64_t rx_ipv6_pkts;
+	uint64_t rx_ipv4_bytes;
+	uint64_t rx_ipv6_bytes;
+	uint64_t rx_nic_pkts;
+	uint64_t rx_tcp_pkts;
+	uint64_t rx_iscsi_pkts;
+	uint64_t rx_management_pkts;
+	uint64_t rx_switched_unicast_pkts;
+	uint64_t rx_switched_multicast_pkts;
+	uint64_t rx_switched_broadcast_pkts;
+	uint64_t num_forwards;
+	uint32_t rx_fifo_overflow;
+	uint32_t rx_input_fifo_overflow;
+	uint64_t rx_drops_too_many_frags;
+	uint32_t rx_drops_invalid_queue;
+	uint32_t reserved_word141;
+	uint64_t rx_drops_mtu;
+	uint64_t rx_pkts_64_bytes;
+	uint64_t rx_pkts_65_to_127_bytes;
+	uint64_t rx_pkts_128_to_255_bytes;
+	uint64_t rx_pkts_256_to_511_bytes;
+	uint64_t rx_pkts_512_to_1023_bytes;
+	uint64_t rx_pkts_1024_to_1518_bytes;
+	uint64_t rx_pkts_1519_to_2047_bytes;
+	uint64_t rx_pkts_2048_to_4095_bytes;
+	uint64_t rx_pkts_4096_to_8191_bytes;
+	uint64_t rx_pkts_8192_to_9216_bytes;
+} __packed;
+
+/*
+ * NIC_GET_PPORT_STATS mailbox: the request selects a physical port and
+ * may ask the firmware to reset the counters after reading; the
+ * response can be viewed either as the typed structure or as a raw
+ * dword array.  Request bitfield order depends on host endianness.
+ */
+struct mbx_get_pport_stats {
+	/* dw0 - dw3 */
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			/* dw4 */
+#ifdef _BIG_ENDIAN
+			uint32_t reset_stats:8;
+			uint32_t rsvd0:8;
+			uint32_t port_number:16;
+#else
+			uint32_t port_number:16;
+			uint32_t rsvd0:8;
+			uint32_t reset_stats:8;
+#endif
+		} req;
+
+		union {
+			struct pport_stats pps;
+			uint32_t pport_stats[164 - 4 + 1];
+		} rsp;
+	} params;
+} __packed;
+
+/* [19(0x13)] NIC_GET_VPORT_STATS */
+/*
+ * Virtual-port statistics; same packing rules as struct pport_stats
+ * (each uint64_t counter spans two response dwords).
+ */
+struct vport_stats {
+	uint64_t tx_pkts;
+	uint64_t tx_unicast_pkts;
+	uint64_t tx_multicast_pkts;
+	uint64_t tx_broadcast_pkts;
+	uint64_t tx_bytes;
+	uint64_t tx_unicast_bytes;
+	uint64_t tx_multicast_bytes;
+	uint64_t tx_broadcast_bytes;
+	uint64_t tx_discards;
+	uint64_t tx_errors;
+	uint64_t tx_pkts_64_bytes;
+	uint64_t tx_pkts_65_to_127_bytes;
+	uint64_t tx_pkts_128_to_255_bytes;
+	uint64_t tx_pkts_256_to_511_bytes;
+	uint64_t tx_pkts_512_to_1023_bytes;
+	uint64_t tx_pkts_1024_to_1518_bytes;
+	uint64_t tx_pkts_1519_to_9699_bytes;
+	uint64_t tx_pkts_over_9699_bytes;
+	uint64_t rx_pkts;
+	uint64_t rx_unicast_pkts;
+	uint64_t rx_multicast_pkts;
+	uint64_t rx_broadcast_pkts;
+	uint64_t rx_bytes;
+	uint64_t rx_unicast_bytes;
+	uint64_t rx_multicast_bytes;
+	uint64_t rx_broadcast_bytes;
+	uint64_t rx_discards;
+	uint64_t rx_errors;
+	uint64_t rx_pkts_64_bytes;
+	uint64_t rx_pkts_65_to_127_bytes;
+	uint64_t rx_pkts_128_to_255_bytes;
+	uint64_t rx_pkts_256_to_511_bytes;
+	uint64_t rx_pkts_512_to_1023_bytes;
+	uint64_t rx_pkts_1024_to_1518_bytes;
+	uint64_t rx_pkts_1519_to_9699_bytes;
+	uint64_t rx_pkts_gt_9699_bytes;
+} __packed;
+/* NIC_GET_VPORT_STATS mailbox: select a vPort, optionally reset. */
+struct mbx_get_vport_stats {
+	/* dw0 - dw3 */
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			/* dw4 */
+#ifdef _BIG_ENDIAN
+			uint32_t reset_stats:8;
+			uint32_t rsvd0:8;
+			uint32_t vport_number:16;
+#else
+			uint32_t vport_number:16;
+			uint32_t rsvd0:8;
+			uint32_t reset_stats:8;
+#endif
+		} req;
+
+		union {
+			struct vport_stats vps;
+			uint32_t vport_stats[75 - 4 + 1];
+		} rsp;
+	} params;
+} __packed;
+
+/**
+ * @brief [20(0x14)] NIC_GET_QUEUE_STATS
+ * The significant difference between vPort and Queue statistics is
+ * the packet byte counters.
+ */
+struct queue_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+	uint64_t drops;
+	uint64_t buffer_errors;		/* rsvd when tx */
+} __packed;
+
+/* queue_type values for the request below. */
+#define QUEUE_TYPE_WQ		0
+#define QUEUE_TYPE_RQ		1
+#define QUEUE_TYPE_HDS_RQ	1	/* same as RQ */
+
+/* NIC_GET_QUEUE_STATS mailbox: select a queue by type and id. */
+struct mbx_get_queue_stats {
+	/* dw0 - dw3 */
+	struct mbx_hdr hdr;
+	union {
+		struct {
+			/* dw4 */
+#ifdef _BIG_ENDIAN
+			uint32_t reset_stats:8;
+			uint32_t queue_type:8;
+			uint32_t queue_id:16;
+#else
+			uint32_t queue_id:16;
+			uint32_t queue_type:8;
+			uint32_t reset_stats:8;
+#endif
+		} req;
+
+		union {
+			struct queue_stats qs;
+			uint32_t queue_stats[13 - 4 + 1];
+		} rsp;
+	} params;
+} __packed;
+
+/* [01] NIC_CONFIG_RSS */
+#define OCE_HASH_TBL_SZ	10
+#define OCE_CPU_TBL_SZ	128
+#define OCE_FLUSH	1	/* RSS flush completion per CQ port */
+/*
+ * NIC_CONFIG_RSS mailbox: programs the RSS hash key and the CPU
+ * (indirection) table for an interface.  Field order of the
+ * sub-dword members depends on host endianness.
+ */
+struct mbx_config_nic_rss {
+	struct mbx_hdr hdr;
+	union {
+		struct {
+#ifdef _BIG_ENDIAN
+			uint32_t if_id;
+			uint16_t cpu_tbl_sz_log2;
+			uint16_t enable_rss;
+			uint32_t hash[OCE_HASH_TBL_SZ];
+			uint8_t cputable[OCE_CPU_TBL_SZ];
+			uint8_t rsvd[3];
+			uint8_t flush;
+#else
+			uint32_t if_id;
+			uint16_t enable_rss;
+			uint16_t cpu_tbl_sz_log2;
+			uint32_t hash[OCE_HASH_TBL_SZ];
+			uint8_t cputable[OCE_CPU_TBL_SZ];
+			uint8_t flush;
+			uint8_t rsvd[3];
+#endif
+		} req;
+		struct {
+			uint8_t rsvd[3];
+			uint8_t rss_bank;
+		} rsp;
+	} params;
+} __packed;
+
+typedef uint32_t oce_stat_t;		/* statistic counter */
+
+/*
+ * The enums below name dword indices into the raw v0 statistics
+ * response; "_LSD"/"_MSD" pairs are the low/high halves of 64-bit
+ * counters.  Enumerator order and values are part of the firmware
+ * interface and must not be changed.
+ */
+enum OCE_RXF_PORT_STATS {
+	RXF_RX_BYTES_LSD,
+	RXF_RX_BYTES_MSD,
+	RXF_RX_TOTAL_FRAMES,
+	RXF_RX_UNICAST_FRAMES,
+	RXF_RX_MULTICAST_FRAMES,
+	RXF_RX_BROADCAST_FRAMES,
+	RXF_RX_CRC_ERRORS,
+	RXF_RX_ALIGNMENT_SYMBOL_ERRORS,
+	RXF_RX_PAUSE_FRAMES,
+	RXF_RX_CONTROL_FRAMES,
+	RXF_RX_IN_RANGE_ERRORS,
+	RXF_RX_OUT_RANGE_ERRORS,
+	RXF_RX_FRAME_TOO_LONG,
+	RXF_RX_ADDRESS_MATCH_ERRORS,
+	RXF_RX_VLAN_MISMATCH,
+	RXF_RX_DROPPED_TOO_SMALL,
+	RXF_RX_DROPPED_TOO_SHORT,
+	RXF_RX_DROPPED_HEADER_TOO_SMALL,
+	RXF_RX_DROPPED_TCP_LENGTH,
+	RXF_RX_DROPPED_RUNT,
+	RXF_RX_64_BYTE_PACKETS,
+	RXF_RX_65_127_BYTE_PACKETS,
+	RXF_RX_128_256_BYTE_PACKETS,
+	RXF_RX_256_511_BYTE_PACKETS,
+	RXF_RX_512_1023_BYTE_PACKETS,
+	RXF_RX_1024_1518_BYTE_PACKETS,
+	RXF_RX_1519_2047_BYTE_PACKETS,
+	RXF_RX_2048_4095_BYTE_PACKETS,
+	RXF_RX_4096_8191_BYTE_PACKETS,
+	RXF_RX_8192_9216_BYTE_PACKETS,
+	RXF_RX_IP_CHECKSUM_ERRS,
+	RXF_RX_TCP_CHECKSUM_ERRS,
+	RXF_RX_UDP_CHECKSUM_ERRS,
+	RXF_RX_NON_RSS_PACKETS,
+	RXF_RX_IPV4_PACKETS,
+	RXF_RX_IPV6_PACKETS,
+	RXF_RX_IPV4_BYTES_LSD,
+	RXF_RX_IPV4_BYTES_MSD,
+	RXF_RX_IPV6_BYTES_LSD,
+	RXF_RX_IPV6_BYTES_MSD,
+	RXF_RX_CHUTE1_PACKETS,
+	RXF_RX_CHUTE2_PACKETS,
+	RXF_RX_CHUTE3_PACKETS,
+	RXF_RX_MANAGEMENT_PACKETS,
+	RXF_RX_SWITCHED_UNICAST_PACKETS,
+	RXF_RX_SWITCHED_MULTICAST_PACKETS,
+	RXF_RX_SWITCHED_BROADCAST_PACKETS,
+	RXF_TX_BYTES_LSD,
+	RXF_TX_BYTES_MSD,
+	RXF_TX_UNICAST_FRAMES,
+	RXF_TX_MULTICAST_FRAMES,
+	RXF_TX_BROADCAST_FRAMES,
+	RXF_TX_PAUSE_FRAMES,
+	RXF_TX_CONTROL_FRAMES,
+	RXF_TX_64_BYTE_PACKETS,
+	RXF_TX_65_127_BYTE_PACKETS,
+	RXF_TX_128_256_BYTE_PACKETS,
+	RXF_TX_256_511_BYTE_PACKETS,
+	RXF_TX_512_1023_BYTE_PACKETS,
+	RXF_TX_1024_1518_BYTE_PACKETS,
+	RXF_TX_1519_2047_BYTE_PACKETS,
+	RXF_TX_2048_4095_BYTE_PACKETS,
+	RXF_TX_4096_8191_BYTE_PACKETS,
+	RXF_TX_8192_9216_BYTE_PACKETS,
+	RXF_RX_FIFO_OVERFLOW,
+	RXF_RX_INPUT_FIFO_OVERFLOW,
+	RXF_PORT_STATS_N_WORDS
+};
+
+enum OCE_RXF_ADDL_STATS {
+	RXF_RX_DROPS_NO_PBUF,
+	RXF_RX_DROPS_NO_TXPB,
+	RXF_RX_DROPS_NO_ERX_DESCR,
+	RXF_RX_DROPS_NO_TPRE_DESCR,
+	RXF_MANAGEMENT_RX_PORT_PACKETS,
+	RXF_MANAGEMENT_RX_PORT_BYTES,
+	RXF_MANAGEMENT_RX_PORT_PAUSE_FRAMES,
+	RXF_MANAGEMENT_RX_PORT_ERRORS,
+	RXF_MANAGEMENT_TX_PORT_PACKETS,
+	RXF_MANAGEMENT_TX_PORT_BYTES,
+	RXF_MANAGEMENT_TX_PORT_PAUSE,
+	RXF_MANAGEMENT_RX_PORT_RXFIFO_OVERFLOW,
+	RXF_RX_DROPS_TOO_MANY_FRAGS,
+	RXF_RX_DROPS_INVALID_RING,
+	RXF_FORWARDED_PACKETS,
+	RXF_RX_DROPS_MTU,
+	RXF_ADDL_STATS_N_WORDS
+};
+
+/* NOTE(review): the lowercase "PKTs" spellings below are inherited from
+ * the vendor headers and kept for source compatibility. */
+enum OCE_TX_CHUTE_PORT_STATS {
+	CTPT_XMT_IPV4_PKTS,
+	CTPT_XMT_IPV4_LSD,
+	CTPT_XMT_IPV4_MSD,
+	CTPT_XMT_IPV6_PKTS,
+	CTPT_XMT_IPV6_LSD,
+	CTPT_XMT_IPV6_MSD,
+	CTPT_REXMT_IPV4_PKTs,
+	CTPT_REXMT_IPV4_LSD,
+	CTPT_REXMT_IPV4_MSD,
+	CTPT_REXMT_IPV6_PKTs,
+	CTPT_REXMT_IPV6_LSD,
+	CTPT_REXMT_IPV6_MSD,
+	CTPT_N_WORDS,
+};
+
+/* Per-ring drop counters followed by debug words. */
+enum OCE_RX_ERR_STATS {
+	RX_DROPS_NO_FRAGMENTS_0,
+	RX_DROPS_NO_FRAGMENTS_1,
+	RX_DROPS_NO_FRAGMENTS_2,
+	RX_DROPS_NO_FRAGMENTS_3,
+	RX_DROPS_NO_FRAGMENTS_4,
+	RX_DROPS_NO_FRAGMENTS_5,
+	RX_DROPS_NO_FRAGMENTS_6,
+	RX_DROPS_NO_FRAGMENTS_7,
+	RX_DROPS_NO_FRAGMENTS_8,
+	RX_DROPS_NO_FRAGMENTS_9,
+	RX_DROPS_NO_FRAGMENTS_10,
+	RX_DROPS_NO_FRAGMENTS_11,
+	RX_DROPS_NO_FRAGMENTS_12,
+	RX_DROPS_NO_FRAGMENTS_13,
+	RX_DROPS_NO_FRAGMENTS_14,
+	RX_DROPS_NO_FRAGMENTS_15,
+	RX_DROPS_NO_FRAGMENTS_16,
+	RX_DROPS_NO_FRAGMENTS_17,
+	RX_DROPS_NO_FRAGMENTS_18,
+	RX_DROPS_NO_FRAGMENTS_19,
+	RX_DROPS_NO_FRAGMENTS_20,
+	RX_DROPS_NO_FRAGMENTS_21,
+	RX_DROPS_NO_FRAGMENTS_22,
+	RX_DROPS_NO_FRAGMENTS_23,
+	RX_DROPS_NO_FRAGMENTS_24,
+	RX_DROPS_NO_FRAGMENTS_25,
+	RX_DROPS_NO_FRAGMENTS_26,
+	RX_DROPS_NO_FRAGMENTS_27,
+	RX_DROPS_NO_FRAGMENTS_28,
+	RX_DROPS_NO_FRAGMENTS_29,
+	RX_DROPS_NO_FRAGMENTS_30,
+	RX_DROPS_NO_FRAGMENTS_31,
+	RX_DROPS_NO_FRAGMENTS_32,
+	RX_DROPS_NO_FRAGMENTS_33,
+	RX_DROPS_NO_FRAGMENTS_34,
+	RX_DROPS_NO_FRAGMENTS_35,
+	RX_DROPS_NO_FRAGMENTS_36,
+	RX_DROPS_NO_FRAGMENTS_37,
+	RX_DROPS_NO_FRAGMENTS_38,
+	RX_DROPS_NO_FRAGMENTS_39,
+	RX_DROPS_NO_FRAGMENTS_40,
+	RX_DROPS_NO_FRAGMENTS_41,
+	RX_DROPS_NO_FRAGMENTS_42,
+	RX_DROPS_NO_FRAGMENTS_43,
+	RX_DEBUG_WDMA_SENT_HOLD,
+	RX_DEBUG_WDMA_PBFREE_SENT_HOLD,
+	RX_DEBUG_WDMA_0B_PBFREE_SENT_HOLD,
+	RX_DEBUG_PMEM_PBUF_DEALLOC,
+	RX_ERRORS_N_WORDS
+};
+
+enum OCE_PMEM_ERR_STATS {
+	PMEM_ETH_RED_DROPS,
+	PMEM_LRO_RED_DROPS,
+	PMEM_ULP0_RED_DROPS,
+	PMEM_ULP1_RED_DROPS,
+	PMEM_GLOBAL_RED_DROPS,
+	PMEM_ERRORS_N_WORDS
+};
+
+/**
+ * @brief Statistics for a given Physical Port
+ * These satisfy all the required BE2 statistics and also the
+ * following MIB objects:
+ *
+ * RFC 2863 - The Interfaces Group MIB
+ * RFC 2819 - Remote Network Monitoring Management Information Base (RMON)
+ * RFC 3635 - Managed Objects for the Ethernet-like Interface Types
+ * RFC 4502 - Remote Network Monitoring Mgmt Information Base Ver-2 (RMON2)
+ *
+ */
+/* Values are dword offsets into the NIC_GET_PPORT_STATS response;
+ * 64-bit counters occupy two dwords, hence the steps of 2. */
+enum OCE_PPORT_STATS {
+	PPORT_TX_PKTS = 0,
+	PPORT_TX_UNICAST_PKTS = 2,
+	PPORT_TX_MULTICAST_PKTS = 4,
+	PPORT_TX_BROADCAST_PKTS = 6,
+	PPORT_TX_BYTES = 8,
+	PPORT_TX_UNICAST_BYTES = 10,
+	PPORT_TX_MULTICAST_BYTES = 12,
+	PPORT_TX_BROADCAST_BYTES = 14,
+	PPORT_TX_DISCARDS = 16,
+	PPORT_TX_ERRORS = 18,
+	PPORT_TX_PAUSE_FRAMES = 20,
+	PPORT_TX_PAUSE_ON_FRAMES = 22,
+	PPORT_TX_PAUSE_OFF_FRAMES = 24,
+	PPORT_TX_INTERNAL_MAC_ERRORS = 26,
+	PPORT_TX_CONTROL_FRAMES = 28,
+	PPORT_TX_PKTS_64_BYTES = 30,
+	PPORT_TX_PKTS_65_TO_127_BYTES = 32,
+	PPORT_TX_PKTS_128_TO_255_BYTES = 34,
+	PPORT_TX_PKTS_256_TO_511_BYTES = 36,
+	PPORT_TX_PKTS_512_TO_1023_BYTES = 38,
+	PPORT_TX_PKTS_1024_TO_1518_BYTES = 40,
+	PPORT_TX_PKTS_1519_TO_2047_BYTES = 42,
+	PPORT_TX_PKTS_2048_TO_4095_BYTES = 44,
+	PPORT_TX_PKTS_4096_TO_8191_BYTES = 46,
+	PPORT_TX_PKTS_8192_TO_9216_BYTES = 48,
+	PPORT_TX_LSO_PKTS = 50,
+	PPORT_RX_PKTS = 52,
+	PPORT_RX_UNICAST_PKTS = 54,
+	PPORT_RX_MULTICAST_PKTS = 56,
+	PPORT_RX_BROADCAST_PKTS = 58,
+	PPORT_RX_BYTES = 60,
+	PPORT_RX_UNICAST_BYTES = 62,
+	PPORT_RX_MULTICAST_BYTES = 64,
+	PPORT_RX_BROADCAST_BYTES = 66,
+	PPORT_RX_UNKNOWN_PROTOS = 68,
+	PPORT_RESERVED_WORD69 = 69,
+	PPORT_RX_DISCARDS = 70,
+	PPORT_RX_ERRORS = 72,
+	PPORT_RX_CRC_ERRORS = 74,
+	PPORT_RX_ALIGNMENT_ERRORS = 76,
+	PPORT_RX_SYMBOL_ERRORS = 78,
+	PPORT_RX_PAUSE_FRAMES = 80,
+	PPORT_RX_PAUSE_ON_FRAMES = 82,
+	PPORT_RX_PAUSE_OFF_FRAMES = 84,
+	PPORT_RX_FRAMES_TOO_LONG = 86,
+	PPORT_RX_INTERNAL_MAC_ERRORS = 88,
+	PPORT_RX_UNDERSIZE_PKTS = 90,
+	PPORT_RX_OVERSIZE_PKTS = 91,
+	PPORT_RX_FRAGMENT_PKTS = 92,
+	PPORT_RX_JABBERS = 93,
+	PPORT_RX_CONTROL_FRAMES = 94,
+	PPORT_RX_CONTROL_FRAMES_UNK_OPCODE = 96,
+	PPORT_RX_IN_RANGE_ERRORS = 98,
+	PPORT_RX_OUT_OF_RANGE_ERRORS = 99,
+	PPORT_RX_ADDRESS_MATCH_ERRORS = 100,
+	PPORT_RX_VLAN_MISMATCH_ERRORS = 101,
+	PPORT_RX_DROPPED_TOO_SMALL = 102,
+	PPORT_RX_DROPPED_TOO_SHORT = 103,
+	PPORT_RX_DROPPED_HEADER_TOO_SMALL = 104,
+	PPORT_RX_DROPPED_INVALID_TCP_LENGTH = 105,
+	PPORT_RX_DROPPED_RUNT = 106,
+	PPORT_RX_IP_CHECKSUM_ERRORS = 107,
+	PPORT_RX_TCP_CHECKSUM_ERRORS = 108,
+	PPORT_RX_UDP_CHECKSUM_ERRORS = 109,
+	PPORT_RX_NON_RSS_PKTS = 110,
+	PPORT_RESERVED_WORD111 = 111,
+	PPORT_RX_IPV4_PKTS = 112,
+	PPORT_RX_IPV6_PKTS = 114,
+	PPORT_RX_IPV4_BYTES = 116,
+	PPORT_RX_IPV6_BYTES = 118,
+	PPORT_RX_NIC_PKTS = 120,
+	PPORT_RX_TCP_PKTS = 122,
+	PPORT_RX_ISCSI_PKTS = 124,
+	PPORT_RX_MANAGEMENT_PKTS = 126,
+	PPORT_RX_SWITCHED_UNICAST_PKTS = 128,
+	PPORT_RX_SWITCHED_MULTICAST_PKTS = 130,
+	PPORT_RX_SWITCHED_BROADCAST_PKTS = 132,
+	PPORT_NUM_FORWARDS = 134,
+	PPORT_RX_FIFO_OVERFLOW = 136,
+	PPORT_RX_INPUT_FIFO_OVERFLOW = 137,
+	PPORT_RX_DROPS_TOO_MANY_FRAGS = 138,
+	PPORT_RX_DROPS_INVALID_QUEUE = 140,
+	PPORT_RESERVED_WORD141 = 141,
+	PPORT_RX_DROPS_MTU = 142,
+	PPORT_RX_PKTS_64_BYTES = 144,
+	PPORT_RX_PKTS_65_TO_127_BYTES = 146,
+	PPORT_RX_PKTS_128_TO_255_BYTES = 148,
+	PPORT_RX_PKTS_256_TO_511_BYTES = 150,
+	PPORT_RX_PKTS_512_TO_1023_BYTES = 152,
+	PPORT_RX_PKTS_1024_TO_1518_BYTES = 154,
+	PPORT_RX_PKTS_1519_TO_2047_BYTES = 156,
+	PPORT_RX_PKTS_2048_TO_4095_BYTES = 158,
+	PPORT_RX_PKTS_4096_TO_8191_BYTES = 160,
+	PPORT_RX_PKTS_8192_TO_9216_BYTES = 162,
+	PPORT_N_WORDS = 164
+};
+
+/**
+ * @brief Statistics for a given Virtual Port (vPort)
+ * The following describes the vPort statistics satisfying
+ * requirements of Linux/VMWare netdev statistics and
+ * Microsoft Windows Statistics along with other Operating Systems.
+ */
+/* Values are dword offsets into the NIC_GET_VPORT_STATS response. */
+enum OCE_VPORT_STATS {
+	VPORT_TX_PKTS = 0,
+	VPORT_TX_UNICAST_PKTS = 2,
+	VPORT_TX_MULTICAST_PKTS = 4,
+	VPORT_TX_BROADCAST_PKTS = 6,
+	VPORT_TX_BYTES = 8,
+	VPORT_TX_UNICAST_BYTES = 10,
+	VPORT_TX_MULTICAST_BYTES = 12,
+	VPORT_TX_BROADCAST_BYTES = 14,
+	VPORT_TX_DISCARDS = 16,
+	VPORT_TX_ERRORS = 18,
+	VPORT_TX_PKTS_64_BYTES = 20,
+	VPORT_TX_PKTS_65_TO_127_BYTES = 22,
+	VPORT_TX_PKTS_128_TO_255_BYTES = 24,
+	VPORT_TX_PKTS_256_TO_511_BYTES = 26,
+	VPORT_TX_PKTS_512_TO_1023_BYTEs = 28,
+	VPORT_TX_PKTS_1024_TO_1518_BYTEs = 30,
+	VPORT_TX_PKTS_1519_TO_9699_BYTEs = 32,
+	VPORT_TX_PKTS_OVER_9699_BYTES = 34,
+	VPORT_RX_PKTS = 36,
+	VPORT_RX_UNICAST_PKTS = 38,
+	VPORT_RX_MULTICAST_PKTS = 40,
+	VPORT_RX_BROADCAST_PKTS = 42,
+	VPORT_RX_BYTES = 44,
+	VPORT_RX_UNICAST_BYTES = 46,
+	VPORT_RX_MULTICAST_BYTES = 48,
+	VPORT_RX_BROADCAST_BYTES = 50,
+	VPORT_RX_DISCARDS = 52,
+	VPORT_RX_ERRORS = 54,
+	VPORT_RX_PKTS_64_BYTES = 56,
+	VPORT_RX_PKTS_65_TO_127_BYTES = 58,
+	VPORT_RX_PKTS_128_TO_255_BYTES = 60,
+	VPORT_RX_PKTS_256_TO_511_BYTES = 62,
+	VPORT_RX_PKTS_512_TO_1023_BYTEs = 64,
+	VPORT_RX_PKTS_1024_TO_1518_BYTEs = 66,
+	VPORT_RX_PKTS_1519_TO_9699_BYTEs = 68,
+	VPORT_RX_PKTS_OVER_9699_BYTES = 70,
+	VPORT_N_WORDS = 72
+};
+
+/**
+ * @brief Statistics for a given queue (NIC WQ, RQ, or HDS RQ)
+ * This set satisfies requirements of VMware NetQueue and Microsoft VMQ
+ */
+enum OCE_QUEUE_TX_STATS {
+	QUEUE_TX_PKTS = 0,
+	QUEUE_TX_BYTES = 2,
+	QUEUE_TX_ERRORS = 4,
+	QUEUE_TX_DROPS = 6,
+	QUEUE_TX_N_WORDS = 8
+};
+
+enum OCE_QUEUE_RX_STATS {
+	QUEUE_RX_PKTS = 0,
+	QUEUE_RX_BYTES = 2,
+	QUEUE_RX_ERRORS = 4,
+	QUEUE_RX_DROPS = 6,
+	QUEUE_RX_BUFFER_ERRORS = 8,
+	QUEUE_RX_N_WORDS = 10
+};
diff --git a/sys/dev/pci/ocevar.h b/sys/dev/pci/ocevar.h
new file mode 100644
index 00000000000..378c291ae7e
--- /dev/null
+++ b/sys/dev/pci/ocevar.h
@@ -0,0 +1,929 @@
+/* $OpenBSD: ocevar.h,v 1.1 2012/08/02 17:35:52 mikeb Exp $ */
+
+/*-
+ * Copyright (C) 2012 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/* OCE device driver module component revision information */
+#define COMPONENT_REVISION "4.2.127.0"
+
+
+/* OCE devices supported by this driver */
+#define PCI_VENDOR_EMULEX		0x10df	/* Emulex */
+#define PCI_VENDOR_SERVERENGINES	0x19a2	/* ServerEngines (BE) */
+#define PCI_PRODUCT_BE2			0x0700	/* BE2 network adapter */
+#define PCI_PRODUCT_BE3			0x0710	/* BE3 network adapter */
+#define PCI_PRODUCT_XE201		0xe220	/* XE201 network adapter */
+#define PCI_PRODUCT_XE201_VF		0xe228	/* XE201 with VF in Lancer */
+
+/* Chip family tests on the softc flags.  The macro parameter is fully
+ * parenthesized so any pointer-valued expression can be passed safely. */
+#define IS_BE(sc)	(((((sc)->flags & OCE_FLAGS_BE3) | \
+			 ((sc)->flags & OCE_FLAGS_BE2)) != 0) ? 1 : 0)
+#define IS_XE201(sc)	(((sc)->flags & OCE_FLAGS_XE201) ? 1 : 0)
+#define HAS_A0_CHIP(sc)	(((sc)->flags & OCE_FLAGS_HAS_A0_CHIP) ? 1 : 0)
+
+
+/* proportion Service Level Interface queues */
+#define OCE_MAX_UNITS		2
+#define OCE_MAX_PPORT		OCE_MAX_UNITS
+#define OCE_MAX_VPORT		OCE_MAX_UNITS
+
+/* This should be powers of 2. Like 2,4,8 & 16 */
+#define OCE_MAX_RSS		4 /* TODO: 8*/
+#define OCE_LEGACY_MODE_RSS	4 /* For BE3 Legacy mode*/
+
+#define OCE_MIN_RQ		1
+#define OCE_MIN_WQ		1
+
+/* Expansions are parenthesized so the macros stay correct when used
+ * inside larger expressions (e.g. multiplied by a size). */
+#define OCE_MAX_RQ		(OCE_MAX_RSS + 1) /* one default queue */
+#define OCE_MAX_WQ		8
+
+#define OCE_MAX_EQ		32
+#define OCE_MAX_CQ		(OCE_MAX_RQ + OCE_MAX_WQ + 1) /* one MCC queue */
+#define OCE_MAX_CQ_EQ		8 /* Max CQ that can attached to an EQ */
+
+#define OCE_DEFAULT_WQ_EQD	64
+#define OCE_MAX_PACKET_Q	16
+#define OCE_RQ_BUF_SIZE		2048
+#define OCE_LSO_MAX_SIZE	(64 * 1024)
+#define LONG_TIMEOUT		30
+#define OCE_MAX_JUMBO_FRAME_SIZE 16360
+#define OCE_MAX_MTU		(OCE_MAX_JUMBO_FRAME_SIZE - \
+				 ETHER_VLAN_ENCAP_LEN - \
+				 ETHER_HDR_LEN)
+
+#define OCE_MAX_TX_ELEMENTS	29
+#define OCE_MAX_TX_DESC		1024
+#define OCE_MAX_TX_SIZE		65535
+#define OCE_MAX_RX_SIZE		4096
+#define OCE_MAX_RQ_POSTS	255
+#define OCE_DEFAULT_PROMISCUOUS	0
+
+#define RSS_ENABLE_IPV4		0x1
+#define RSS_ENABLE_TCP_IPV4	0x2
+#define RSS_ENABLE_IPV6		0x4
+#define RSS_ENABLE_TCP_IPV6	0x8
+
+/* flow control definitions */
+#define OCE_FC_NONE		0x00000000
+#define OCE_FC_TX		0x00000001
+#define OCE_FC_RX		0x00000002
+#define OCE_DEFAULT_FLOW_CONTROL	(OCE_FC_TX | OCE_FC_RX)
+
+/* Interface capabilities to give device when creating interface */
+#define OCE_CAPAB_FLAGS		(MBX_RX_IFACE_FLAGS_BROADCAST	 | \
+				 MBX_RX_IFACE_FLAGS_UNTAGGED	 | \
+				 MBX_RX_IFACE_FLAGS_PROMISCUOUS	 | \
+				 MBX_RX_IFACE_FLAGS_MCAST_PROMISCUOUS | \
+				 MBX_RX_IFACE_FLAGS_RSS)
+				/* MBX_RX_IFACE_FLAGS_RSS | \ */
+				/* MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR) */
+
+/* Interface capabilities to enable by default (others set dynamically) */
+#define OCE_CAPAB_ENABLE	(MBX_RX_IFACE_FLAGS_BROADCAST	 | \
+				 MBX_RX_IFACE_FLAGS_UNTAGGED	 | \
+				 MBX_RX_IFACE_FLAGS_RSS)
+				/* MBX_RX_IFACE_FLAGS_RSS | \ */
+				/* MBX_RX_IFACE_FLAGS_PASS_L3L4_ERR) */
+
+#define ETH_ADDR_LEN		6
+#define MAX_VLANFILTER_SIZE	64
+#define MAX_VLANS		4096
+
+/* High 32 bits of a 64-bit value; two 16-bit shifts keep the shift
+ * count legal even when the argument happens to be only 32 bits wide. */
+#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))
+/* Byte-swap helpers.  Arguments are evaluated more than once, so do
+ * not pass expressions with side effects.  BSWAP_64 widens the 32-bit
+ * halves to uint64_t before shifting by 32, which would otherwise be
+ * undefined behavior for a 32-bit operand. */
+#define BSWAP_8(x)		((x) & 0xff)
+#define BSWAP_16(x)		((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
+#define BSWAP_32(x)		((BSWAP_16(x) << 16) | \
+				 BSWAP_16((x) >> 16))
+#define BSWAP_64(x)		(((uint64_t)BSWAP_32(x) << 32) | \
+				 (uint64_t)BSWAP_32((x) >> 32))
+
+/*
+ * Iterate over the first sc->nXqs entries of the corresponding queue
+ * pointer array.  The cursor is loaded only while the index is in
+ * range, so the array is never read one entry past the last valid
+ * queue (the previous form evaluated sc->Xq[i] with i equal to the
+ * queue count on loop exit, and read entry 0 even for a zero count).
+ */
+#define for_all_wq_queues(sc, wq, i) \
+	for (i = 0; i < sc->nwqs && ((wq) = sc->wq[i], 1); i++)
+#define for_all_rq_queues(sc, rq, i) \
+	for (i = 0; i < sc->nrqs && ((rq) = sc->rq[i], 1); i++)
+#define for_all_eq_queues(sc, eq, i) \
+	for (i = 0; i < sc->neqs && ((eq) = sc->eq[i], 1); i++)
+#define for_all_cq_queues(sc, cq, i) \
+	for (i = 0; i < sc->ncqs && ((cq) = sc->cq[i], 1); i++)
+
+/* Flash specific */
+#define IOCTL_COOKIE		"SERVERENGINES CORP"
+#define MAX_FLASH_COMP		32
+
+/* Flash component image type identifiers.  NOTE(review): values come
+ * from the vendor flash layout; verify against the Emulex firmware
+ * image format before changing. */
+#define IMG_ISCSI		160
+#define IMG_REDBOOT		224
+#define IMG_BIOS		34
+#define IMG_PXEBIOS		32
+#define IMG_FCOEBIOS		33
+#define IMG_ISCSI_BAK		176
+#define IMG_FCOE		162
+#define IMG_FCOE_BAK		178
+#define IMG_NCSI		16
+#define IMG_PHY			192
+#define FLASHROM_OPER_FLASH	1
+#define FLASHROM_OPER_SAVE	2
+#define FLASHROM_OPER_REPORT	4
+#define FLASHROM_OPER_FLASH_PHY	9
+#define FLASHROM_OPER_SAVE_PHY	10
+#define TN_8022			13
+
+enum {
+	PHY_TYPE_CX4_10GB = 0,
+	PHY_TYPE_XFP_10GB,
+	PHY_TYPE_SFP_1GB,
+	PHY_TYPE_SFP_PLUS_10GB,
+	PHY_TYPE_KR_10GB,
+	PHY_TYPE_KX4_10GB,
+	PHY_TYPE_BASET_10GB,
+	PHY_TYPE_BASET_1GB,
+	PHY_TYPE_BASEX_1GB,
+	PHY_TYPE_SGMII,
+	PHY_TYPE_DISABLED = 255
+};
+
+/* Ring related */
+/* Advance _START by _STEP modulo _END (ring wrap-around); arguments
+ * are evaluated multiple times, so avoid side effects. */
+#define	GET_Q_NEXT(_START, _STEP, _END) \
+	((((_START) + (_STEP)) < (_END)) ? ((_START) + (_STEP)) \
+	: (((_START) + (_STEP)) - (_END)))
+
+#define	RING_NUM_FREE(_r) \
+	(uint32_t)((_r)->num_items - (_r)->num_used)
+/* Consume/produce _n entries by moving the get/put cursor. */
+#define	RING_GET(_r, _n) \
+	(_r)->cidx = GET_Q_NEXT((_r)->cidx, _n, (_r)->num_items)
+#define	RING_PUT(_r, _n) \
+	(_r)->pidx = GET_Q_NEXT((_r)->pidx, _n, (_r)->num_items)
+
+/* Typed pointer to the start of a DMA buffer's KVA. */
+#define	OCE_DMAPTR(_o, _t)	((_t *)(_o)->vaddr)
+
+#define	RING_GET_CONSUMER_ITEM_VA(_r, _t) \
+	(OCE_DMAPTR(&(_r)->dma, _t) + (_r)->cidx)
+#define	RING_GET_PRODUCER_ITEM_VA(_r, _t) \
+	(OCE_DMAPTR(&(_r)->dma, _t) + (_r)->pidx)
+
+
+/* Per-packet DMA bookkeeping for a transmit or receive slot. */
+struct oce_packet_desc {
+	struct mbuf *mbuf;		/* packet data */
+	bus_dmamap_t map;		/* its DMA map */
+	int nsegs;			/* segments loaded into the map */
+	uint32_t wqe_idx;		/* first WQE index used by the packet */
+};
+
+/* One contiguous bus_dma allocation (single segment) and its mapping. */
+struct oce_dma_mem {
+	bus_dma_tag_t tag;
+	bus_dmamap_t map;
+	bus_dma_segment_t segs;
+	int nsegs;
+	bus_size_t size;
+	caddr_t vaddr;			/* kernel virtual address */
+	bus_addr_t paddr;		/* bus/physical address */
+};
+
+/* Generic descriptor ring shared with the hardware; cidx/pidx wrap at
+ * num_items (see GET_Q_NEXT). */
+struct oce_ring {
+	uint16_t cidx;			/* Get ptr */
+	uint16_t pidx;			/* Put Ptr */
+	size_t item_size;		/* size of one descriptor */
+	size_t num_items;		/* ring capacity */
+	uint32_t num_used;
+	struct oce_dma_mem dma;		/* backing DMA memory */
+};
+
+/* Stats */
+/* Packet classification codes. */
+#define OCE_UNICAST_PACKET	0
+#define OCE_MULTICAST_PACKET	1
+#define OCE_BROADCAST_PACKET	2
+#define OCE_RSVD_PACKET		3
+
+/* Software-maintained receive totals, accumulated by the driver. */
+struct oce_rx_stats {
+	/* Total Receive Stats */
+	uint64_t t_rx_pkts;
+	uint64_t t_rx_bytes;
+	uint32_t t_rx_frags;
+	uint32_t t_rx_mcast_pkts;
+	uint32_t t_rx_ucast_pkts;
+	uint32_t t_rxcp_errs;
+};
+
+/* Software-maintained transmit totals. */
+struct oce_tx_stats {
+	/*Total Transmit Stats */
+	uint64_t t_tx_pkts;
+	uint64_t t_tx_bytes;
+	uint32_t t_tx_reqs;
+	uint32_t t_tx_stops;
+	uint32_t t_tx_wrbs;
+	uint32_t t_tx_compl;
+	uint32_t t_ipv6_ext_hdr_tx_drop;
+};
+
+/* Host-side copy of BE2/BE3 hardware counters (filled from the
+ * NIC_GET_STATS v0/v1 mailbox responses). */
+struct oce_be_stats {
+	uint8_t  be_on_die_temperature;
+	uint32_t be_tx_events;
+	uint32_t eth_red_drops;
+	uint32_t rx_drops_no_pbuf;
+	uint32_t rx_drops_no_txpb;
+	uint32_t rx_drops_no_erx_descr;
+	uint32_t rx_drops_no_tpre_descr;
+	uint32_t rx_drops_too_many_frags;
+	uint32_t rx_drops_invalid_ring;
+	uint32_t forwarded_packets;
+	uint32_t rx_drops_mtu;
+	uint32_t rx_crc_errors;
+	uint32_t rx_alignment_symbol_errors;
+	uint32_t rx_pause_frames;
+	uint32_t rx_priority_pause_frames;
+	uint32_t rx_control_frames;
+	uint32_t rx_in_range_errors;
+	uint32_t rx_out_range_errors;
+	uint32_t rx_frame_too_long;
+	uint32_t rx_address_match_errors;
+	uint32_t rx_dropped_too_small;
+	uint32_t rx_dropped_too_short;
+	uint32_t rx_dropped_header_too_small;
+	uint32_t rx_dropped_tcp_length;
+	uint32_t rx_dropped_runt;
+	uint32_t rx_ip_checksum_errs;
+	uint32_t rx_tcp_checksum_errs;
+	uint32_t rx_udp_checksum_errs;
+	uint32_t rx_switched_unicast_packets;
+	uint32_t rx_switched_multicast_packets;
+	uint32_t rx_switched_broadcast_packets;
+	uint32_t tx_pauseframes;
+	uint32_t tx_priority_pauseframes;
+	uint32_t tx_controlframes;
+	uint32_t rxpp_fifo_overflow_drop;
+	uint32_t rx_input_fifo_overflow_drop;
+	uint32_t pmem_fifo_overflow_drop;
+	uint32_t jabber_events;
+};
+
+/* Host-side copy of XE201 (Lancer) port counters; mirrors the
+ * NIC_GET_PPORT_STATS response fields (this copy is not __packed). */
+struct oce_xe201_stats {
+	uint64_t tx_pkts;
+	uint64_t tx_unicast_pkts;
+	uint64_t tx_multicast_pkts;
+	uint64_t tx_broadcast_pkts;
+	uint64_t tx_bytes;
+	uint64_t tx_unicast_bytes;
+	uint64_t tx_multicast_bytes;
+	uint64_t tx_broadcast_bytes;
+	uint64_t tx_discards;
+	uint64_t tx_errors;
+	uint64_t tx_pause_frames;
+	uint64_t tx_pause_on_frames;
+	uint64_t tx_pause_off_frames;
+	uint64_t tx_internal_mac_errors;
+	uint64_t tx_control_frames;
+	uint64_t tx_pkts_64_bytes;
+	uint64_t tx_pkts_65_to_127_bytes;
+	uint64_t tx_pkts_128_to_255_bytes;
+	uint64_t tx_pkts_256_to_511_bytes;
+	uint64_t tx_pkts_512_to_1023_bytes;
+	uint64_t tx_pkts_1024_to_1518_bytes;
+	uint64_t tx_pkts_1519_to_2047_bytes;
+	uint64_t tx_pkts_2048_to_4095_bytes;
+	uint64_t tx_pkts_4096_to_8191_bytes;
+	uint64_t tx_pkts_8192_to_9216_bytes;
+	uint64_t tx_lso_pkts;
+	uint64_t rx_pkts;
+	uint64_t rx_unicast_pkts;
+	uint64_t rx_multicast_pkts;
+	uint64_t rx_broadcast_pkts;
+	uint64_t rx_bytes;
+	uint64_t rx_unicast_bytes;
+	uint64_t rx_multicast_bytes;
+	uint64_t rx_broadcast_bytes;
+	uint32_t rx_unknown_protos;
+	uint64_t rx_discards;
+	uint64_t rx_errors;
+	uint64_t rx_crc_errors;
+	uint64_t rx_alignment_errors;
+	uint64_t rx_symbol_errors;
+	uint64_t rx_pause_frames;
+	uint64_t rx_pause_on_frames;
+	uint64_t rx_pause_off_frames;
+	uint64_t rx_frames_too_long;
+	uint64_t rx_internal_mac_errors;
+	uint32_t rx_undersize_pkts;
+	uint32_t rx_oversize_pkts;
+	uint32_t rx_fragment_pkts;
+	uint32_t rx_jabbers;
+	uint64_t rx_control_frames;
+	uint64_t rx_control_frames_unknown_opcode;
+	uint32_t rx_in_range_errors;
+	uint32_t rx_out_of_range_errors;
+	uint32_t rx_address_match_errors;
+	uint32_t rx_vlan_mismatch_errors;
+	uint32_t rx_dropped_too_small;
+	uint32_t rx_dropped_too_short;
+	uint32_t rx_dropped_header_too_small;
+	uint32_t rx_dropped_invalid_tcp_length;
+	uint32_t rx_dropped_runt;
+	uint32_t rx_ip_checksum_errors;
+	uint32_t rx_tcp_checksum_errors;
+	uint32_t rx_udp_checksum_errors;
+	uint32_t rx_non_rss_pkts;
+	uint64_t rx_ipv4_pkts;
+	uint64_t rx_ipv6_pkts;
+	uint64_t rx_ipv4_bytes;
+	uint64_t rx_ipv6_bytes;
+	uint64_t rx_nic_pkts;
+	uint64_t rx_tcp_pkts;
+	uint64_t rx_iscsi_pkts;
+	uint64_t rx_management_pkts;
+	uint64_t rx_switched_unicast_pkts;
+	uint64_t rx_switched_multicast_pkts;
+	uint64_t rx_switched_broadcast_pkts;
+	uint64_t num_forwards;
+	uint32_t rx_fifo_overflow;
+	uint32_t rx_input_fifo_overflow;
+	uint64_t rx_drops_too_many_frags;
+	uint32_t rx_drops_invalid_queue;
+	uint64_t rx_drops_mtu;
+	uint64_t rx_pkts_64_bytes;
+	uint64_t rx_pkts_65_to_127_bytes;
+	uint64_t rx_pkts_128_to_255_bytes;
+	uint64_t rx_pkts_256_to_511_bytes;
+	uint64_t rx_pkts_512_to_1023_bytes;
+	uint64_t rx_pkts_1024_to_1518_bytes;
+	uint64_t rx_pkts_1519_to_2047_bytes;
+	uint64_t rx_pkts_2048_to_4095_bytes;
+	uint64_t rx_pkts_4096_to_8191_bytes;
+	uint64_t rx_pkts_8192_to_9216_bytes;
+};
+
+/* Aggregate driver statistics: software totals plus the chip-family
+ * specific hardware counter copy (BE2/BE3 vs. XE201). */
+struct oce_drv_stats {
+	struct oce_rx_stats rx;
+	struct oce_tx_stats tx;
+	union {
+		struct oce_be_stats be;
+		struct oce_xe201_stats xe201;
+	} u0;
+};
+
+/* Vendor-code compatibility shims (kept: callers use boolean_t/TRUE/FALSE). */
+typedef int boolean_t;
+#define TRUE				1
+#define FALSE				0
+
+/* Poll/timeout tunables.  NOTE(review): the (5 * 1000 * 1000) style
+ * suggests microseconds for the mailbox timeouts — confirm against the
+ * delay loops in oce.c before relying on the unit. */
+#define	DEFAULT_MQ_MBOX_TIMEOUT		(5 * 1000 * 1000)
+#define	MBX_READY_TIMEOUT		(1 * 1000 * 1000)
+#define	DEFAULT_DRAIN_TIME		200
+#define	MBX_TIMEOUT_SEC			5
+#define	STAT_TIMEOUT			2000000
+
+/* size of the packet descriptor array in a transmit queue */
+#define OCE_TX_RING_SIZE		512
+#define OCE_RX_RING_SIZE		1024
+#define OCE_WQ_PACKET_ARRAY_SIZE	(OCE_TX_RING_SIZE/2)
+#define OCE_RQ_PACKET_ARRAY_SIZE	(OCE_RX_RING_SIZE)
+
+struct oce_dev;
+
+/* Supported event queue depths. */
+enum eq_len {
+	EQ_LEN_256  = 256,
+	EQ_LEN_512  = 512,
+	EQ_LEN_1024 = 1024,
+	EQ_LEN_2048 = 2048,
+	EQ_LEN_4096 = 4096
+};
+
+/* Supported event queue entry sizes (bytes). */
+enum eqe_size {
+	EQE_SIZE_4  = 4,
+	EQE_SIZE_16 = 16
+};
+
+/* Hardware queue flavors managed by the driver. */
+enum qtype {
+	QTYPE_EQ,
+	QTYPE_MQ,
+	QTYPE_WQ,
+	QTYPE_RQ,
+	QTYPE_CQ,
+	QTYPE_RSS
+};
+
+/* Whether a queue object currently exists in firmware. */
+typedef enum qstate_e {
+	QDELETED = 0x0,
+	QCREATED = 0x1
+} qstate_t;
+
+/* Creation parameters and interrupt-delay bounds for an event queue. */
+struct eq_config {
+	enum eq_len q_len;
+	enum eqe_size item_size;
+	uint32_t q_vector_num;
+	uint8_t min_eqd;
+	uint8_t max_eqd;
+	uint8_t cur_eqd;
+};
+
+/* Event queue: fans interrupts out to up to OCE_MAX_CQ_EQ completion
+ * queues attached to it. */
+struct oce_eq {
+	uint32_t eq_id;			/* firmware-assigned id */
+	void *parent;			/* softc back pointer */
+	void *cb_context;
+	struct oce_ring *ring;
+	uint32_t ref_count;
+	qstate_t qstate;
+	struct oce_cq *cq[OCE_MAX_CQ_EQ];
+	int cq_valid;
+	struct eq_config eq_cfg;
+	int vector;			/* interrupt vector */
+};
+
+/* Valid completion queue (CQ) ring lengths. */
+enum cq_len {
+	CQ_LEN_256 = 256,
+	CQ_LEN_512 = 512,
+	CQ_LEN_1024 = 1024
+};
+
+/* Software configuration of a completion queue. */
+struct cq_config {
+	enum cq_len q_len;		/* number of ring entries */
+	uint32_t item_size;		/* entry size in bytes */
+	boolean_t is_eventable;		/* generates EQ events */
+	boolean_t sol_eventable;
+	boolean_t nodelay;
+	uint16_t dma_coalescing;
+};
+
+/* Completion queue instance; completions dispatch via cq_handler. */
+struct oce_cq {
+	uint32_t cq_id;			/* id assigned by the hardware */
+	void *parent;			/* back pointer to the softc */
+	struct oce_eq *eq;		/* EQ this CQ is bound to */
+	void (*cq_handler)(void *);	/* completion callback */
+	void *cb_arg;			/* argument for cq_handler */
+	struct oce_ring *ring;		/* backing descriptor ring */
+	qstate_t qstate;
+	struct cq_config cq_cfg;
+	uint32_t ref_count;
+};
+
+/* Software configuration of the mailbox queue. */
+struct mq_config {
+	uint32_t eqd;			/* interrupt delay */
+	uint8_t q_len;			/* number of ring entries */
+};
+
+/* Mailbox queue: carries firmware commands and async notifications. */
+struct oce_mq {
+	void *parent;			/* back pointer to the softc */
+	struct oce_ring *ring;		/* backing descriptor ring */
+	uint32_t mq_id;			/* id assigned by the hardware */
+	struct oce_cq *cq;		/* command completion queue */
+	struct oce_cq *async_cq;	/* async event completion queue */
+	uint32_t mq_free;		/* free slots remaining */
+	qstate_t qstate;
+	struct mq_config cfg;
+};
+
+/* Context passed along with a posted mailbox command. */
+struct oce_mbx_ctx {
+	struct oce_mbx *mbx;		/* the command itself */
+	void (*cb) (void *ctx);		/* optional completion callback */
+	void *cb_ctx;			/* argument for cb */
+};
+
+/* Software configuration of a transmit (work) queue. */
+struct wq_config {
+	uint8_t wq_type;
+	uint16_t buf_size;
+	uint32_t q_len;			/* number of ring entries */
+	uint16_t pd_id;
+	uint16_t pci_fn_num;		/* owning PCI function */
+	uint32_t eqd;	/* interrupt delay */
+	uint32_t nbufs;
+	uint32_t nhdl;
+};
+
+/* Per-TX-queue software statistics. */
+struct oce_tx_queue_stats {
+	uint64_t tx_pkts;
+	uint64_t tx_bytes;
+	uint32_t tx_reqs;
+	uint32_t tx_stops; /* number of times TX Q was stopped */
+	uint32_t tx_wrbs;
+	uint32_t tx_compl;
+	uint32_t tx_rate;
+	uint32_t ipv6_ext_hdr_tx_drop;
+};
+
+/* Transmit queue instance with its in-flight packet descriptors. */
+struct oce_wq {
+	void *parent;			/* back pointer to the softc */
+	struct oce_ring *ring;		/* backing descriptor ring */
+	struct oce_cq *cq;		/* TX completion queue */
+	bus_dma_tag_t tag;		/* DMA tag for packet maps */
+	struct oce_packet_desc pckts[OCE_WQ_PACKET_ARRAY_SIZE];
+	uint32_t packets_in;		/* producer index into pckts[] */
+	uint32_t packets_out;		/* consumer index into pckts[] */
+	uint32_t wqm_used;
+	boolean_t resched;
+	uint32_t wq_free;		/* free ring slots remaining */
+	uint32_t tx_deferd;
+	uint32_t pkt_drops;
+	qstate_t qstate;
+	uint16_t wq_id;			/* id assigned by the hardware */
+	struct wq_config cfg;
+	int queue_index;
+	struct oce_tx_queue_stats tx_stats;
+};
+
+/* Software configuration of a receive queue. */
+struct rq_config {
+	uint32_t q_len;			/* number of ring entries */
+	uint32_t frag_size;		/* RX buffer fragment size */
+	uint32_t mtu;
+	uint32_t if_id;			/* owning interface id */
+	uint32_t is_rss_queue;		/* nonzero for RSS queues */
+	uint32_t eqd;			/* interrupt delay */
+	uint32_t nbufs;
+};
+
+/* Per-RX-queue software statistics. */
+struct oce_rx_queue_stats {
+	uint32_t rx_post_fail;		/* failed buffer replenishments */
+	uint32_t rx_ucast_pkts;
+	uint32_t rx_compl;
+	uint64_t rx_bytes;
+	uint64_t rx_bytes_prev;
+	uint64_t rx_pkts;
+	uint32_t rx_rate;
+	uint32_t rx_mcast_pkts;
+	uint32_t rxcp_err;
+	uint32_t rx_frags;
+	uint32_t prev_rx_frags;
+	uint32_t rx_fps;
+};
+
+/* Receive queue instance with its posted buffer descriptors. */
+struct oce_rq {
+	struct rq_config cfg;
+	uint32_t rq_id;			/* id assigned by the hardware */
+	int queue_index;
+	uint32_t rss_cpuid;
+	void *parent;			/* back pointer to the softc */
+	struct oce_ring *ring;		/* backing descriptor ring */
+	struct oce_cq *cq;		/* RX completion queue */
+	bus_dma_tag_t tag;		/* DMA tag for packet maps */
+	struct oce_packet_desc pckts[OCE_RQ_PACKET_ARRAY_SIZE];
+	uint32_t packets_in;		/* producer index into pckts[] */
+	uint32_t packets_out;		/* consumer index into pckts[] */
+	uint32_t pending;		/* buffers posted to the hardware */
+#ifdef notdef
+	struct mbuf *head;
+	struct mbuf *tail;
+	int fragsleft;
+#endif
+	qstate_t qstate;
+	struct oce_rx_queue_stats rx_stats;
+#ifdef OCE_LRO
+	struct lro_ctrl lro;
+	int lro_pkts_queued;
+#endif
+};
+
+/*
+ * Link status as reported by the firmware; layout is wire-defined,
+ * hence __packed.
+ */
+struct link_status {
+	uint8_t physical_port;
+	uint8_t mac_duplex;
+	uint8_t mac_speed;
+	uint8_t mac_fault;
+	uint8_t mgmt_mac_duplex;
+	uint8_t mgmt_mac_speed;
+	uint16_t qos_link_speed;
+	uint32_t logical_link_status;
+} __packed;
+
+/* Values for oce_softc.flags (bus/controller capabilities). */
+#define OCE_FLAGS_PCIX			0x00000001
+#define OCE_FLAGS_PCIE			0x00000002
+#define OCE_FLAGS_MSI_CAPABLE		0x00000004
+#define OCE_FLAGS_MSIX_CAPABLE		0x00000008
+#define OCE_FLAGS_USING_MSI		0x00000010
+#define OCE_FLAGS_USING_MSIX		0x00000020
+#define OCE_FLAGS_FUNCRESET_RQD		0x00000040
+#define OCE_FLAGS_VIRTUAL_PORT		0x00000080
+#define OCE_FLAGS_MBOX_ENDIAN_RQD	0x00000100
+#define OCE_FLAGS_BE3			0x00000200
+#define OCE_FLAGS_XE201			0x00000400
+#define OCE_FLAGS_BE2			0x00000800
+
+/* Per-device driver state (softc). */
+struct oce_softc {
+	struct device dev;		/* must be first: autoconf glue */
+
+	uint32_t flags;			/* OCE_FLAGS_* */
+
+	struct pci_attach_args pa;
+
+	/* BAR mappings: CFG, CSR and doorbell spaces (BE3 uses all
+	 * three, Lancer only CFG -- see the register macros below). */
+	bus_space_tag_t cfg_btag;
+	bus_space_handle_t cfg_bhandle;
+	bus_size_t cfg_size;
+
+	bus_space_tag_t csr_btag;
+	bus_space_handle_t csr_bhandle;
+	bus_size_t csr_size;
+
+	bus_space_tag_t db_btag;
+	bus_space_handle_t db_bhandle;
+	bus_size_t db_size;
+
+	/* Network interface and link state */
+	struct arpcom arpcom;
+	struct ifmedia media;
+	int link_active;
+	uint8_t link_status;
+	uint8_t link_speed;
+	uint8_t duplex;
+	uint32_t qos_link_speed;
+
+	char fw_version[32];		/* firmware version string */
+	struct mac_address_format macaddr;
+
+	struct oce_dma_mem bsmbx;	/* bootstrap mailbox DMA memory */
+
+	/* Controller identity and limits reported by the firmware */
+	uint32_t config_number;
+	uint32_t asic_revision;
+	uint32_t port_id;
+	uint32_t function_mode;
+	uint32_t function_caps;
+	uint32_t max_tx_rings;
+	uint32_t max_rx_rings;
+
+	struct oce_wq *wq[OCE_MAX_WQ];	/* TX work queues */
+	struct oce_rq *rq[OCE_MAX_RQ];	/* RX work queues */
+	struct oce_cq *cq[OCE_MAX_CQ];	/* Completion queues */
+	struct oce_eq *eq[OCE_MAX_EQ];	/* Event queues */
+	struct oce_mq *mq;		/* Mailbox queue */
+
+	/* Counts of the queues actually created */
+	ushort neqs;
+	ushort ncqs;
+	ushort nrqs;
+	ushort nwqs;
+	ushort intr_count;
+	ushort tx_ring_size;
+	ushort rx_ring_size;
+	ushort rq_frag_size;
+	ushort rss_enable;
+
+	uint32_t if_id;		/* interface ID */
+	uint32_t nifs;		/* number of adapter interfaces, 0 or 1 */
+	uint32_t pmac_id;	/* PMAC id */
+
+	uint32_t if_cap_flags;
+
+	uint32_t flow_control;
+	char promisc;
+
+	char be3_native;	/* BE3 native mode in use */
+	uint32_t pvid;
+
+	struct oce_dma_mem stats_mem;	/* DMA buffer for hw statistics */
+	struct oce_drv_stats oce_stats_info;
+	struct timeout timer;		/* periodic tick */
+	struct timeout rxrefill;	/* RX buffer replenish timeout */
+};
+
+/**************************************************
+ * BUS memory read/write macros
+ * BE3: accesses three BAR spaces (CFG, CSR, DB)
+ * Lancer: accesses one BAR space (CFG)
+ **************************************************/
+/*
+ * The `space' argument is token-pasted into the softc member names
+ * (cfg/csr/db).  On non-BE (Lancer) hardware every access is routed
+ * to the CFG BAR regardless of the requested space.
+ *
+ * NOTE(review): the `#else' branch below (barrier-inserting variants)
+ * is compiled out by `#if 1' -- dead code kept for reference.
+ */
+#if 1
+#define OCE_READ_REG32(sc, space, o) \
+	((IS_BE(sc)) ? (bus_space_read_4((sc)->space##_btag, \
+					 (sc)->space##_bhandle,o)) \
+		     : (bus_space_read_4((sc)->cfg_btag, \
+					 (sc)->cfg_bhandle,o)))
+#define OCE_READ_REG16(sc, space, o) \
+	((IS_BE(sc)) ? (bus_space_read_2((sc)->space##_btag, \
+					 (sc)->space##_bhandle,o)) \
+		     : (bus_space_read_2((sc)->cfg_btag, \
+					 (sc)->cfg_bhandle,o)))
+#define OCE_READ_REG8(sc, space, o) \
+	((IS_BE(sc)) ? (bus_space_read_1((sc)->space##_btag, \
+					 (sc)->space##_bhandle,o)) \
+		     : (bus_space_read_1((sc)->cfg_btag, \
+					 (sc)->cfg_bhandle,o)))
+
+#define OCE_WRITE_REG32(sc, space, o, v) \
+	((IS_BE(sc)) ? (bus_space_write_4((sc)->space##_btag, \
+					  (sc)->space##_bhandle,o,v)) \
+		     : (bus_space_write_4((sc)->cfg_btag, \
+					  (sc)->cfg_bhandle,o,v)))
+#define OCE_WRITE_REG16(sc, space, o, v) \
+	((IS_BE(sc)) ? (bus_space_write_2((sc)->space##_btag, \
+					  (sc)->space##_bhandle,o,v)) \
+		     : (bus_space_write_2((sc)->cfg_btag, \
+					  (sc)->cfg_bhandle,o,v)))
+#define OCE_WRITE_REG8(sc, space, o, v) \
+	((IS_BE(sc)) ? (bus_space_write_1((sc)->space##_btag, \
+					  (sc)->space##_bhandle,o,v)) \
+		     : (bus_space_write_1((sc)->cfg_btag, \
+					  (sc)->cfg_bhandle,o,v)))
+#else
+/* Alternative accessors that insert explicit bus space barriers. */
+static __inline u_int32_t
+oce_bus_read_4(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg)
+{
+	bus_space_barrier(tag, handle, reg, 4, BUS_SPACE_BARRIER_READ);
+	return (bus_space_read_4(tag, handle, reg));
+}
+
+static __inline u_int16_t
+oce_bus_read_2(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg)
+{
+	bus_space_barrier(tag, handle, reg, 2, BUS_SPACE_BARRIER_READ);
+	return (bus_space_read_2(tag, handle, reg));
+}
+
+static __inline u_int8_t
+oce_bus_read_1(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg)
+{
+	bus_space_barrier(tag, handle, reg, 1, BUS_SPACE_BARRIER_READ);
+	return (bus_space_read_1(tag, handle, reg));
+}
+
+static __inline void
+oce_bus_write_4(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg,
+    u_int32_t val)
+{
+	bus_space_write_4(tag, handle, reg, val);
+	bus_space_barrier(tag, handle, reg, 4, BUS_SPACE_BARRIER_WRITE);
+}
+
+static __inline void
+oce_bus_write_2(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg,
+    u_int16_t val)
+{
+	bus_space_write_2(tag, handle, reg, val);
+	bus_space_barrier(tag, handle, reg, 2, BUS_SPACE_BARRIER_WRITE);
+}
+
+static __inline void
+oce_bus_write_1(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t reg,
+    u_int8_t val)
+{
+	bus_space_write_1(tag, handle, reg, val);
+	bus_space_barrier(tag, handle, reg, 1, BUS_SPACE_BARRIER_WRITE);
+}
+
+#define OCE_READ_REG32(sc, space, o) \
+	((IS_BE(sc)) ? (oce_bus_read_4((sc)->space##_btag, \
+				       (sc)->space##_bhandle,o)) \
+		     : (oce_bus_read_4((sc)->cfg_btag, \
+				       (sc)->cfg_bhandle,o)))
+#define OCE_READ_REG16(sc, space, o) \
+	((IS_BE(sc)) ? (oce_bus_read_2((sc)->space##_btag, \
+				       (sc)->space##_bhandle,o)) \
+		     : (oce_bus_read_2((sc)->cfg_btag, \
+				       (sc)->cfg_bhandle,o)))
+#define OCE_READ_REG8(sc, space, o) \
+	((IS_BE(sc)) ? (oce_bus_read_1((sc)->space##_btag, \
+				       (sc)->space##_bhandle,o)) \
+		     : (oce_bus_read_1((sc)->cfg_btag, \
+				       (sc)->cfg_bhandle,o)))
+
+#define OCE_WRITE_REG32(sc, space, o, v) \
+	((IS_BE(sc)) ? (oce_bus_write_4((sc)->space##_btag, \
+					(sc)->space##_bhandle,o,v)) \
+		     : (oce_bus_write_4((sc)->cfg_btag, \
+					(sc)->cfg_bhandle,o,v)))
+#define OCE_WRITE_REG16(sc, space, o, v) \
+	((IS_BE(sc)) ? (oce_bus_write_2((sc)->space##_btag, \
+					(sc)->space##_bhandle,o,v)) \
+		     : (oce_bus_write_2((sc)->cfg_btag, \
+					(sc)->cfg_bhandle,o,v)))
+#define OCE_WRITE_REG8(sc, space, o, v) \
+	((IS_BE(sc)) ? (oce_bus_write_1((sc)->space##_btag, \
+					(sc)->space##_bhandle,o,v)) \
+		     : (oce_bus_write_1((sc)->cfg_btag, \
+					(sc)->cfg_bhandle,o,v)))
+#endif
+
+/***********************************************************
+ * DMA memory functions
+ ***********************************************************/
+/* Sync a whole oce_dma_mem / dmamap; `f' is a BUS_DMASYNC_* flag. */
+#define oce_dma_sync(d, f) \
+	bus_dmamap_sync((d)->tag, (d)->map, 0, (d)->map->dm_mapsize, f)
+#define oce_dmamap_sync(t, m, f) \
+	bus_dmamap_sync(t, m, 0, (m)->dm_mapsize, f)
+int  oce_dma_alloc(struct oce_softc *sc, bus_size_t size,
+	struct oce_dma_mem *dma, int flags);
+void oce_dma_free(struct oce_softc *sc, struct oce_dma_mem *dma);
+void oce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
+	int error);
+void oce_destroy_ring(struct oce_softc *sc, struct oce_ring *ring);
+struct oce_ring *oce_create_ring(struct oce_softc *sc, int q_len,
+	int num_entries, int max_segs);
+uint32_t oce_page_list(struct oce_softc *sc, struct oce_ring *ring,
+	struct phys_addr *pa_list, int max_segs);
+
+/************************************************************
+ * oce_hw_xxx functions
+ ************************************************************/
+int  oce_hw_pci_alloc(struct oce_softc *sc);
+int  oce_hw_init(struct oce_softc *sc);
+int  oce_create_nw_interface(struct oce_softc *sc);
+void oce_delete_nw_interface(struct oce_softc *sc);
+int  oce_hw_update_multicast(struct oce_softc *sc);
+void oce_hw_intr_enable(struct oce_softc *sc);
+void oce_hw_intr_disable(struct oce_softc *sc);
+
+/************************************************************
+ * Mailbox functions
+ ************************************************************/
+int  oce_mbox_init(struct oce_softc *sc);
+int  oce_mbox_dispatch(struct oce_softc *sc, uint32_t tmo_sec);
+int  oce_mbox_post(struct oce_softc *sc, struct oce_mbx *mbx,
+	struct oce_mbx_ctx *mbxctx);
+int  oce_mbox_wait(struct oce_softc *sc, uint32_t tmo_sec);
+int  oce_first_mcc_cmd(struct oce_softc *sc);
+
+int  oce_get_link_status(struct oce_softc *sc);
+int  oce_rxf_set_promiscuous(struct oce_softc *sc, uint32_t enable);
+int  oce_config_nic_rss(struct oce_softc *sc, uint32_t if_id,
+	uint16_t enable_rss);
+
+int  oce_mbox_macaddr_del(struct oce_softc *sc, uint32_t if_id,
+	uint32_t pmac_id);
+int  oce_mbox_macaddr_add(struct oce_softc *sc, uint8_t *mac_addr,
+	uint32_t if_id, uint32_t *pmac_id);
+int  oce_read_mac_addr(struct oce_softc *sc, uint32_t if_id, uint8_t perm,
+	uint8_t type, struct mac_address_format *mac);
+
+/* Queue creation via mailbox commands */
+int  oce_mbox_create_rq(struct oce_rq *rq);
+int  oce_mbox_create_wq(struct oce_wq *wq);
+int  oce_mbox_create_mq(struct oce_mq *mq);
+int  oce_mbox_create_eq(struct oce_eq *eq);
+int  oce_mbox_create_cq(struct oce_cq *cq, uint32_t ncoalesce,
+	uint32_t is_eventable);
+void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port,
+	uint8_t subsys, uint8_t opcode, uint32_t timeout, uint32_t payload_len,
+	uint8_t version);
+
+/************************************************************
+ * Statistics functions
+ ************************************************************/
+void oce_refresh_queue_stats(struct oce_softc *sc);
+int  oce_refresh_nic_stats(struct oce_softc *sc);
+int  oce_stats_init(struct oce_softc *sc);
+void oce_stats_free(struct oce_softc *sc);
+
+/* Capabilities */
+#define OCE_MODCAP_RSS			0
+#define OCE_MAX_RSP_HANDLED		64
+extern uint32_t oce_max_rsp_handled;	/* max responses */
+
+/* Loopback mode selectors */
+#define OCE_MAC_LOOPBACK		0x0
+#define OCE_PHY_LOOPBACK		0x1
+#define OCE_ONE_PORT_EXT_LOOPBACK	0x2
+#define OCE_NO_LOOPBACK			0xff
+
+/* Expands to nothing; kept for source parity with other ports. */
+#define DW_SWAP(x, l)
+/* Arguments fully parenthesized so compound expressions expand safely. */
+#define IS_ALIGNED(x,a)		(((x) % (a)) == 0)
+#define ADDR_HI(x)		((uint32_t)((uint64_t)(x) >> 32))
+/* No trailing semicolon: must remain usable in expression context. */
+#define ADDR_LO(x)		((uint32_t)((uint64_t)(x) & 0xffffffff))
+
+#define IFCAP_HWCSUM \
+	(IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4)
+#define IF_LRO_ENABLED(ifp)	(((ifp)->if_capabilities & IFCAP_LRO) ? 1:0)
+#define IF_LSO_ENABLED(ifp)	(((ifp)->if_capabilities & IFCAP_TSO4) ? 1:0)
+#define IF_CSUM_ENABLED(ifp)	(((ifp)->if_capabilities & IFCAP_HWCSUM) ? 1:0)
+
+#define OCE_LOG2(x) (oce_highbit(x))
+/*
+ * Return the index of the single set bit in `x', i.e. log2(x) for an
+ * exact power of two.  Returns 0 when `x' is zero or has more than
+ * one bit set.
+ */
+static inline uint32_t oce_highbit(uint32_t x)
+{
+	uint32_t bit;
+
+	/* Zero or more than one bit set: not a power of two. */
+	if (x == 0 || (x & (x - 1)) != 0)
+		return 0;
+
+	/* Exactly one bit is set; shift it down to find its index. */
+	for (bit = 0; (x & 1) == 0; bit++)
+		x >>= 1;
+
+	return bit;
+}