summaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
authorPatrick Wildt <patrick@cvs.openbsd.org>2020-06-25 12:09:12 +0000
committerPatrick Wildt <patrick@cvs.openbsd.org>2020-06-25 12:09:12 +0000
commit8011ee159252cd4be876a4ebc98d42c7c902cf1d (patch)
tree0e008ea2fdb22083c681363891c62480f82226d6 /sys/dev
parentadcfecf9d1bdfc272f163bce54b32d1d8cad9cbd (diff)
Add mvpp(4), a driver for the Marvell Packet Processor v2, as used on
the Armada 7K and 8K SoCs. Additionally, it seems to be used on the upcoming CN9K SoCs.

This only supports version v2.2, since the older v2.1 is only used on some old ARMv7 boards we have no support for, and where there is no hardware for us to support.

A huge part of this driver is configuring the TCAM/SRAM-based "parser". Thus most of the code, especially the parser handling, was ported from EDK2 with a sed script doing plenty of automatic renaming and un-camel-casing.

The controller supports multiple TX/RX queues/vectors and RSS, so this would be a nice platform for testing network processing improvements on arm64. For now though we only configure and use a single queue.

The driver is still unfinished, but it's time to continue the work in tree. At least we can already send and receive packets, good enough for dhclient to work. Since the MACCHIATObin uses Clause 45 10G PHYs and SFPs, link state handling is still a bit bogus, as we have no support for that. The 1G RJ45 port works fine though.

ok dlg@
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/fdt/files.fdt8
-rw-r--r--sys/dev/fdt/if_mvpp.c4122
-rw-r--r--sys/dev/fdt/if_mvppreg.h2077
3 files changed, 6206 insertions, 1 deletions
diff --git a/sys/dev/fdt/files.fdt b/sys/dev/fdt/files.fdt
index 8e516ff8b67..d1bfe1e2959 100644
--- a/sys/dev/fdt/files.fdt
+++ b/sys/dev/fdt/files.fdt
@@ -1,4 +1,4 @@
-# $OpenBSD: files.fdt,v 1.140 2020/06/20 18:13:18 kettenis Exp $
+# $OpenBSD: files.fdt,v 1.141 2020/06/25 12:09:11 patrick Exp $
#
# Config file and device description for machine-independent FDT code.
# Included by ports that need it.
@@ -406,6 +406,12 @@ device mvneta: ether, ifnet, mii, ifmedia
attach mvneta at fdt
file dev/fdt/if_mvneta.c mvneta
+device mvppc {}
+attach mvppc at fdt
+device mvpp: ether, ifnet, mii, ifmedia
+attach mvpp at mvppc
+file dev/fdt/if_mvpp.c mvppc | mvpp
+
device mvrng
attach mvrng at fdt
file dev/fdt/mvrng.c mvrng
diff --git a/sys/dev/fdt/if_mvpp.c b/sys/dev/fdt/if_mvpp.c
new file mode 100644
index 00000000000..f48d17188e3
--- /dev/null
+++ b/sys/dev/fdt/if_mvpp.c
@@ -0,0 +1,4122 @@
+/* $OpenBSD: if_mvpp.c,v 1.1 2020/06/25 12:09:11 patrick Exp $ */
+/*
+ * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright (C) 2016 Marvell International Ltd.
+ *
+ * Marvell BSD License Option
+ *
+ * If you received this File from Marvell, you may opt to use, redistribute
+ * and/or modify this File under the following licensing terms.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Marvell nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bpfilter.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/queue.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/timeout.h>
+
+#include <machine/bus.h>
+#include <machine/fdt.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_clock.h>
+#include <dev/ofw/ofw_gpio.h>
+#include <dev/ofw/ofw_misc.h>
+#include <dev/ofw/ofw_pinctrl.h>
+#include <dev/ofw/ofw_regulator.h>
+#include <dev/ofw/fdt.h>
+
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#if NBPFILTER > 0
+#include <net/bpf.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/if_ether.h>
+
+#include <netinet6/in6_var.h>
+#include <netinet/ip6.h>
+
+#include <dev/fdt/if_mvppreg.h>
+
+/* One rx/tx buffer slot: its DMA map and the mbuf currently loaded into it. */
+struct mvpp2_buf {
+ bus_dmamap_t mb_map;
+ struct mbuf *mb_m;
+};
+
+/* Ring geometry: ring entry counts and max DMA segments per tx packet. */
+#define MVPP2_NTXDESC 512
+#define MVPP2_NTXSEGS 16
+#define MVPP2_NRXDESC 512
+
+/* Buffer manager (BM) pool: pool descriptor memory plus its rx buffers. */
+struct mvpp2_bm_pool {
+ struct mvpp2_dmamem *bm_mem;
+ struct mvpp2_buf *rxbuf;
+};
+
+/* BM parameters: buffers per pool, base alignment, hardware pool count. */
+#define MVPP2_BM_SIZE 64
+#define MVPP2_BM_POOL_PTR_ALIGN 128
+#define MVPP2_BM_POOLS_NUM 8
+#define MVPP2_BM_ALIGN 32
+#define MVPP2_MAX_PORT 3
+
+/*
+ * Software state for one tx queue: descriptor ring, per-slot buffer
+ * bookkeeping, and producer/consumer indices into the ring.
+ */
+struct mvpp2_tx_queue {
+ uint8_t id;
+ uint8_t log_id;
+ struct mvpp2_dmamem *ring;
+ struct mvpp2_buf *buf;
+ struct mvpp2_tx_desc *descs;
+ int prod;
+ int cnt;
+ int cons;
+
+ /* tx-done packet coalescing threshold (see mvpp2_tx_pkts_coal_set()). */
+ uint32_t done_pkts_coal;
+};
+
+/*
+ * Software state for one rx queue: descriptor ring, producer/consumer
+ * indices and the if_rxring accounting for buffer replenishment.
+ */
+struct mvpp2_rx_queue {
+ uint8_t id;
+ struct mvpp2_dmamem *ring;
+ struct mvpp2_rx_desc *descs;
+ int prod;
+ struct if_rxring rxring;
+ int cons;
+
+ /* rx interrupt coalescing settings (packets / time). */
+ uint32_t pkts_coal;
+ uint32_t time_coal;
+};
+
+/*
+ * A contiguous chunk of DMA-able memory: map, backing segment, size and
+ * kernel mapping.  Accessed through the MVPP2_DMA_* macros below.
+ */
+struct mvpp2_dmamem {
+ bus_dmamap_t mdm_map;
+ bus_dma_segment_t mdm_seg;
+ size_t mdm_size;
+ caddr_t mdm_kva;
+};
+/* MAP: the bus_dma map; LEN: size; DVA: device (bus) address; KVA: CPU va. */
+#define MVPP2_DMA_MAP(_mdm) ((_mdm)->mdm_map)
+#define MVPP2_DMA_LEN(_mdm) ((_mdm)->mdm_size)
+#define MVPP2_DMA_DVA(_mdm) ((_mdm)->mdm_map->dm_segs[0].ds_addr)
+#define MVPP2_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
+
+struct mvpp2_port;
+/*
+ * Per-controller (mvppc) state shared by all ports: the two register
+ * windows (packet processor base and interface space), BM pools, parser
+ * shadow and the aggregated tx queues.
+ */
+struct mvpp2_softc {
+ struct device sc_dev;
+ int sc_node;
+ bus_space_tag_t sc_iot;
+ bus_space_handle_t sc_ioh_base;
+ bus_space_handle_t sc_ioh_iface;
+ bus_size_t sc_iosize_base;
+ bus_size_t sc_iosize_iface;
+ bus_dma_tag_t sc_dmat;
+
+ /* "pp_clk" frequency, from the device tree. */
+ uint32_t sc_tclk;
+
+ /* one BM pool per port. */
+ struct mvpp2_bm_pools sc_bm_pools[MVPP2_MAX_PORT];
+
+ /* software shadow of the TCAM/SRAM parser entries. */
+ struct mvpp2_prs_shadow *sc_prs_shadow;
+ uint8_t *sc_prs_double_vlans;
+
+ int sc_aggr_ntxq;
+ struct mvpp2_tx_queue *sc_aggr_txqs;
+
+ struct mvpp2_port **sc_ports;
+};
+
+/*
+ * Per-port (mvpp) state: the network interface, MII/media handling, the
+ * PHY connection mode from the device tree and the port's tx/rx queues.
+ */
+struct mvpp2_port {
+ struct device sc_dev;
+ struct mvpp2_softc *sc;
+ int sc_node;
+ bus_dma_tag_t sc_dmat;
+ int sc_id;
+ int sc_gop_id;
+
+ struct arpcom sc_ac;
+#define sc_lladdr sc_ac.ac_enaddr
+ struct mii_data sc_mii;
+#define sc_media sc_mii.mii_media
+ struct mii_device *sc_mdio;
+ char sc_cur_lladdr[ETHER_ADDR_LEN];
+
+ /* "phy-mode" from the device tree. */
+ enum {
+ PHY_MODE_XAUI,
+ PHY_MODE_10GBASER,
+ PHY_MODE_2500BASEX,
+ PHY_MODE_1000BASEX,
+ PHY_MODE_SGMII,
+ PHY_MODE_RGMII,
+ PHY_MODE_RGMII_ID,
+ PHY_MODE_RGMII_RXID,
+ PHY_MODE_RGMII_TXID,
+ } sc_phy_mode;
+ int sc_fixed_link;
+ int sc_inband_status;
+ int sc_link;
+ int sc_phyloc;
+ int sc_sfp;
+
+ int sc_ntxq;
+ int sc_nrxq;
+
+ struct mvpp2_tx_queue *sc_txqs;
+ struct mvpp2_rx_queue *sc_rxqs;
+
+ struct timeout sc_tick;
+
+ uint32_t sc_tx_time_coal;
+};
+
+/*
+ * NOTE(review): MVPP2_MAX_PORTS (4) here vs MVPP2_MAX_PORT (3) above —
+ * the FIFO/parser loops use 4 while the BM pools use 3; confirm the
+ * intended distinction (hardware ports vs usable ports?).
+ */
+#define MVPP2_MAX_PORTS 4
+
+/* Attach arguments passed from the controller to each child port. */
+struct mvpp2_attach_args {
+ int ma_node;
+ bus_dma_tag_t ma_dmat;
+};
+
+#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
+
+static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
+
+int mvpp2_match(struct device *, void *, void *);
+void mvpp2_attach(struct device *, struct device *, void *);
+void mvpp2_attach_deferred(struct device *);
+
+struct cfattach mvppc_ca = {
+ sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
+};
+
+struct cfdriver mvppc_cd = {
+ NULL, "mvppc", DV_DULL
+};
+
+int mvpp2_port_match(struct device *, void *, void *);
+void mvpp2_port_attach(struct device *, struct device *, void *);
+
+struct cfattach mvpp_ca = {
+ sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
+};
+
+struct cfdriver mvpp_cd = {
+ NULL, "mvpp", DV_IFNET
+};
+
+uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
+void mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
+uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
+void mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
+uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
+void mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
+uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
+void mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
+uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
+void mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
+
+int mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
+void mvpp2_start(struct ifnet *);
+int mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
+void mvpp2_watchdog(struct ifnet *);
+
+int mvpp2_media_change(struct ifnet *);
+void mvpp2_media_status(struct ifnet *, struct ifmediareq *);
+
+int mvpp2_mii_readreg(struct device *, int, int);
+void mvpp2_mii_writereg(struct device *, int, int, int);
+void mvpp2_mii_statchg(struct device *);
+void mvpp2_inband_statchg(struct mvpp2_port *);
+void mvpp2_port_change(struct mvpp2_port *);
+
+void mvpp2_tick(void *);
+void mvpp2_rxtick(void *);
+
+int mvpp2_link_intr(void *);
+int mvpp2_intr(void *);
+void mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
+void mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
+void mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
+
+void mvpp2_up(struct mvpp2_port *);
+void mvpp2_down(struct mvpp2_port *);
+void mvpp2_iff(struct mvpp2_port *);
+int mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
+
+void mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
+void mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
+void mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
+void mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
+void mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
+void mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
+void mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
+
+void mvpp2_mac_config(struct mvpp2_port *);
+void mvpp2_xlg_config(struct mvpp2_port *);
+void mvpp2_gmac_config(struct mvpp2_port *);
+
+struct mvpp2_dmamem *
+ mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
+void mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
+struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
+void mvpp2_fill_rx_ring(struct mvpp2_softc *);
+
+void mvpp2_interrupts_enable(struct mvpp2_port *, int);
+void mvpp2_interrupts_disable(struct mvpp2_port *, int);
+int mvpp2_egress_port(struct mvpp2_port *);
+int mvpp2_txq_phys(int, int);
+void mvpp2_defaults_set(struct mvpp2_port *);
+void mvpp2_ingress_enable(struct mvpp2_port *);
+void mvpp2_ingress_disable(struct mvpp2_port *);
+void mvpp2_egress_enable(struct mvpp2_port *);
+void mvpp2_egress_disable(struct mvpp2_port *);
+void mvpp2_port_enable(struct mvpp2_port *);
+void mvpp2_port_disable(struct mvpp2_port *);
+void mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
+int mvpp2_rxq_received(struct mvpp2_port *, int);
+void mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
+void mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
+void mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
+ uint32_t);
+void mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
+ uint32_t);
+void mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
+ uint32_t);
+void mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
+
+void mvpp2_axi_config(struct mvpp2_softc *);
+void mvpp2_bm_pool_init(struct mvpp2_softc *);
+void mvpp2_rx_fifo_init(struct mvpp2_softc *);
+void mvpp2_tx_fifo_init(struct mvpp2_softc *);
+int mvpp2_prs_default_init(struct mvpp2_softc *);
+void mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
+void mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
+void mvpp2_prs_def_flow_init(struct mvpp2_softc *);
+void mvpp2_prs_mh_init(struct mvpp2_softc *);
+void mvpp2_prs_mac_init(struct mvpp2_softc *);
+void mvpp2_prs_dsa_init(struct mvpp2_softc *);
+int mvpp2_prs_etype_init(struct mvpp2_softc *);
+int mvpp2_prs_vlan_init(struct mvpp2_softc *);
+int mvpp2_prs_pppoe_init(struct mvpp2_softc *);
+int mvpp2_prs_ip6_init(struct mvpp2_softc *);
+int mvpp2_prs_ip4_init(struct mvpp2_softc *);
+void mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
+ uint32_t, uint32_t);
+void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
+void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
+void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
+uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
+void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
+ uint8_t, uint8_t);
+void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
+ uint8_t *, uint8_t *);
+int mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
+void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
+int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
+int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
+void mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
+ uint32_t *, uint32_t *);
+void mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
+int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
+void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
+void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
+void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
+void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
+void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
+void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
+ uint32_t);
+void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
+void mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
+int mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
+int mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *);
+struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2_softc *, int);
+int mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
+void mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
+void mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int);
+void mvpp2_prs_mac_multi_set(struct mvpp2_softc *, uint32_t, uint32_t, int);
+void mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
+void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
+ int, int, int);
+struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
+ int);
+int mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
+int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
+struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
+ uint16_t, uint16_t);
+int mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
+ uint32_t);
+int mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
+int mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
+int mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
+int mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
+struct mvpp2_prs_entry *mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int,
+ const uint8_t *, uint8_t *, int);
+int mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
+ uint8_t *);
+int mvpp2_prs_mac_da_accept(struct mvpp2_softc *, int, const uint8_t *, int);
+int mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
+int mvpp2_prs_def_flow(struct mvpp2_port *);
+void mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
+void mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
+void mvpp2_cls_init(struct mvpp2_softc *);
+void mvpp2_cls_port_config(struct mvpp2_port *);
+void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
+
+/*
+ * Match the packet processor controller against the FDT compatible
+ * string for the Armada 7K/8K PP v2.2.
+ */
+int
+mvpp2_match(struct device *parent, void *cfdata, void *aux)
+{
+ struct fdt_attach_args *faa = aux;
+
+ return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
+}
+
+/*
+ * Attach the controller: map the two register windows (packet processor
+ * base and interface space), enable the clocks and defer the remaining
+ * initialization until all sibling drivers have attached.
+ */
+void
+mvpp2_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct mvpp2_softc *sc = (void *)self;
+ struct fdt_attach_args *faa = aux;
+
+ /* Both register windows are required. */
+ if (faa->fa_nreg < 2) {
+ printf(": no registers\n");
+ return;
+ }
+
+ sc->sc_node = faa->fa_node;
+ sc->sc_iot = faa->fa_iot;
+ sc->sc_dmat = faa->fa_dmat;
+
+ if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
+ faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
+ printf(": can't map registers\n");
+ return;
+ }
+ sc->sc_iosize_base = faa->fa_reg[0].size;
+
+ if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
+ faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
+ printf(": can't map registers\n");
+ /* Undo the first mapping on failure. */
+ bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
+ sc->sc_iosize_base);
+ return;
+ }
+ sc->sc_iosize_iface = faa->fa_reg[1].size;
+
+ clock_enable_all(faa->fa_node);
+ sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");
+
+ printf("\n");
+
+ /* Finish setup once everything else has attached. */
+ config_defer(self, mvpp2_attach_deferred);
+}
+
+/*
+ * Deferred controller initialization: AXI attributes, aggregated tx
+ * queues, rx/tx FIFOs, BM pools, parser and classifier defaults, then
+ * attach one mvpp(4) child per port node in the device tree.
+ */
+void
+mvpp2_attach_deferred(struct device *self)
+{
+ struct mvpp2_softc *sc = (void *)self;
+ struct mvpp2_attach_args maa;
+ struct mvpp2_tx_queue *txq;
+ int i, node;
+
+ mvpp2_axi_config(sc);
+
+ /* Only a single aggregated tx queue is used for now. */
+ sc->sc_aggr_ntxq = 1;
+ sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
+ sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ for (i = 0; i < sc->sc_aggr_ntxq; i++) {
+ txq = &sc->sc_aggr_txqs[i];
+ txq->id = i;
+ mvpp2_aggr_txq_hw_init(sc, txq);
+ }
+
+ mvpp2_rx_fifo_init(sc);
+ mvpp2_tx_fifo_init(sc);
+
+ /* Enable tx descriptor snooping. */
+ mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);
+
+ mvpp2_bm_pool_init(sc);
+
+ /* Software shadow of every parser TCAM/SRAM entry. */
+ sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
+ sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);
+
+ mvpp2_prs_default_init(sc);
+ mvpp2_cls_init(sc);
+
+ /* Attach one port instance per child node. */
+ memset(&maa, 0, sizeof(maa));
+ for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
+ maa.ma_node = node;
+ maa.ma_dmat = sc->sc_dmat;
+ config_found(self, &maa, NULL);
+ }
+}
+
+/*
+ * Program the AXI attribute and code registers (cache attributes and
+ * coherency domain) for the controller's DMA reads and writes.
+ * NOTE(review): values mirror the vendor/EDK2 setup; presumably this
+ * selects outer-domain cacheable snooped transactions — confirm against
+ * the functional spec.
+ */
+void
+mvpp2_axi_config(struct mvpp2_softc *sc)
+{
+ uint32_t reg;
+
+ mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG, 0);
+
+ /* Write attributes for BM, descriptor and rx data writes. */
+ reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
+ (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
+ mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);
+
+ /* Read attributes for BM, descriptor and tx data reads. */
+ reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
+ (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
+ mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);
+
+ /* Normal (non-snooped) access code. */
+ reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
+ (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
+ mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
+ mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);
+
+ /* Snooped access codes for reads and writes. */
+ reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
+ (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
+ mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);
+
+ reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
+ (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
+ mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
+}
+
+/*
+ * Initialize the buffer manager (BM): one pool per port, each backed by
+ * DMA-able pointer memory, started, and pre-filled with MVPP2_BM_SIZE
+ * rx mbuf clusters released to the hardware.
+ */
+void
+mvpp2_bm_pool_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_bm_pool *bm;
+ struct mvpp2_buf *rxb;
+ uint64_t phys, virt;
+ int i, j;
+
+ /* Mask and ack all BM interrupts. */
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
+ mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ }
+
+ for (i = 0; i < MVPP2_MAX_PORT; i++) {
+ bm = &sc->sc_bm_pools[i];
+ bm->bm_mem = mvpp2_dmamem_alloc(sc,
+ MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
+ MVPP2_BM_POOL_PTR_ALIGN);
+ memset(MVPP2_DMA_KVA(bm->bm_mem), 0, MVPP2_DMA_LEN(bm->bm_mem));
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
+ MVPP2_DMA_LEN(bm->bm_mem),
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /*
+ * Program the pool base with the bus/DMA address of the
+ * pool memory.  The device DMAs to this address, so it
+ * must be the device-visible address (MVPP2_DMA_DVA), not
+ * the kernel virtual address — matching the per-buffer
+ * physical addresses released below.
+ */
+ mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
+ (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
+ mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
+ ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
+ & MVPP22_BM_POOL_BASE_HIGH_MASK);
+ mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
+ MVPP2_BM_SIZE);
+
+ /* Start the pool. */
+ mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
+ mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
+ MVPP2_BM_START_MASK);
+
+ mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
+ roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));
+
+ bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
+ M_DEVBUF, M_WAITOK);
+
+ /* Create all DMA maps up front, then load and release buffers. */
+ for (j = 0; j < MVPP2_BM_SIZE; j++) {
+ rxb = &bm->rxbuf[j];
+ bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
+ MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
+ }
+
+ for (j = 0; j < MVPP2_BM_SIZE; j++) {
+ rxb = &bm->rxbuf[j];
+ rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
+ if (rxb->mb_m == NULL)
+ break;
+ /*
+ * The "virtual" value is a software cookie
+ * (pool id | buffer index) echoed back on rx.
+ */
+ virt = (i << 16) | (j << 0); /* XXX use cookie? */
+ phys = rxb->mb_map->dm_segs[0].ds_addr;
+ mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
+ (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
+ << MVPP22_BM_VIRT_HIGH_RLS_OFFST) |
+ (((phys >> 32) & MVPP22_ADDR_HIGH_MASK)
+ << MVPP22_BM_PHY_HIGH_RLS_OFFSET));
+ mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
+ virt & 0xffffffff);
+ mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
+ phys & 0xffffffff);
+ }
+ }
+}
+
+/*
+ * Partition the shared rx FIFO between the ports: 32KB data/attribute
+ * space for port 0, 8KB for port 1, 4KB for the rest, then set the
+ * minimum packet size and trigger FIFO initialization.
+ */
+void
+mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
+{
+ int i;
+
+ mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+
+ mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+
+ for (i = 2; i < MVPP2_MAX_PORTS; i++) {
+ mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ }
+
+ mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/*
+ * Partition the shared tx FIFO: 10KB for port 0, 3KB for the remaining
+ * ports, with matching thresholds.
+ */
+void
+mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
+{
+ int i;
+
+ mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
+ MVPP22_TX_FIFO_DATA_SIZE_10KB);
+ mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
+ MVPP2_TX_FIFO_THRESHOLD_10KB);
+
+ for (i = 1; i < MVPP2_MAX_PORTS; i++) {
+ mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
+ MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
+ MVPP2_TX_FIFO_THRESHOLD_3KB);
+ }
+}
+
+/*
+ * Initialize the TCAM/SRAM-based parser: enable the TCAM, clear all
+ * entries, invalidate them, set per-port initial lookup state, then
+ * install the default lookup tables (flows, MH, MAC, DSA, ethertype,
+ * VLAN, PPPoE, IPv4/IPv6).  Returns 0 or the first sub-init error.
+ */
+int
+mvpp2_prs_default_init(struct mvpp2_softc *sc)
+{
+ int i, j, ret;
+
+ mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+ /* Zero every TCAM and SRAM word of every entry. */
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
+ mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
+ for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
+ mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
+
+ mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
+ for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
+ mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
+ }
+
+ /* Mark all entries invalid. */
+ for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
+ mvpp2_prs_hw_inv(sc, i);
+
+ /* Each port starts lookups at the Marvell-header stage. */
+ for (i = 0; i < MVPP2_MAX_PORTS; i++)
+ mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
+ MVPP2_PRS_PORT_LU_MAX, 0);
+
+ mvpp2_prs_def_flow_init(sc);
+ mvpp2_prs_mh_init(sc);
+ mvpp2_prs_mac_init(sc);
+ mvpp2_prs_dsa_init(sc);
+ ret = mvpp2_prs_etype_init(sc);
+ if (ret)
+ return ret;
+ ret = mvpp2_prs_vlan_init(sc);
+ if (ret)
+ return ret;
+ ret = mvpp2_prs_pppoe_init(sc);
+ if (ret)
+ return ret;
+ ret = mvpp2_prs_ip6_init(sc);
+ if (ret)
+ return ret;
+ ret = mvpp2_prs_ip4_init(sc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Invalidate parser TCAM entry `index' by setting its invalid bit. */
+void
+mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
+{
+ mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
+ mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+ MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/*
+ * Set a port's parser entry state: the first lookup id (lu_first), the
+ * maximum number of lookup iterations (lu_max) and the initial packet
+ * offset for the first lookup.
+ */
+void
+mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
+ int lu_first, int lu_max, int offset)
+{
+ uint32_t reg;
+
+ reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
+ reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
+ reg |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+ mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);
+
+ reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
+ reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+ reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+ mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);
+
+ reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
+ reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+ reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+ mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
+}
+
+/*
+ * Install one default flow entry per port, at indices counting down
+ * from MVPP2_PE_FIRST_DEFAULT_FLOW.  The port map is set to 0 —
+ * presumably these entries are selected via the flow AI value rather
+ * than by ingress port; confirm against the parser documentation.
+ */
+void
+mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_prs_entry pe;
+ int i;
+
+ for (i = 0; i < MVPP2_MAX_PORTS; i++) {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_hw_write(sc, &pe);
+ }
+}
+
+/*
+ * Default Marvell-header entry: skip MVPP2_MH_SIZE bytes and continue
+ * with the MAC lookup stage, for all ports.
+ */
+void
+mvpp2_prs_mh_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+ pe.index = MVPP2_PE_MH_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Default MAC-stage entries: the non-promiscuous catch-all drops the
+ * packet; drop-all, promiscuous and the two multicast entries are
+ * installed disabled (port map/add arguments 0).
+ */
+void
+mvpp2_prs_mac_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ /* Unmatched unicast: mark for drop and finish the lookup. */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(sc, &pe);
+ mvpp2_prs_mac_drop_all_set(sc, 0, 0);
+ mvpp2_prs_mac_promisc_set(sc, 0, 0);
+ mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_ALL, 0, 0);
+ mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_IP6, 0, 0);
+}
+
+/*
+ * Default DSA-stage entries: tagged/untagged DSA and EDSA variants
+ * (all disabled for port 0 here), plus the default entry that passes
+ * straight through to the VLAN stage without shifting.
+ */
+void
+mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_prs_entry pe;
+
+ mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = MVPP2_PE_DSA_DEFAULT;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* NOTE(review): shadow recorded as LU_MAC, not LU_DSA — matches the
+ * Linux/EDK2 origin of this code, but confirm it is intentional. */
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+int
+mvpp2_prs_etype_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Ethertype: PPPoE */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+ mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_PPP_SES);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 0;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+ MVPP2_PRS_RI_PPPOE_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Ethertype: ARP */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+ mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_ARP);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 1;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Ethertype: LBTD */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+ mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 1;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Ethertype: IPv4 without options */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+ mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 0;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Ethertype: IPv4 with options */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ pe.index = tid;
+
+ pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+ pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 0;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Ethertype: IPv6 without options */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = tid;
+ mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IPV6);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 0;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+ pe.index = MVPP2_PE_ETH_TYPE_UN;
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
+ sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+ sc->sc_prs_shadow[pe.index].finish = 1;
+ mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ return 0;
+}
+
+/*
+ * Program the default VLAN entries into the parser TCAM.  Pre-allocates
+ * the shadow table for double-VLAN combinations, installs the supported
+ * single and double tag matches, and finally the two catch-all entries
+ * (double-tagged and untagged).  Returns 0 or an error from the helpers.
+ */
+int
+mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
+{
+	struct mvpp2_prs_entry pe;
+	int ret;
+
+	/* Shadow bookkeeping for dynamically added double-VLAN pairs. */
+	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
+	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);
+
+	/* Double VLAN: 0x88a8 (QinQ outer) followed by 0x8100. */
+	ret = mvpp2_prs_double_vlan_add(sc, MV_ETH_P_8021Q, MV_ETH_P_8021AD,
+	    MVPP2_PRS_PORT_MASK);
+	if (ret)
+		return ret;
+	/* Double VLAN: 0x8100 followed by 0x8100. */
+	ret = mvpp2_prs_double_vlan_add(sc, MV_ETH_P_8021Q, MV_ETH_P_8021Q,
+	    MVPP2_PRS_PORT_MASK);
+	if (ret)
+		return ret;
+	/* Single VLAN: 0x88a8. */
+	ret = mvpp2_prs_vlan_add(sc, MV_ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+	    MVPP2_PRS_PORT_MASK);
+	if (ret)
+		return ret;
+	/* Single VLAN: 0x8100. */
+	ret = mvpp2_prs_vlan_add(sc, MV_ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+	    MVPP2_PRS_PORT_MASK);
+	if (ret)
+		return ret;
+
+	/* Catch-all for double-tagged frames (AI bit set by inner match). */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_DBL;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+	    MVPP2_PRS_RI_VLAN_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+	    MVPP2_PRS_DBL_VLAN_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Catch-all for untagged frames; proceed straight to L2 parsing. */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_NONE;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	return 0;
+}
+
+/*
+ * Program the PPPoE session parser entries: IPv4 (with and without
+ * options), IPv6, and a default non-IP entry.  Entries are allocated
+ * dynamically from the free TCAM range.  Returns 0 or a negative tid
+ * on allocation failure.
+ */
+int
+mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	/* IPv4 over PPPoE; marked "with options" as the broad match. */
+	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+	    MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+	mvpp2_prs_match_etype(&pe, 0, MV_PPP_IP);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+	    MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/*
+	 * IPv4 without options: deliberately clones the previous entry
+	 * (no memset), narrows the TCAM match to IHL == 5 and rewrites
+	 * only the result-info words.
+	 */
+	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+	    MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* IPv6 over PPPoE. */
+	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+	    MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+	mvpp2_prs_match_etype(&pe, 0, MV_PPP_IPV6);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+	    MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Non-IP payload over PPPoE: mark L3 unknown and finish lookup. */
+	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+	    MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+	    MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	return 0;
+}
+
+/*
+ * Program the IPv6 parser entries: known L4 protocols, multicast
+ * classification, a drop entry, and the default unknown-protocol and
+ * unknown-address entries.  Returns 0 or an error/negative tid.
+ */
+int
+mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
+{
+	struct mvpp2_prs_entry pe;
+	int tid, ret;
+
+	/* L4 protocols: TCP, UDP, ICMPv6 (to CPU), and IP-in-IP lite. */
+	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_ICMPV6,
+	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
+	    MVPP2_PRS_RI_UDF7_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
+	if (ret)
+		return ret;
+
+	/*
+	 * Drop entry: matches header byte 1 == 0 (presumably the
+	 * hop-limit check inherited from the reference driver -- TODO
+	 * confirm the offset) and marks the packet for drop.
+	 */
+	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+	    MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe,
+	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
+	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
+	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/*
+	 * NOTE(review): shadow lookup recorded as LU_IP4 for the next
+	 * three entries; this mirrors the reference (EDK2/Linux) driver,
+	 * so it appears intentional -- verify against that source.
+	 */
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Default: unknown L4 protocol, no extension headers. */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_PROTO_UN;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Default: unknown L4 protocol behind extension headers. */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+	    MVPP2_PRS_IPV6_EXT_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Default: unknown address, treat as unicast and re-enter LU_IP6. */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_ADDR_UN;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+	    MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to the start of the IPv6 header for the next pass. */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	return 0;
+}
+
+/*
+ * Program the IPv4 parser entries: known L4 protocols, broadcast and
+ * multicast classification, and the default unknown-protocol and
+ * unknown-address entries.  Returns 0 or an error from the helpers.
+ */
+int
+mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
+{
+	struct mvpp2_prs_entry pe;
+	int ret;
+
+	/* L4 protocols: TCP, UDP, and IGMP (steered to the CPU). */
+	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_IGMP,
+	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
+	if (ret)
+		return ret;
+	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
+	if (ret)
+		return ret;
+
+	/*
+	 * Default: unknown L4 protocol.  Re-enters LU_IP4 with the DIP
+	 * AI bit set so the destination address is classified next.
+	 */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_PROTO_UN;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+	    MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+	    MVPP2_PRS_RI_L4_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	/* Default: unknown destination address, treat as unicast. */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_ADDR_UN;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+	    MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+	    MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(sc, &pe);
+
+	return 0;
+}
+
+/* Autoconf match: accept any port node not marked "disabled" in the FDT. */
+int
+mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
+{
+	struct mvpp2_attach_args *maa = aux;
+	char status[32];
+	int len;
+
+	len = OF_getprop(maa->ma_node, "status", status, sizeof(status));
+	if (len > 0 && strcmp(status, "disabled") == 0)
+		return 0;
+	return 1;
+}
+
+/*
+ * Attach one network port of the packet processor: parse the FDT
+ * properties (phy-mode, PHY handle, MAC address), set up the single
+ * TX/RX queue pair, reset the MAC/PCS blocks, and register the ifnet
+ * with an MII or fixed/in-band media configuration.
+ *
+ * Fix vs. previous revision: the phy-mode property buffer was leaked
+ * on the unsupported-phy-mode error path; it is now freed.
+ */
+void
+mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
+{
+	struct mvpp2_port *sc = (void *)self;
+	struct mvpp2_attach_args *maa = aux;
+	struct mvpp2_tx_queue *txq;
+	struct mvpp2_rx_queue *rxq;
+	struct ifnet *ifp;
+	uint32_t phy, reg;
+	int i, idx, len, node;
+	char *phy_mode;
+	char *managed;
+
+	sc->sc = (void *)parent;
+	sc->sc_node = maa->ma_node;
+	sc->sc_dmat = maa->ma_dmat;
+
+	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
+	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
+	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
+
+	len = OF_getproplen(sc->sc_node, "phy-mode");
+	if (len <= 0) {
+		printf("%s: cannot extract phy-mode\n", self->dv_xname);
+		return;
+	}
+
+	phy_mode = malloc(len, M_TEMP, M_WAITOK);
+	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
+	if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
+		sc->sc_phy_mode = PHY_MODE_10GBASER;
+	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
+		sc->sc_phy_mode = PHY_MODE_2500BASEX;
+	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
+		sc->sc_phy_mode = PHY_MODE_1000BASEX;
+	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
+		sc->sc_phy_mode = PHY_MODE_SGMII;
+	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
+		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
+	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
+		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
+	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
+		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
+	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
+		sc->sc_phy_mode = PHY_MODE_RGMII;
+	else {
+		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
+		    phy_mode);
+		/* Do not leak the property buffer on the error path. */
+		free(phy_mode, M_TEMP, len);
+		return;
+	}
+	free(phy_mode, M_TEMP, len);
+
+	/* Lookup PHY. */
+	phy = OF_getpropint(sc->sc_node, "phy", 0);
+	if (phy) {
+		node = OF_getnodebyphandle(phy);
+		if (!node) {
+			printf(": no phy\n");
+			return;
+		}
+		sc->sc_mdio = mii_byphandle(phy);
+		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
+		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
+	}
+
+	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
+		managed = malloc(len, M_TEMP, M_WAITOK);
+		OF_getprop(sc->sc_node, "managed", managed, len);
+		if (!strncmp(managed, "in-band-status",
+		    strlen("in-band-status")))
+			sc->sc_inband_status = 1;
+		free(managed, M_TEMP, len);
+	}
+
+	if (OF_getprop(sc->sc_node, "local-mac-address",
+	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
+		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
+	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
+
+	/* For now only a single TX and RX queue pair is used. */
+	sc->sc_ntxq = sc->sc_nrxq = 1;
+	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
+	    M_DEVBUF, M_WAITOK | M_ZERO);
+	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
+	    M_DEVBUF, M_WAITOK | M_ZERO);
+
+	for (i = 0; i < sc->sc_ntxq; i++) {
+		txq = &sc->sc_txqs[i];
+		txq->id = mvpp2_txq_phys(sc->sc_id, i);
+		txq->log_id = i;
+		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+	}
+
+	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+
+	for (i = 0; i < sc->sc_nrxq; i++) {
+		rxq = &sc->sc_rxqs[i];
+		rxq->id = sc->sc_id * 32 + i;
+		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+		rxq->time_coal = MVPP2_RX_COAL_USEC;
+	}
+
+	mvpp2_egress_disable(sc);
+	mvpp2_port_disable(sc);
+
+	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
+	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
+	    0 /* queue vector id */);
+	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
+	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
+	    0 /* first rxq */);
+
+	mvpp2_ingress_disable(sc);
+	mvpp2_defaults_set(sc);
+
+	mvpp2_cls_oversize_rxq_set(sc);
+	mvpp2_cls_port_config(sc);
+
+	for (i = 0; i < sc->sc_nrxq; i++) {
+		mvpp2_rxq_long_pool_set(sc, i, sc->sc_id);
+		mvpp2_rxq_short_pool_set(sc, i, sc->sc_id);
+	}
+
+	/* Reset Mac */
+	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
+	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
+	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
+	if (sc->sc_gop_id == 0) {
+		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
+		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
+		    ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
+		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
+			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
+			reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
+			reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
+			reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
+			reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
+			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
+		} else if (sc->sc_phy_mode == PHY_MODE_XAUI)
+			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
+			    mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) &
+			    ~MVPP22_XPCS_PCSRESET);
+	}
+
+	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
+
+	ifp = &sc->sc_ac.ac_if;
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = mvpp2_ioctl;
+	ifp->if_start = mvpp2_start;
+	ifp->if_watchdog = mvpp2_watchdog;
+	IFQ_SET_MAXLEN(&ifp->if_snd, MVPP2_NTXDESC - 1);
+	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
+
+	ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+	sc->sc_mii.mii_ifp = ifp;
+	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
+	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
+	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
+
+	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
+
+	if (sc->sc_mdio) {
+		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
+		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
+		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
+			printf("%s: no PHY found!\n", self->dv_xname);
+			ifmedia_add(&sc->sc_mii.mii_media,
+			    IFM_ETHER|IFM_MANUAL, 0, NULL);
+			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
+		} else
+			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
+	} else {
+		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
+		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
+
+		if (sc->sc_inband_status) {
+			mvpp2_inband_statchg(sc);
+		} else {
+			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
+			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
+			mvpp2_mii_statchg(self);
+		}
+
+		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
+		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
+	}
+
+	if_attach(ifp);
+	ether_ifattach(ifp);
+
+	/* Unmask GMAC link change interrupts for the 1G/2.5G modes. */
+	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
+	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
+	    sc->sc_phy_mode == PHY_MODE_SGMII ||
+	    sc->sc_phy_mode == PHY_MODE_RGMII ||
+	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
+	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
+	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
+		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_MASK_REG);
+		reg |= MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK;
+		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_MASK_REG, reg);
+		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG);
+		reg |= MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
+		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG, reg);
+	}
+
+	/* Only GOP port 0 has the XLG MAC; route its link interrupt. */
+	if (sc->sc_gop_id == 0) {
+		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
+		reg |= MV_XLG_INTERRUPT_LINK_CHANGE_MASK;
+		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
+		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
+		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
+		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
+		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+		    sc->sc_phy_mode == PHY_MODE_XAUI)
+			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
+		else
+			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
+		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
+	}
+
+	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
+	if (idx >= 0)
+		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
+		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
+	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
+	if (idx < 0)
+		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
+	if (idx >= 0)
+		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
+		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
+}
+
+/* 32-bit register read from the packet processor base window. */
+uint32_t
+mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
+{
+	uint32_t val;
+
+	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
+	return val;
+}
+
+/* 32-bit register write to the packet processor base window. */
+void
+mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
+{
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
+}
+
+/* Read a GMAC register of this port within the shared interface window. */
+uint32_t
+mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
+{
+	bus_addr_t off;
+
+	off = MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE;
+	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
+	    off + addr);
+}
+
+/* Write a GMAC register of this port within the shared interface window. */
+void
+mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
+{
+	bus_addr_t off;
+
+	off = MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE;
+	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface, off + addr,
+	    data);
+}
+
+/* Read an XLG (10G MAC) register of this port. */
+uint32_t
+mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
+{
+	bus_addr_t off;
+
+	off = MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE;
+	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
+	    off + addr);
+}
+
+/* Write an XLG (10G MAC) register of this port. */
+void
+mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
+{
+	bus_addr_t off;
+
+	off = MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE;
+	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface, off + addr,
+	    data);
+}
+
+/* Read an MPCS (multi-lane PCS) register of this port. */
+uint32_t
+mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
+{
+	bus_addr_t off;
+
+	off = MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE;
+	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
+	    off + addr);
+}
+
+/* Write an MPCS (multi-lane PCS) register of this port. */
+void
+mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
+{
+	bus_addr_t off;
+
+	off = MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE;
+	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface, off + addr,
+	    data);
+}
+
+/* Read an XPCS register of this port. */
+uint32_t
+mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
+{
+	bus_addr_t off;
+
+	off = MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE;
+	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
+	    off + addr);
+}
+
+/* Write an XPCS register of this port. */
+void
+mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
+{
+	bus_addr_t off;
+
+	off = MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE;
+	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface, off + addr,
+	    data);
+}
+
+/*
+ * Interface start routine: drain the send queue into the shared
+ * aggregated TX ring (queue 0) until the ring fills or the queue
+ * empties.  Called with the ifqueue serialized by the network stack.
+ */
+void
+mvpp2_start(struct ifnet *ifp)
+{
+	struct mvpp2_port *sc = ifp->if_softc;
+	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
+	struct mbuf *m;
+	int error, idx;
+
+	/* Nothing to do unless we are running, not backed up, and linked. */
+	if (!(ifp->if_flags & IFF_RUNNING))
+		return;
+	if (ifq_is_oactive(&ifp->if_snd))
+		return;
+	if (IFQ_IS_EMPTY(&ifp->if_snd))
+		return;
+	if (!sc->sc_link)
+		return;
+
+	/* Work on a local copy of the producer index; commit at the end. */
+	idx = txq->prod;
+	while (txq->cnt < MVPP2_NTXDESC) {
+		m = ifq_dequeue(&ifp->if_snd);
+		if (m == NULL)
+			break;
+
+		error = mvpp2_encap(sc, m, &idx);
+		if (error == ENOBUFS) {
+			m_freem(m); /* give up: drop it */
+			ifq_set_oactive(&ifp->if_snd);
+			break;
+		}
+		if (error == EFBIG) {
+			m_freem(m); /* give up: drop it */
+			ifp->if_oerrors++;
+			continue;
+		}
+
+#if NBPFILTER > 0
+		if (ifp->if_bpf)
+			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
+#endif
+	}
+
+	if (txq->prod != idx) {
+		txq->prod = idx;
+
+		/* Set a timeout in case the chip goes out to lunch. */
+		ifp->if_timer = 5;
+	}
+}
+
+/*
+ * Map an mbuf chain for DMA and fill one TX descriptor per segment in
+ * the aggregated TX ring.  On success advances *idx past the consumed
+ * descriptors and kicks the hardware; returns ENOBUFS when the mbuf
+ * cannot be mapped or the ring lacks room (caller frees the mbuf).
+ */
+int
+mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
+{
+	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
+	struct mvpp2_tx_desc *txd;
+	bus_dmamap_t map;
+	uint32_t command;
+	int i, current, first, last;
+
+	first = last = current = *idx;
+	map = txq->buf[current].mb_map;
+
+	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
+		return ENOBUFS;
+
+	/* Keep two descriptors of headroom so prod never catches cons. */
+	if (map->dm_nsegs > (MVPP2_NTXDESC - txq->cnt - 2)) {
+		bus_dmamap_unload(sc->sc_dmat, map);
+		return ENOBUFS;
+	}
+
+	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+
+	/* No checksum offload yet. */
+	command = MVPP2_TXD_L4_CSUM_NOT |
+	    MVPP2_TXD_IP_CSUM_DISABLE;
+	for (i = 0; i < map->dm_nsegs; i++) {
+		txd = &txq->descs[current];
+		memset(txd, 0, sizeof(*txd));
+		/*
+		 * The buffer address must be 32-byte aligned; the low
+		 * five bits are carried in packet_offset instead.
+		 */
+		txd->buf_phys_addr_hw_cmd2 =
+		    map->dm_segs[i].ds_addr & ~0x1f;
+		txd->packet_offset =
+		    map->dm_segs[i].ds_addr & 0x1f;
+		txd->data_size = map->dm_segs[i].ds_len;
+		txd->phys_txq = sc->sc_txqs[0].id;
+		txd->command = command |
+		    MVPP2_TXD_PADDING_DISABLE;
+		/* First/last descriptor flags frame the segment list. */
+		if (i == 0)
+			txd->command |= MVPP2_TXD_F_DESC;
+		if (i == (map->dm_nsegs - 1))
+			txd->command |= MVPP2_TXD_L_DESC;
+
+		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
+		    current * sizeof(*txd), sizeof(*txd),
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		last = current;
+		current = (current + 1) % MVPP2_NTXDESC;
+		KASSERT(current != txq->cons);
+	}
+
+	/*
+	 * Track the mbuf on the last descriptor (freed at tx-done time)
+	 * and swap the maps so the first slot keeps a spare map.
+	 */
+	KASSERT(txq->buf[last].mb_m == NULL);
+	txq->buf[first].mb_map = txq->buf[last].mb_map;
+	txq->buf[last].mb_map = map;
+	txq->buf[last].mb_m = m;
+
+	txq->cnt += map->dm_nsegs;
+	*idx = current;
+
+	/* Tell the hardware how many descriptors were appended. */
+	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);
+
+	return 0;
+}
+
+/*
+ * Interface ioctl handler.  Runs at splnet; delegates media requests
+ * to ifmedia, SFP page reads to the shared sfp code (serialized by
+ * mvpp2_sff_lock), and everything else to ether_ioctl.
+ */
+int
+mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
+{
+	struct mvpp2_port *sc = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)addr;
+	int error = 0, s;
+
+	s = splnet();
+
+	switch (cmd) {
+	case SIOCSIFADDR:
+		ifp->if_flags |= IFF_UP;
+		/* FALLTHROUGH */
+	case SIOCSIFFLAGS:
+		if (ifp->if_flags & IFF_UP) {
+			/* Already running: just reprogram filters below. */
+			if (ifp->if_flags & IFF_RUNNING)
+				error = ENETRESET;
+			else
+				mvpp2_up(sc);
+		} else {
+			if (ifp->if_flags & IFF_RUNNING)
+				mvpp2_down(sc);
+		}
+		break;
+
+	case SIOCGIFMEDIA:
+	case SIOCSIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
+		break;
+
+	case SIOCGIFRXR:
+		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
+		break;
+
+	case SIOCGIFSFFPAGE:
+		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
+		if (error != 0)
+			break;
+
+		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
+		rw_exit(&mvpp2_sff_lock);
+		break;
+
+	default:
+		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
+		break;
+	}
+
+	/* ENETRESET: reload the multicast/promisc filter state. */
+	if (error == ENETRESET) {
+		if (ifp->if_flags & IFF_RUNNING)
+			mvpp2_iff(sc);
+		error = 0;
+	}
+
+	splx(s);
+	return (error);
+}
+
+/*
+ * SIOCGIFRXR helper: report one if_rxring_info per receive queue so
+ * userland (systat, netstat) can inspect the RX ring fill levels.
+ */
+int
+mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
+{
+	struct if_rxring_info *info, *ifr;
+	struct mvpp2_rx_queue *rxq;
+	unsigned int i;
+	int error;
+
+	info = mallocarray(sc->sc_nrxq, sizeof(*info), M_TEMP,
+	    M_WAITOK|M_ZERO|M_CANFAIL);
+	if (info == NULL)
+		return (ENOMEM);
+
+	for (i = 0; i < sc->sc_nrxq; i++) {
+		rxq = &sc->sc_rxqs[i];
+		ifr = &info[i];
+		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
+		ifr->ifr_size = MCLBYTES;
+		ifr->ifr_info = rxq->rxring;
+	}
+
+	error = if_rxr_info_ioctl(ifri, i, info);
+	free(info, M_TEMP, i * sizeof(*info));
+	return (error);
+}
+
+/* Watchdog stub: TX timeout recovery is not implemented yet. */
+void
+mvpp2_watchdog(struct ifnet *ifp)
+{
+	printf("%s\n", __func__);
+}
+
+/* ifmedia change callback: only meaningful when an MII PHY is attached. */
+int
+mvpp2_media_change(struct ifnet *ifp)
+{
+	struct mvpp2_port *sc = ifp->if_softc;
+
+	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
+		mii_mediachg(&sc->sc_mii);
+	return (0);
+}
+
+/* ifmedia status callback: refresh from the PHY when one is attached. */
+void
+mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct mvpp2_port *sc = ifp->if_softc;
+	struct mii_data *mii = &sc->sc_mii;
+
+	if (!LIST_EMPTY(&mii->mii_phys))
+		mii_pollstat(mii);
+
+	ifmr->ifm_status = mii->mii_media_status;
+	ifmr->ifm_active = mii->mii_media_active;
+}
+
+/* MII read: forward to the MDIO controller backing this port's PHY. */
+int
+mvpp2_mii_readreg(struct device *self, int phy, int reg)
+{
+	struct mvpp2_port *sc = (void *)self;
+	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
+}
+
+/*
+ * MII write: forward to the MDIO controller backing this port's PHY.
+ * Fix: the previous "return <expr>;" in this void function violates
+ * C99 6.8.6.4 (accepted only as a compiler extension).
+ */
+void
+mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
+{
+	struct mvpp2_port *sc = (void *)self;
+	sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
+}
+
+/* MII status change: push the new media state to the MAC. */
+void
+mvpp2_mii_statchg(struct device *self)
+{
+	struct mvpp2_port *sc;
+
+	sc = (struct mvpp2_port *)self;
+	mvpp2_port_change(sc);
+}
+
+/*
+ * Derive link state, duplex, and speed from the MAC status registers
+ * when in-band (SGMII/10G) status signaling is used instead of an MII
+ * PHY, then apply the result via mvpp2_port_change().
+ */
+void
+mvpp2_inband_statchg(struct mvpp2_port *sc)
+{
+	uint32_t reg;
+
+	sc->sc_mii.mii_media_status = IFM_AVALID;
+	sc->sc_mii.mii_media_active = IFM_ETHER;
+
+	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
+		/* 10G path: XLG MAC reports link only; assume 10G SR/FDX. */
+		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
+		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK)
+			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
+		sc->sc_mii.mii_media_active |= IFM_FDX;
+		sc->sc_mii.mii_media_active |= IFM_10G_SR;
+	} else {
+		/* GMAC path: decode link, duplex, and speed bits. */
+		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
+		if (reg & MVPP2_PORT_STATUS0_LINKUP_MASK)
+			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
+		if (reg & MVPP2_PORT_STATUS0_FULLDX_MASK)
+			sc->sc_mii.mii_media_active |= IFM_FDX;
+		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
+			sc->sc_mii.mii_media_active |= IFM_2500_SX;
+		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
+			sc->sc_mii.mii_media_active |= IFM_1000_SX;
+		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED_MASK)
+			sc->sc_mii.mii_media_active |= IFM_1000_T;
+		else if (reg & MVPP2_PORT_STATUS0_MIISPEED_MASK)
+			sc->sc_mii.mii_media_active |= IFM_100_TX;
+		else
+			sc->sc_mii.mii_media_active |= IFM_10_T;
+	}
+
+	mvpp2_port_change(sc);
+}
+
+/*
+ * React to a link state transition.  Tracks sc_link and, unless the
+ * MAC follows in-band status by itself, forces the MAC link state and
+ * (for the GMAC) programs speed/duplex from the resolved media word.
+ */
+void
+mvpp2_port_change(struct mvpp2_port *sc)
+{
+	uint32_t reg;
+
+	/* No transition: nothing to do. */
+	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) == sc->sc_link)
+		return;
+
+	sc->sc_link = !sc->sc_link;
+
+	/* With in-band status the MAC tracks the link on its own. */
+	if (sc->sc_inband_status)
+		return;
+
+	if (sc->sc_link) {
+		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+		    sc->sc_phy_mode == PHY_MODE_XAUI) {
+			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
+			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
+			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
+			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
+		} else {
+			/* Force link up and program speed/duplex bits. */
+			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
+			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
+			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
+			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
+			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
+			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
+			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
+				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
+				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
+			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
+				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
+		}
+	} else {
+		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+		    sc->sc_phy_mode == PHY_MODE_XAUI) {
+			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
+			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
+			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
+			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
+		} else {
+			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
+			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
+			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
+		}
+	}
+}
+
+/*
+ * One-second periodic timeout: poll the MII PHY state at splnet and
+ * re-arm the timeout.
+ */
+void
+mvpp2_tick(void *arg)
+{
+ struct mvpp2_port *sc = arg;
+ int s;
+
+ s = splnet();
+ mii_tick(&sc->sc_mii);
+ splx(s);
+
+ timeout_add_sec(&sc->sc_tick, 1);
+}
+
+/*
+ * Link-change interrupt handler.  Reads the XLG cause register for
+ * 10G/XAUI on GOP 0, or the GMAC cause register for the 1G modes, and
+ * kicks the in-band status handler when a link change is flagged.
+ * Always claims the interrupt.
+ */
+int
+mvpp2_link_intr(void *arg)
+{
+ struct mvpp2_port *sc = arg;
+ uint32_t reg;
+ int event = 0;
+
+ if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+ sc->sc_phy_mode == PHY_MODE_XAUI)) {
+ reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
+ if (reg & MV_XLG_INTERRUPT_LINK_CHANGE_MASK)
+ event = 1;
+ } else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
+ sc->sc_phy_mode == PHY_MODE_1000BASEX ||
+ sc->sc_phy_mode == PHY_MODE_SGMII ||
+ sc->sc_phy_mode == PHY_MODE_RGMII ||
+ sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
+ sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
+ sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
+ reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_CAUSE_REG);
+ if (reg & MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK)
+ event = 1;
+ }
+
+ /* Only act on the event when the MAC provides in-band status. */
+ if (event && sc->sc_inband_status)
+ mvpp2_inband_statchg(sc);
+
+ return (1);
+}
+
+/*
+ * Main RX/TX interrupt handler.  Reads the per-port cause register,
+ * acknowledges the MISC summary bit, then dispatches the TX and RX
+ * queue bitmaps to their processing routines.
+ */
+int
+mvpp2_intr(void *arg)
+{
+ struct mvpp2_port *sc = arg;
+ uint32_t reg;
+
+ reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
+ if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
+ /* Clear the misc cause and drop its summary bit. */
+ mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
+ mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
+ reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
+ }
+ if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
+ mvpp2_tx_proc(sc,
+ (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
+ MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);
+
+ if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
+ mvpp2_rx_proc(sc,
+ reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);
+
+ return (1);
+}
+
+/*
+ * TX completion processing stub.  "queues" is a bitmap of logical TX
+ * queues with work pending.  Currently only reads (and thereby clears,
+ * per the register semantics implied by TXQ_SENT) the sent counter and
+ * logs it; actual tx-done reclaim is still a FIXME below.
+ */
+void
+mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
+{
+// struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct mvpp2_tx_queue *txq;
+ uint32_t reg;
+ int i;
+
+ for (i = 0; i < sc->sc_ntxq; i++) {
+ txq = &sc->sc_txqs[i];
+ if ((queues & (1 << i)) == 0)
+ continue;
+ reg = mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
+ printf("%s: txq %u sent reg %u\n", sc->sc_dev.dv_xname,
+ i, (reg & MVPP2_TRANSMITTED_COUNT_MASK) >>
+ MVPP2_TRANSMITTED_COUNT_OFFSET);
+ }
+
+ /* FIXME: tx done processing */
+}
+
+/*
+ * Dispatch RX processing: walk the logical RX queues and service every
+ * queue whose bit is set in the "queues" cause bitmap.
+ */
+void
+mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
+{
+ struct mvpp2_rx_queue *rxq;
+ int i;
+
+ for (i = 0; i < sc->sc_nrxq; i++) {
+ rxq = &sc->sc_rxqs[i];
+ if ((queues & (1 << i)) == 0)
+ continue;
+ mvpp2_rxq_proc(sc, rxq);
+ }
+}
+
+/*
+ * Service one RX queue: for each received descriptor, look up the
+ * buffer-manager pool and buffer via the cookie encoded in the
+ * descriptor (pool in bits 16-31, buffer index in bits 0-15 --
+ * presumably; derived from how the cookie is unpacked below, confirm
+ * against where the cookie is written), detach the mbuf and hand the
+ * whole list to the network stack.  Finally tells the hardware how
+ * many descriptors were processed/freed.
+ */
+void
+mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+ struct mvpp2_rx_desc *rxd;
+ struct mvpp2_bm_pool *bm;
+ struct mvpp2_buf *rxb;
+ struct mbuf *m;
+ uint64_t virt;
+ uint32_t i, nrecv;
+
+ nrecv = mvpp2_rxq_received(sc, rxq->id);
+ if (!nrecv)
+ return;
+
+ printf("%s: rxq %u recv %u\n", sc->sc_dev.dv_xname,
+ rxq->id, nrecv);
+
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
+ MVPP2_DMA_LEN(rxq->ring),
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ for (i = 0; i < nrecv; i++) {
+ rxd = &rxq->descs[rxq->cons];
+ virt = rxd->buf_cookie_bm_qset_cls_info;
+ bm = &sc->sc->sc_bm_pools[(virt >> 16) & 0xffff];
+ rxb = &bm->rxbuf[virt & 0xffff];
+ KASSERT(rxb);
+ KASSERT(rxb->mb_m);
+
+ bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
+ rxd->data_size, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);
+
+ m = rxb->mb_m;
+ rxb->mb_m = NULL;
+
+ /* Strip the 2-byte Marvell header prepended by the HW. */
+ m->m_pkthdr.len = m->m_len = rxd->data_size;
+ m_adj(m, MVPP2_MH_SIZE);
+ ml_enqueue(&ml, m);
+
+ rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
+ }
+
+ /* NOTE(review): ring refill is disabled; buffers taken here are
+ * never replaced, so the pool will eventually run dry. */
+ /*mvpp2_fill_rx_ring(sc);*/
+
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
+ MVPP2_DMA_LEN(rxq->ring),
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
+
+ if_input(ifp, &ml);
+}
+
+/*
+ * Bring the port up: program the parser with our unicast/broadcast
+ * addresses, initialize the TX/RX queues, configure media and filters,
+ * unmask the single interrupt vector, configure the MAC and enable
+ * traffic, then start the MII tick.
+ */
+void
+mvpp2_up(struct mvpp2_port *sc)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ int i;
+
+ memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
+ mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, etherbroadcastaddr, 1);
+ mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
+ /* FIXME: not promisc!!! */
+ mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id, 1);
+ mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
+ mvpp2_prs_def_flow(sc);
+
+ for (i = 0; i < sc->sc_ntxq; i++)
+ mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);
+
+ mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);
+
+ for (i = 0; i < sc->sc_nrxq; i++)
+ mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);
+
+ /* FIXME: rx buffer fill */
+
+ /* Configure media. */
+ if (LIST_FIRST(&sc->sc_mii.mii_phys))
+ mii_mediachg(&sc->sc_mii);
+
+ /* Program promiscuous mode and multicast filters. */
+ mvpp2_iff(sc);
+
+ ifp->if_flags |= IFF_RUNNING;
+ ifq_clr_oactive(&ifp->if_snd);
+
+ mvpp2_txp_max_tx_size_set(sc);
+
+ /* XXX: single vector */
+ mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
+ MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
+ MVPP2_CAUSE_MISC_SUM_MASK);
+ mvpp2_interrupts_enable(sc, (1 << 0));
+
+ mvpp2_mac_config(sc);
+ mvpp2_egress_enable(sc);
+ mvpp2_ingress_enable(sc);
+
+ timeout_add_sec(&sc->sc_tick, 1);
+}
+
+/*
+ * Set up one aggregated (per-CPU) TX queue: allocate the descriptor
+ * ring and per-slot DMA maps, then point the hardware at the ring.
+ * The producer index is seeded from the hardware so software and
+ * hardware start out in sync.
+ */
+void
+mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_buf *txb;
+ int i;
+
+ txq->ring = mvpp2_dmamem_alloc(sc,
+ MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
+ txq->descs = MVPP2_DMA_KVA(txq->ring);
+
+ txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
+ M_DEVBUF, M_WAITOK);
+
+ for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
+ txb = &txq->buf[i];
+ bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
+ MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
+ txb->mb_m = NULL;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
+ MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);
+
+ txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
+ mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
+ MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_SHIFT);
+ mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
+ MVPP2_AGGR_TXQ_SIZE);
+}
+
+/*
+ * Set up one per-port TX queue: allocate the descriptor ring and DMA
+ * maps, program the queue's address/size via the indirect TXQ access
+ * registers, carve out this queue's slice of the prefetch buffer, and
+ * configure the WRR/EJP scheduler and packet coalescing.
+ */
+void
+mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_buf *txb;
+ int desc, desc_per_txq;
+ uint32_t reg;
+ int i;
+
+ txq->prod = txq->cons = txq->cnt = 0;
+// txq->last_desc = txq->size - 1;
+
+ txq->ring = mvpp2_dmamem_alloc(sc->sc,
+ MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
+ txq->descs = MVPP2_DMA_KVA(txq->ring);
+
+ txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
+ M_DEVBUF, M_WAITOK);
+
+ for (i = 0; i < MVPP2_NTXDESC; i++) {
+ txb = &txq->buf[i];
+ bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
+ MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
+ txb->mb_m = NULL;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
+ MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);
+
+ /* Indirect access: select the queue, then program it. */
+ mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
+ MVPP2_DMA_DVA(txq->ring));
+ mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
+ MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
+ mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
+ mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
+ txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
+ reg &= ~MVPP2_TXQ_PENDING_MASK;
+ mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);
+
+ /* Each queue gets a 16-descriptor window of the prefetch buffer. */
+ desc_per_txq = 16;
+ desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
+ (txq->log_id * desc_per_txq);
+
+ mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+ MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
+
+ /* WRR / EJP configuration - indirect access */
+ mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ mvpp2_egress_port(sc));
+
+ reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+ reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+ reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+ reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);
+
+ mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+ MVPP2_TXQ_TOKEN_SIZE_MAX);
+
+ mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);
+}
+
+/*
+ * Set up one RX queue: allocate the descriptor ring, program its
+ * address/size via the indirect RXQ access registers, and configure
+ * packet offset and coalescing.  The final status update releases all
+ * descriptors to the hardware as free.
+ */
+void
+mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
+{
+ rxq->prod = rxq->cons = 0;
+
+ rxq->ring = mvpp2_dmamem_alloc(sc->sc,
+ MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
+ rxq->descs = MVPP2_DMA_KVA(rxq->ring);
+
+ bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
+ 0, MVPP2_DMA_LEN(rxq->ring),
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+ mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+ mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
+ MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_SHIFT);
+ mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
+ mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
+ mvpp2_rxq_offset_set(sc, rxq->id, 0);
+ mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
+ mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
+ mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
+}
+
+/*
+ * (Re)configure the MAC for the current PHY mode: put the GMAC (and,
+ * on GOP 0, the XLG MAC and the MPCS/XPCS serdes clocks) into reset,
+ * release the relevant PCS resets, select 10G vs GMAC mode, program
+ * the maximum receive size, run the mode-specific MAC setup and
+ * finally re-enable the port.
+ */
+void
+mvpp2_mac_config(struct mvpp2_port *sc)
+{
+ uint32_t reg;
+
+ mvpp2_port_disable(sc);
+
+ /* Assert the GMAC MAC reset. */
+ mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
+ mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
+ MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
+ if (sc->sc_gop_id == 0) {
+ /* Assert XLG MAC reset and release the serdes clocks. */
+ mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
+ mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
+ ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
+ if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
+ reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
+ reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
+ reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
+ reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
+ reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
+ mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
+ } else if (sc->sc_phy_mode == PHY_MODE_XAUI)
+ mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
+ mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) &
+ ~MVPP22_XPCS_PCSRESET);
+ }
+
+ if (sc->sc_gop_id == 0) {
+ /* NOTE(review): this re-asserts the clock resets released
+ * just above - presumably a deliberate reset pulse, but
+ * worth confirming against the functional spec. */
+ if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
+ reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
+ reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
+ reg |= MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
+ reg |= MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
+ reg |= MVPP22_MPCS_MAC_CLK_RESET_MASK;
+ mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
+ } else if (sc->sc_phy_mode == PHY_MODE_XAUI)
+ mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
+ mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) |
+ MVPP22_XPCS_PCSRESET);
+
+ /* Select 10G (XLG) or 1G (GMAC) datapath. */
+ reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
+ reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
+ if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+ sc->sc_phy_mode == PHY_MODE_XAUI)
+ reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
+ else
+ reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
+ mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
+ }
+
+ /* Maximum RX frame size, expressed in units of two bytes. */
+ if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+ sc->sc_phy_mode == PHY_MODE_XAUI)) {
+ reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
+ reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
+ reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
+ MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
+ mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
+ } else {
+ reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
+ reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
+ reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
+ MVPP2_GMAC_MAX_RX_SIZE_OFFS;
+ mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
+ }
+
+ if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
+ sc->sc_phy_mode == PHY_MODE_XAUI))
+ mvpp2_xlg_config(sc);
+ else
+ mvpp2_gmac_config(sc);
+
+ mvpp2_port_enable(sc);
+}
+
+/*
+ * Configure the XLG (10G) MAC: release the MAC reset, disable the
+ * idle-check link detection and enable PFC/802.3x flow-control
+ * forwarding, then wait for the reset deassertion to take effect.
+ *
+ * Fix: the second register write used to store the stale ctl0 value
+ * into MAC_CTRL4, so none of the CTRL4 changes ever reached the
+ * hardware; it must write ctl4.
+ */
+void
+mvpp2_xlg_config(struct mvpp2_port *sc)
+{
+ uint32_t ctl0, ctl4;
+
+ ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
+ ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
+
+ ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN_MASK;
+ ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
+ /* NOTE(review): ORing an _OFFS constant looks suspect; the
+ * matching _MASK define (as on the next line) is presumably
+ * intended -- confirm against the register header. */
+ ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS;
+ ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK;
+
+ mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
+ mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl4);
+
+ /* Wait until the MAC reset deassertion has taken effect. */
+ while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
+ MV_XLG_MAC_CTRL0_MACRESETN_MASK) == 0)
+ ;
+}
+
+/*
+ * Configure the GMAC for the current PHY mode.  Forces the link down
+ * while changing settings, clears all autoneg/PCS bits, applies the
+ * per-mode PCS/pin/clock selection, optionally enables in-band
+ * autonegotiation, writes everything back and waits for the MAC to
+ * leave reset (the reset bit was asserted by mvpp2_mac_config()).
+ */
+void
+mvpp2_gmac_config(struct mvpp2_port *sc)
+{
+ uint32_t ctl0, ctl2, ctl4, panc;
+
+ /* Setup phy. */
+ ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
+ ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
+ ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
+ panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* Force link down to change in-band settings. */
+ panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ panc |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
+
+ /* Start from a clean slate before applying per-mode bits. */
+ ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
+ ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
+ MVPP2_GMAC_INBAND_AN_MASK);
+ panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
+ MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG);
+
+ switch (sc->sc_phy_mode) {
+ case PHY_MODE_XAUI:
+ case PHY_MODE_10GBASER:
+ /* Handled by the XLG path; nothing to do here. */
+ break;
+ case PHY_MODE_2500BASEX:
+ case PHY_MODE_1000BASEX:
+ ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
+ ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ break;
+ case PHY_MODE_SGMII:
+ ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
+ ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
+ ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ break;
+ case PHY_MODE_RGMII:
+ case PHY_MODE_RGMII_ID:
+ case PHY_MODE_RGMII_RXID:
+ case PHY_MODE_RGMII_TXID:
+ ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+ ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ break;
+ }
+
+ /* Use Auto-Negotiation for Inband Status only */
+ if (sc->sc_inband_status) {
+ panc &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+ panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
+ panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
+ panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
+ /* TODO: read mode from SFP */
+ if (1) {
+ /* 802.3z */
+ ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
+ panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ } else {
+ /* SGMII */
+ panc |= MVPP2_GMAC_AN_SPEED_EN;
+ panc |= MVPP2_GMAC_AN_DUPLEX_EN;
+ }
+ }
+
+ mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
+ mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
+ mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
+ mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
+
+ /* Wait for the MAC to come out of reset. */
+ while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
+ MVPP2_PORT_CTRL2_PORTMACRESET_MASK)
+ ;
+}
+
+/*
+ * Take the port down: stop the tick, mark the interface not running,
+ * disable traffic and interrupts, flush the TX path while tearing
+ * down the TX queues, tear down the RX queues, and remove our unicast
+ * address from the parser.
+ */
+void
+mvpp2_down(struct mvpp2_port *sc)
+{
+ struct ifnet *ifp = &sc->sc_ac.ac_if;
+ uint32_t reg;
+ int i;
+
+ timeout_del(&sc->sc_tick);
+
+ ifp->if_flags &= ~IFF_RUNNING;
+ ifq_clr_oactive(&ifp->if_snd);
+ ifp->if_timer = 0;
+
+ mvpp2_egress_disable(sc);
+ mvpp2_ingress_disable(sc);
+ mvpp2_port_disable(sc);
+
+ /* XXX: single vector */
+ mvpp2_interrupts_disable(sc, (1 << 0));
+ mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);
+
+ /* Keep the flush bit set while the TX queues drain. */
+ reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
+ reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
+ mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);
+
+ for (i = 0; i < sc->sc_ntxq; i++)
+ mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);
+
+ reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
+ mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);
+
+ for (i = 0; i < sc->sc_nrxq; i++)
+ mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);
+
+ mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
+}
+
+/*
+ * Tear down one TX queue: enable drain mode and wait (up to
+ * MVPP2_TX_PENDING_TIMEOUT_MSEC, polling every 1 ms) for pending
+ * packets to leave, detach the queue from the hardware, free any
+ * still-loaded mbufs and DMA maps, and release ring memory.
+ */
+void
+mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
+{
+ struct mvpp2_buf *txb;
+ int i, pending;
+ uint32_t reg;
+
+ mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
+ reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
+ reg |= MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
+
+ /*
+ * the queue has been stopped so wait for all packets
+ * to be transmitted.
+ */
+ i = 0;
+ do {
+ if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+ printf("%s: port %d: cleaning queue %d timed out\n",
+ sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
+ break;
+ }
+ delay(1000);
+ i++;
+
+ pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
+ MVPP2_TXQ_PENDING_MASK;
+ } while (pending);
+
+ reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+ mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
+
+ /* Detach the ring from the hardware. */
+ mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+ mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
+
+ for (i = 0; i < MVPP2_NTXDESC; i++) {
+ txb = &txq->buf[i];
+ if (txb->mb_m) {
+ bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
+ txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
+ m_freem(txb->mb_m);
+ }
+ bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
+ }
+
+ mvpp2_dmamem_free(sc->sc, txq->ring);
+ free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
+ MVPP2_NTXDESC);
+}
+
+/*
+ * Tear down one RX queue: return any still-occupied descriptors to
+ * the hardware, detach the ring from the RXQ registers and free the
+ * ring memory.
+ */
+void
+mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
+{
+ uint32_t nrecv;
+
+ nrecv = mvpp2_rxq_received(sc, rxq->id);
+ if (nrecv)
+ mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
+
+ mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+ mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);
+
+ mvpp2_dmamem_free(sc->sc, rxq->ring);
+}
+
+/*
+ * Assign a buffer-manager pool as the "long" (large-packet) pool for
+ * the given logical RX queue of this port.
+ */
+void
+mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
+{
+ uint32_t val;
+ int prxq;
+
+ /* get queue physical ID */
+ prxq = port->sc_rxqs[lrxq].id;
+
+ val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+ val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
+
+ mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/*
+ * Assign a buffer-manager pool as the "short" (small-packet) pool for
+ * the given logical RX queue of this port.
+ */
+void
+mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
+{
+ uint32_t val;
+ int prxq;
+
+ /* get queue physical ID */
+ prxq = port->sc_rxqs[lrxq].id;
+
+ val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+ val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
+
+ mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/*
+ * Interface filter update: re-register the (possibly changed) unicast
+ * address with the parser.  Multicast filtering is not implemented
+ * yet (see FIXME).
+ */
+void
+mvpp2_iff(struct mvpp2_port *sc)
+{
+ /* FIXME: multicast handling */
+
+ mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
+ memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
+ mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
+}
+
+/*
+ * Allocate a zeroed, single-segment, coherent DMA memory chunk of the
+ * given size and alignment.  Returns NULL on failure; cleanup of the
+ * partially constructed state uses the standard goto unwind chain.
+ * The caller frees with mvpp2_dmamem_free().
+ */
+struct mvpp2_dmamem *
+mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
+{
+ struct mvpp2_dmamem *mdm;
+ int nsegs;
+
+ mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
+ mdm->mdm_size = size;
+
+ if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
+ BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
+ goto mdmfree;
+
+ if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
+ &nsegs, BUS_DMA_WAITOK) != 0)
+ goto destroy;
+
+ if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
+ &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
+ goto free;
+
+ if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
+ NULL, BUS_DMA_WAITOK) != 0)
+ goto unmap;
+
+ bzero(mdm->mdm_kva, size);
+
+ return (mdm);
+
+unmap:
+ bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
+free:
+ bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
+destroy:
+ bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
+mdmfree:
+ free(mdm, M_DEVBUF, 0);
+
+ return (NULL);
+}
+
+/*
+ * Release a DMA memory chunk obtained from mvpp2_dmamem_alloc():
+ * unmap, free the segment, destroy the map and free the descriptor.
+ */
+void
+mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
+{
+ bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
+ bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
+ bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
+ free(mdm, M_DEVBUF, 0);
+}
+
+/*
+ * Allocate a cluster mbuf and load it into the supplied DMA map,
+ * pre-synced for device reads.  Returns NULL if either the cluster
+ * allocation or the DMA load fails (the mbuf is freed in the latter
+ * case).  Fix: the error printf was missing its trailing newline.
+ */
+struct mbuf *
+mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
+{
+ struct mbuf *m = NULL;
+
+ m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
+ if (!m)
+ return (NULL);
+ m->m_len = m->m_pkthdr.len = MCLBYTES;
+
+ if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
+ printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
+ m_freem(m);
+ return (NULL);
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, map, 0,
+ m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
+
+ return (m);
+}
+
+/* Enable this port's RX/TX interrupts for the CPUs in cpu_mask. */
+void
+mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
+{
+ mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
+ MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
+}
+
+/* Disable this port's RX/TX interrupts for the CPUs in cpu_mask. */
+void
+mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
+{
+ mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
+ MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
+}
+
+/* Egress port number: physical ports follow the TX-cont ports. */
+int
+mvpp2_egress_port(struct mvpp2_port *port)
+{
+ return MVPP2_MAX_TCONT + port->sc_id;
+}
+
+/* Physical TX queue number for (port, logical txq). */
+int
+mvpp2_txq_phys(int port, int txq)
+{
+ return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/*
+ * Program per-port scheduler defaults: clear all TXQ token counters,
+ * set the scheduling period from the core clock, configure refill and
+ * token size, set RX control defaults, and mask this port's
+ * interrupts on all CPUs until the port is brought up.
+ */
+void
+mvpp2_defaults_set(struct mvpp2_port *port)
+{
+ int val, queue, p_txq;
+
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ mvpp2_egress_port(port));
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+ p_txq = mvpp2_txq_phys(port->sc_id, queue);
+ mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(p_txq), 0);
+ }
+
+ /* Scheduling period in core clock cycles per microsecond. */
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
+ (1000 * 1000));
+ val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
+ val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+ val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+ val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
+ val = MVPP2_TXP_TOKEN_SIZE_MAX;
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+ /* set maximum_low_latency_packet_size value to 256 */
+ mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
+ MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+ MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+ /* mask all interrupts to all present cpus */
+ mvpp2_interrupts_disable(port, (0xf << 0));
+}
+
+/* Enable RX on all of this port's queues by clearing the disable bit. */
+void
+mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+ uint32_t val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
+ queue = port->sc_rxqs[lrxq].id;
+ val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
+ val &= ~MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+/* Disable RX on all of this port's queues by setting the disable bit. */
+void
+mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+ uint32_t val;
+ int lrxq, queue;
+
+ for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
+ queue = port->sc_rxqs[lrxq].id;
+ val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+/*
+ * Enable egress on every TX queue that has a descriptor ring
+ * allocated, via the per-port scheduler queue-command register.
+ */
+void
+mvpp2_egress_enable(struct mvpp2_port *port)
+{
+ struct mvpp2_tx_queue *txq;
+ uint32_t qmap;
+ int queue;
+
+ qmap = 0;
+ for (queue = 0; queue < port->sc_ntxq; queue++) {
+ txq = &port->sc_txqs[queue];
+
+ if (txq->descs != NULL) {
+ qmap |= (1 << queue);
+ }
+ }
+
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ mvpp2_egress_port(port));
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/*
+ * Disable egress for all enabled TX queues of this port and wait (up
+ * to MVPP2_TX_DISABLE_TIMEOUT_MSEC, polling every 1 ms) until the
+ * hardware reports them all stopped.
+ */
+void
+mvpp2_egress_disable(struct mvpp2_port *port)
+{
+ uint32_t reg_data;
+ int i;
+
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ mvpp2_egress_port(port));
+ reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+ MVPP2_TXP_SCHED_ENQ_MASK;
+ if (reg_data)
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,(reg_data <<
+ MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+ i = 0;
+ do {
+ if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+ printf("%s: tx stop timed out, status=0x%08x\n",
+ port->sc_dev.dv_xname, reg_data);
+ break;
+ }
+ delay(1000);
+ i++;
+ reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
+ } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/*
+ * Enable the port on the MAC that matches its PHY mode: the XLG MAC
+ * for 10G/XAUI on GOP 0, the GMAC otherwise.  MIB counters are
+ * enabled along with the port.
+ */
+void
+mvpp2_port_enable(struct mvpp2_port *port)
+{
+ uint32_t val;
+
+ if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
+ port->sc_phy_mode == PHY_MODE_XAUI)) {
+ val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
+ val |= MV_XLG_MAC_CTRL0_PORTEN_MASK;
+ val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK;
+ mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
+ } else {
+ val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
+ }
+}
+
+/*
+ * Disable the port.  Unlike mvpp2_port_enable() the GMAC is always
+ * disabled, in addition to the XLG MAC for 10G/XAUI on GOP 0.
+ */
+void
+mvpp2_port_disable(struct mvpp2_port *port)
+{
+ uint32_t val;
+
+ if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
+ port->sc_phy_mode == PHY_MODE_XAUI)) {
+ val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
+ val &= ~MV_XLG_MAC_CTRL0_PORTEN_MASK;
+ mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
+ }
+
+ val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
+ val &= ~MVPP2_GMAC_PORT_EN_MASK;
+ mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
+}
+
+/* Number of received descriptors currently occupied in an RX queue. */
+int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+ uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/*
+ * Tell the hardware how many RX descriptors software has processed
+ * (used_count) and how many it returns as free (free_count).
+ */
+void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+ int used_count, int free_count)
+{
+ uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+ mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/*
+ * Program the RX packet offset for a physical RX queue.  The offset
+ * is given in bytes and converted to the register's 32-byte units.
+ */
+void
+mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
+{
+ uint32_t val;
+
+ /* Hardware expects the offset in units of 32 bytes. */
+ offset = offset >> 5;
+ val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+ val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+ MVPP2_RXQ_PACKET_OFFSET_MASK);
+ mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/*
+ * Program the scheduler MTU and make sure the port and per-queue
+ * token sizes are at least as large, so the token-bucket scheduler
+ * never stalls on a full-sized packet.
+ */
+void
+mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+ uint32_t val, size, mtu;
+ int txq;
+
+ mtu = MCLBYTES * 8;
+ if (mtu > MVPP2_TXP_MTU_MAX)
+ mtu = MVPP2_TXP_MTU_MAX;
+
+ /* WA for wrong token bucket update: set MTU value = 3*real MTU value */
+ mtu = 3 * mtu;
+
+ /* indirect access to registers */
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+ mvpp2_egress_port(port));
+
+ /* set MTU */
+ val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
+ val &= ~MVPP2_TXP_MTU_MAX;
+ val |= mtu;
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);
+
+ /* TXP token size and all TXqs token size must be larger that MTU */
+ val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+ size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+ }
+
+ for (txq = 0; txq < port->sc_ntxq; txq++) {
+ val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+ size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+ if (size < mtu) {
+ size = mtu;
+ val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+ val |= size;
+ mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
+ }
+ }
+}
+
+/*
+ * Set the RX packet-count interrupt coalescing threshold for a queue,
+ * clamped to the register's maximum, and cache it in the queue.
+ */
+void
+mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
+ uint32_t pkts)
+{
+ rxq->pkts_coal =
+ pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
+ pkts : MVPP2_OCCUPIED_THRESH_MASK;
+
+ mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
+ mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
+
+}
+
+/*
+ * Set the TX-done packet-count interrupt coalescing threshold for a
+ * queue, clamped to the register's maximum, and cache it in the queue.
+ */
+void
+mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+ uint32_t pkts)
+{
+ txq->done_pkts_coal =
+ pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
+ pkts : MVPP2_TRANSMITTED_THRESH_MASK;
+
+ mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
+ txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
+}
+
+/*
+ * Set the RX time-based interrupt coalescing for a queue.  The
+ * microsecond value is converted to core clock cycles for the
+ * hardware and cached as-is in the queue.
+ */
+void
+mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
+ uint32_t usec)
+{
+ uint32_t val;
+
+ val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
+ mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+
+ rxq->time_coal = usec;
+}
+
+/*
+ * Set the TX time-based interrupt coalescing for the whole port.
+ * Microseconds are converted to core clock cycles for the hardware.
+ */
+void
+mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
+{
+ uint32_t val;
+
+ val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
+ mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
+
+ port->sc_tx_time_coal = usec;
+}
+
+/* Record a parser entry's result-info value/mask in the SW shadow. */
+void
+mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
+ uint32_t ri, uint32_t ri_mask)
+{
+ sc->sc_prs_shadow[index].ri_mask = ri_mask;
+ sc->sc_prs_shadow[index].ri = ri;
+}
+
+/*
+ * Set the lookup-ID byte of a TCAM entry and enable all of its bits
+ * for comparison in the matching enable plane.
+ */
+void
+mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+ pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/*
+ * Add or remove one port from a TCAM entry's port map.  The port-map
+ * enable bits are inverted: a cleared bit means the port matches.
+ */
+void
+mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+ if (add)
+ pe->tcam.byte[enable_off] &= ~(1 << port);
+ else
+ pe->tcam.byte[enable_off] |= (1 << port);
+}
+
+/*
+ * Set a TCAM entry's full port map.  The enable plane stores the
+ * inverted map: a cleared bit means the corresponding port matches.
+ */
+void
+mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+ uint8_t mask = MVPP2_PRS_PORT_MASK;
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+ pe->tcam.byte[enable_off] &= ~mask;
+ pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
+}
+
+/* Return a TCAM entry's port map (stored inverted in the enable plane). */
+uint32_t
+mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+ int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+ return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Set one TCAM data byte and its per-bit compare-enable mask. */
+void
+mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
+ uint8_t byte, uint8_t enable)
+{
+ pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+ pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Fetch one TCAM data byte and its per-bit compare-enable mask. */
+void
+mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
+ uint8_t *byte, uint8_t *enable)
+{
+ *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+ *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/*
+ * Compare 16 bits of TCAM data at the given byte offset against
+ * "data" (assembled little-endian from two adjacent TCAM bytes).
+ * Returns 1 on match, 0 otherwise.
+ */
+int
+mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
+{
+ int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
+ uint16_t tcam_data;
+
+ tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
+ pe->tcam.byte[byte_offset];
+ if (tcam_data != data)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Update the TCAM additional-info (AI) byte of a parser entry: for
+ * every bit set in "enable", copy the corresponding bit from "bits"
+ * into the AI byte, then mark those bits as compared in the enable
+ * plane.
+ *
+ * Fix: the per-bit masks were computed as (i << 1) instead of
+ * (1 << i), which tested the wrong bits entirely (and no bit at all
+ * for i == 0).  The sibling SRAM update helpers in this file use the
+ * correct (1 << i) form.
+ */
+void
+mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
+{
+ int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+ for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+ if (!(enable & (1 << i)))
+ continue;
+
+ if (bits & (1 << i))
+ pe->tcam.byte[ai_idx] |= 1 << i;
+ else
+ pe->tcam.byte[ai_idx] &= ~(1 << i);
+ }
+
+ pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Return the TCAM additional-info (AI) byte of a parser entry. */
+int
+mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/*
+ * Fetch one 32-bit word of TCAM data (and its enable mask) at the
+ * given word offset, assembled byte-by-byte in host memory order.
+ */
+void
+mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
+ uint32_t *word, uint32_t *enable)
+{
+ int index, position;
+ uint8_t byte, mask;
+
+ for (index = 0; index < 4; index++) {
+ position = (data_offset * sizeof(int)) + index;
+ mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
+ ((uint8_t *)word)[index] = byte;
+ ((uint8_t *)enable)[index] = mask;
+ }
+}
+
+/*
+ * Program a big-endian (network order) 16-bit ethertype match at the
+ * given TCAM data offset, with all bits enabled for comparison.
+ */
+void
+mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
+ uint16_t ether_type)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
+ mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
+}
+
+/* OR 'val' into the sram bit array starting at bit position 'bit' */
+void
+mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
+{
+ uint32_t idx = bit / 8, shift = bit % 8;
+
+ pe->sram.byte[idx] |= (val << shift);
+}
+
+/* clear the bits of 'val' in the sram bit array at bit position 'bit' */
+void
+mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
+{
+ uint32_t idx = bit / 8, shift = bit % 8;
+
+ pe->sram.byte[idx] &= ~(val << shift);
+}
+
+/*
+ * Update the sram result info field: for every bit selected in 'mask',
+ * copy the corresponding bit from 'bits' and set the matching ctrl bit
+ * so the hardware applies it.
+ */
+void
+mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
+{
+ int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ if (bits & (1 << i))
+ mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+ /* mark this result info bit as valid */
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* return the 32-bit result info word of the sram entry */
+int
+mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+ return (pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]);
+}
+
+/*
+ * Update the sram AI (additional info) field: for every bit selected in
+ * 'mask', copy the corresponding bit from 'bits' and set the matching
+ * ctrl bit so the hardware applies it.
+ */
+void
+mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
+{
+ int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+ int i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ if (bits & (1 << i))
+ mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+ /* mark this AI bit as valid */
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+ }
+}
+
+/*
+ * Read the sram AI field.  The field is not byte aligned, so the value
+ * is reassembled from the two bytes it straddles.
+ */
+int
+mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+ uint8_t bits;
+ int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_en_off = ai_off + 1;
+ int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+ /* low part from the first byte, high part from the next byte */
+ bits = (pe->sram.byte[ai_off] >> ai_shift) |
+ (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+ return bits;
+}
+
+/*
+ * Program the sram shift field: sign bit plus magnitude, the shift
+ * operation selector 'op', and clear the base-address bit so the shift
+ * is applied relative to the current offset.
+ */
+void
+mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
+{
+ if (shift < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ shift = -shift;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ }
+
+ /* NOTE(review): magnitude is OR-ed in; assumes the field was zero */
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
+ shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/*
+ * Program the sram UDF (user defined field) offset: sign and magnitude,
+ * the UDF type and the offset operation selector 'op'.  The UDF and
+ * op-sel fields are not byte aligned, so the high bits that spill into
+ * the following byte are written by hand.
+ */
+void
+mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
+    uint32_t op)
+{
+ uint8_t udf_byte, udf_byte_offset;
+ uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;
+
+ /* byte index and bit shift of the spill-over part of each field */
+ udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+ MVPP2_PRS_SRAM_UDF_BITS);
+ udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
+ op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+ MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
+ op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));
+
+ if (offset < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ offset = -offset;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ }
+
+ /* low bits of the offset, then the spill-over high bits */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ MVPP2_PRS_SRAM_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+ pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
+ pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+ MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+ /* same pattern for the op-sel field */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+ pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+ op_sel_udf_byte_offset);
+ pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* set the next lookup unit id in the sram entry */
+void
+mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
+{
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_NEXT_LU_OFFS,
+ MVPP2_PRS_SRAM_NEXT_LU_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_NEXT_LU_OFFS, lu);
+}
+
+/* record in the software shadow that tcam entry 'index' belongs to 'lu' */
+void
+mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
+{
+ sc->sc_prs_shadow[index].lu = lu;
+ sc->sc_prs_shadow[index].valid = 1;
+}
+
+/*
+ * Write a parser entry to hardware: clear the invalid bit, select the
+ * entry index, then store all tcam and sram words.
+ * Returns EINVAL if pe->index is out of range, 0 on success.
+ */
+int
+mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return EINVAL;
+
+ /* writing the tcam marks the entry as valid */
+ pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+ mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+ mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+ return 0;
+}
+
+/*
+ * Read a parser entry from hardware at pe->index into *pe.
+ * Returns EINVAL if the index is out of range or the entry is marked
+ * invalid, 0 on success.
+ */
+int
+mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
+{
+ int i;
+
+ if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+ return EINVAL;
+
+ /* check the invalid bit first and bail out on unused entries */
+ mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+ pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
+ mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+ if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+ return EINVAL;
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+ pe->tcam.word[i] =
+ mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));
+
+ mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+ pe->sram.word[i] =
+ mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));
+
+ return 0;
+}
+
+/*
+ * Find the parser flow entry whose sram AI field matches 'flow'.
+ * Returns a malloc(9)-ed copy of the entry (caller must free) or NULL
+ * if not found or out of memory.
+ */
+struct mvpp2_prs_entry *
+mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
+{
+ struct mvpp2_prs_entry *pe;
+ uint32_t word, enable;
+ uint8_t bits;
+ int tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return NULL;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+ /* flow entries live at the top of the table, so search downwards */
+ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+ if (!sc->sc_prs_shadow[tid].valid ||
+ sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+ continue;
+
+ pe->index = tid;
+ mvpp2_prs_hw_read(sc, pe);
+
+ /* only consider entries with no tcam data match */
+ mvpp2_prs_tcam_data_word_get(pe, 0, &word, &enable);
+ if ((word != 0) || (enable != 0))
+ continue;
+
+ bits = mvpp2_prs_sram_ai_get(pe);
+ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+ return pe;
+ }
+
+ free(pe, M_TEMP, sizeof(*pe));
+ return NULL;
+}
+
+/*
+ * Return the index of the first unused tcam entry in [start, end]
+ * (bounds may be given in either order), or -1 if the range is full.
+ * All callers check for failure with "tid < 0", so the error return
+ * must be negative; the previous "return EINVAL" was positive and
+ * would have been used as a valid entry index.
+ */
+int
+mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
+{
+ uint8_t tmp;
+ int tid;
+
+ if (start > end) {
+ tmp = end;
+ end = start;
+ start = tmp;
+ }
+
+ for (tid = start; tid <= end; tid++) {
+ if (!sc->sc_prs_shadow[tid].valid)
+ return tid;
+ }
+
+ return -1;
+}
+
+/*
+ * Enable ('add' != 0) or disable 'port' in the shared drop-all parser
+ * entry, creating and initializing the entry on first use.
+ */
+void
+mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
+{
+ struct mvpp2_prs_entry pe;
+
+ if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ /* entry exists - update its port mask only */
+ pe.index = MVPP2_PE_DROP_ALL;
+ mvpp2_prs_hw_read(sc, &pe);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_DROP_ALL;
+ /* mark matching frames to be dropped and stop parsing */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Enable ('add' != 0) or disable 'port' in the shared promiscuous
+ * parser entry, creating and initializing the entry on first use.
+ */
+void
+mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int add)
+{
+ struct mvpp2_prs_entry pe;
+
+ if (sc->sc_prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+ /* entry exists - update its port mask only */
+ pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+ mvpp2_prs_hw_read(sc, &pe);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+ MVPP2_PRS_RI_L2_CAST_MASK);
+ /* skip past both MAC addresses */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Enable ('add' != 0) or disable 'port' in the all-multicast parser
+ * entry 'index', creating and initializing the entry on first use.
+ * The first destination byte is 0x01 for the IP multicast entry and
+ * 0x33 for the IPv6 multicast entry.
+ */
+void
+mvpp2_prs_mac_multi_set(struct mvpp2_softc *sc, uint32_t port, uint32_t index, int add)
+{
+ struct mvpp2_prs_entry pe;
+ uint8_t da_mc;
+
+ da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+ if (sc->sc_prs_shadow[index].valid) {
+ /* entry exists - update its port mask only */
+ pe.index = index;
+ mvpp2_prs_hw_read(sc, &pe);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = index;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+ MVPP2_PRS_RI_L2_CAST_MASK);
+ /* match on the first byte of the destination address */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Enable ('add' != 0) or disable 'port' in the DSA/EDSA tag parser
+ * entry selected by 'tagged' and 'extend', creating the entry on first
+ * use.  EDSA tags are 8 bytes, DSA tags 4 bytes.
+ */
+void
+mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
+    int tagged, int extend)
+{
+ struct mvpp2_prs_entry pe;
+ int32_t tid, shift;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ shift = 4;
+ }
+
+ if (sc->sc_prs_shadow[tid].valid) {
+ /* entry exists - update its port mask only */
+ pe.index = tid;
+ mvpp2_prs_hw_read(sc, &pe);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+ /* skip over the tag */
+ mvpp2_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
+ if (tagged) {
+ /* tagged frames continue with vlan parsing */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* untagged frames go straight to L2 parsing */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Enable ('add' != 0) or disable 'port' in the ethertype-based DSA/EDSA
+ * tag parser entry selected by 'tagged' and 'extend', creating the
+ * entry on first use.  These entries match the EDSA ethertype after
+ * the MAC addresses rather than a tag at the start of the frame.
+ */
+void
+mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
+    int add, int tagged, int extend)
+{
+ struct mvpp2_prs_entry pe;
+ int32_t tid, shift, port_mask;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ port_mask = 0;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ port_mask = MVPP2_PRS_PORT_MASK;
+ shift = 4;
+ }
+
+ if (sc->sc_prs_shadow[tid].valid) {
+ /* entry exists - update its port mask only */
+ pe.index = tid;
+ mvpp2_prs_hw_read(sc, &pe);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+ /* match the EDSA ethertype followed by a zero word */
+ mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_EDSA);
+ mvpp2_prs_match_etype(&pe, 2, 0);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+ /* skip MAC addresses and the tag itself */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
+ if (tagged) {
+ /* tagged frames continue with vlan parsing */
+ mvpp2_prs_tcam_data_byte_set(&pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* untagged frames go straight to L2 parsing */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+ }
+
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+ mvpp2_prs_hw_write(sc, &pe);
+}
+
+/*
+ * Find a single/triple vlan parser entry matching 'tpid' and 'ai'.
+ * Returns a malloc(9)-ed copy of the entry (caller must free) or NULL
+ * if not found or out of memory.
+ */
+struct mvpp2_prs_entry *
+mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
+{
+ struct mvpp2_prs_entry *pe;
+ uint32_t ri_bits, ai_bits;
+ int match, tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return NULL;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ if (!sc->sc_prs_shadow[tid].valid ||
+ sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+ pe->index = tid;
+ mvpp2_prs_hw_read(sc, pe);
+ /* tcam data is stored in network byte order */
+ match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
+ if (!match)
+ continue;
+ ri_bits = mvpp2_prs_sram_ri_get(pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ ai_bits = mvpp2_prs_tcam_ai_get(pe);
+ /* ignore the double-vlan marker bit when comparing AI */
+ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+ if (ai != ai_bits)
+ continue;
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ return pe;
+ }
+
+ free(pe, M_TEMP, sizeof(*pe));
+ return NULL;
+}
+
+/*
+ * Add (or update the port map of) a single/triple vlan parser entry for
+ * 'tpid' and 'ai'.  New entries are placed above the last double-vlan
+ * entry so double-vlan matches take precedence.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
+{
+ struct mvpp2_prs_entry *pe;
+ uint32_t ri_bits;
+ int tid_aux, tid;
+ int ret = 0;
+
+ pe = mvpp2_prs_vlan_find(sc, tpid, ai);
+ if (pe == NULL) {
+ /* allocate a new tid, searching from the top down */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return ENOMEM;
+
+ /* get last double vlan tid */
+ for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+ tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+ if (!sc->sc_prs_shadow[tid_aux].valid ||
+ sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+ pe->index = tid_aux;
+ mvpp2_prs_hw_read(sc, pe);
+ ri_bits = mvpp2_prs_sram_ri_get(pe);
+ if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+ MVPP2_PRS_RI_VLAN_DOUBLE)
+ break;
+ }
+
+ /* the new entry must sit above all double-vlan entries */
+ if (tid <= tid_aux) {
+ ret = EINVAL;
+ goto error;
+ }
+
+ memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+ pe->index = tid;
+ mvpp2_prs_match_etype(pe, 0, tpid);
+ mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+ /* shift past one vlan tag */
+ mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+ mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ } else {
+ ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+ mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ }
+ mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
+ }
+
+ mvpp2_prs_tcam_port_map_set(pe, port_map);
+ mvpp2_prs_hw_write(sc, pe);
+
+error:
+ free(pe, M_TEMP, sizeof(*pe));
+ return ret;
+}
+
+/*
+ * Return the first free double-vlan AI value (AI 0 is reserved), or -1
+ * if all are in use.  The caller checks for failure with "ai < 0", so
+ * the error return must be negative; the previous "return EINVAL" was
+ * positive and would have been mistaken for a valid AI.
+ */
+int
+mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
+{
+ int i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
+ if (!sc->sc_prs_double_vlans[i])
+ return i;
+
+ return -1;
+}
+
+/*
+ * Find a double-vlan parser entry matching outer 'tpid1' and inner
+ * 'tpid2'.  Returns a malloc(9)-ed copy of the entry (caller must
+ * free) or NULL if not found or out of memory.
+ */
+struct mvpp2_prs_entry *
+mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
+{
+ struct mvpp2_prs_entry *pe;
+ uint32_t ri_mask;
+ int match, tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return NULL;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ if (!sc->sc_prs_shadow[tid].valid ||
+ sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ pe->index = tid;
+ mvpp2_prs_hw_read(sc, pe);
+ /* both tpids are stored in network byte order */
+ match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
+ mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
+ if (!match)
+ continue;
+ ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+ return pe;
+ }
+
+ free(pe, M_TEMP, sizeof(*pe));
+ return NULL;
+}
+
+/*
+ * Add (or update the port map of) a double-vlan parser entry for outer
+ * 'tpid1' and inner 'tpid2'.  New entries are placed below the first
+ * single/triple-vlan entry so they are matched first.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
+    uint32_t port_map)
+{
+ struct mvpp2_prs_entry *pe;
+ int tid_aux, tid, ai, ret = 0;
+ uint32_t ri_bits;
+
+ pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
+ if (pe == NULL) {
+ /* allocate a new tid, searching from the bottom up */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return ENOMEM;
+
+ ai = mvpp2_prs_double_vlan_ai_free_get(sc);
+ if (ai < 0) {
+ ret = ai;
+ goto error;
+ }
+
+ /* find the first single/triple vlan entry */
+ for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+ tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+ if (!sc->sc_prs_shadow[tid_aux].valid ||
+ sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+ pe->index = tid_aux;
+ mvpp2_prs_hw_read(sc, pe);
+ ri_bits = mvpp2_prs_sram_ri_get(pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ break;
+ }
+
+ /* the new entry must sit below all single/triple entries */
+ if (tid >= tid_aux) {
+ ret = ERANGE;
+ goto error;
+ }
+
+ memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+ pe->index = tid;
+ sc->sc_prs_double_vlans[ai] = 1;
+ mvpp2_prs_match_etype(pe, 0, tpid1);
+ mvpp2_prs_match_etype(pe, 4, tpid2);
+ /* loop back into vlan parsing for the inner tag */
+ mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+ MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
+ }
+
+ mvpp2_prs_tcam_port_map_set(pe, port_map);
+ mvpp2_prs_hw_write(sc, pe);
+
+error:
+ free(pe, M_TEMP, sizeof(*pe));
+ return ret;
+}
+
+/*
+ * Install the two IPv4 parser entries for L4 protocol 'proto': the
+ * first matches fragmented packets, the second (reusing the same tcam
+ * setup with the result info rewritten) matches unfragmented packets
+ * whose flags/offset bytes are zero.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
+    uint32_t ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
+ (proto != MV_IPPR_IGMP))
+ return EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* point the L4 offset past the fixed IPv4 header */
+ mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, ri_mask |
+ MVPP2_PRS_RI_IP_FRAG_MASK);
+ /* match the protocol byte of the IPv4 header */
+ mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ /* second entry: unfragmented packets (flags/offset bytes zero) */
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe.index = tid;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ return 0;
+}
+
+/*
+ * Install an IPv4 parser entry matching multicast or broadcast
+ * destination addresses, setting the corresponding L3 cast result
+ * info.  Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int mask, tid;
+
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ switch (l3_cast) {
+ case MVPP2_PRS_L3_MULTI_CAST:
+ /* match the multicast address prefix */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+ MVPP2_PRS_IPV4_MC_MASK);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ case MVPP2_PRS_L3_BROAD_CAST:
+ /* match 255.255.255.255 */
+ mask = MVPP2_PRS_IPV4_BC_MASK;
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+ mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ default:
+ return EINVAL;
+ }
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ return 0;
+}
+
+/*
+ * Install an IPv6 parser entry matching L4 protocol 'proto' (next
+ * header, no extension headers) and set the given result info.
+ * Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
+    uint32_t ri_mask)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
+ (proto != MV_IPPR_ICMPV6) && (proto != MV_IPPR_IPIP))
+ return EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+ /* point the L4 offset past the fixed IPv6 header */
+ mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* match the next header byte */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ return 0;
+}
+
+/*
+ * Install an IPv6 parser entry matching multicast destination
+ * addresses (the only cast type supported for IPv6 here).
+ * Returns 0 on success or an errno-style value.
+ */
+int
+mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+ return EINVAL;
+
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* shift back to the start of the IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* match the multicast address prefix (ff00::/8) */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+ MVPP2_PRS_IPV6_MC_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
+ mvpp2_prs_hw_write(sc, &pe);
+
+ return 0;
+}
+
+/*
+ * Return 1 if the tcam entry matches ethernet address 'da' under
+ * 'mask', 0 otherwise.
+ */
+int
+mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
+    uint8_t *mask)
+{
+ uint8_t byte, msk;
+ int i;
+
+ for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ mvpp2_prs_tcam_data_byte_get(pe, i, &byte, &msk);
+ if (msk != mask[i] ||
+ (msk & byte) != (da[i] & mask[i]))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Find a MAC parser entry matching da/mask, port map 'pmap' and udf
+ * type 'udf_type'.  Returns a malloc(9)-ed copy of the entry (caller
+ * must free) or NULL if not found or out of memory.
+ */
+struct mvpp2_prs_entry *
+mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
+    uint8_t *mask, int udf_type)
+{
+ struct mvpp2_prs_entry *pe;
+ int tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return NULL;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID;
+     tid++) {
+ uint32_t entry_pmap;
+
+ if (!sc->sc_prs_shadow[tid].valid ||
+ (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (sc->sc_prs_shadow[tid].udf != udf_type))
+ continue;
+
+ pe->index = tid;
+ mvpp2_prs_hw_read(sc, pe);
+ entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
+ if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+ entry_pmap == pmap)
+ return pe;
+ }
+
+ free(pe, M_TEMP, sizeof(*pe));
+ return NULL;
+}
+
+/*
+ * Add ('add' != 0) or remove a unicast/multicast/broadcast MAC DA
+ * filter entry for 'port_id'.  Entries are shared between ports via
+ * the tcam port map; the entry is invalidated once no port uses it.
+ * Returns 0 on success or an error value.
+ */
+int
+mvpp2_prs_mac_da_accept(struct mvpp2_softc *sc, int port_id, const uint8_t *da,
+    int add)
+{
+ struct mvpp2_prs_entry *pe;
+ uint32_t pmap, len, ri;
+ uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ int tid;
+
+ pe = mvpp2_prs_mac_da_range_find(sc, (1 << port_id), da, mask,
+ MVPP2_PRS_UDF_MAC_DEF);
+ if (pe == NULL) {
+ /* nothing to remove */
+ if (!add)
+ return 0;
+
+ /* new entries go below the first MAC range entry */
+ for (tid = MVPP2_PE_FIRST_FREE_TID; tid <=
+ MVPP2_PE_LAST_FREE_TID; tid++) {
+ if (sc->sc_prs_shadow[tid].valid &&
+ (sc->sc_prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
+ (sc->sc_prs_shadow[tid].udf == MVPP2_PRS_UDF_MAC_RANGE))
+ break;
+ }
+
+ tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, tid - 1);
+ if (tid < 0)
+ return tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return ENOMEM;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+ pe->index = tid;
+ mvpp2_prs_tcam_port_map_set(pe, 0);
+ }
+
+ mvpp2_prs_tcam_port_set(pe, port_id, add);
+
+ /* invalidate the entry if no ports are left enabled */
+ pmap = mvpp2_prs_tcam_port_map_get(pe);
+ if (pmap == 0) {
+ if (add) {
+ /* NOTE(review): -1 here vs. errno values elsewhere */
+ free(pe, M_TEMP, sizeof(*pe));
+ return -1;
+ }
+ mvpp2_prs_hw_inv(sc, pe->index);
+ sc->sc_prs_shadow[pe->index].valid = 0;
+ free(pe, M_TEMP, sizeof(*pe));
+ return 0;
+ }
+
+ mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+ /* match the full destination address */
+ len = ETHER_ADDR_LEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+ if (ETHER_IS_BROADCAST(da))
+ ri = MVPP2_PRS_RI_L2_BCAST;
+ else if (ETHER_IS_MULTICAST(da))
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ else
+ ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+
+ mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+ mvpp2_prs_shadow_ri_set(sc, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ MVPP2_PRS_RI_MAC_ME_MASK);
+ /* skip past both MAC addresses */
+ mvpp2_prs_sram_shift_set(pe, 2 * ETHER_ADDR_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ sc->sc_prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+ mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(sc, pe);
+
+ free(pe, M_TEMP, sizeof(*pe));
+ return 0;
+}
+
+/*
+ * Configure the DSA/EDSA tagging mode of 'port_id' by enabling the
+ * port in the parser entries of the selected mode and disabling it in
+ * the entries of the other mode.  Returns 0 on success or EINVAL for
+ * an unknown tag type.
+ */
+int
+mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
+{
+ switch (type) {
+ case MVPP2_TAG_TYPE_EDSA:
+ /* add port to EDSA entries, remove it from DSA entries */
+ mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+ break;
+ case MVPP2_TAG_TYPE_DSA:
+ /* add port to DSA entries, remove it from EDSA entries */
+ mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+ break;
+ case MVPP2_TAG_TYPE_MH:
+ case MVPP2_TAG_TYPE_NONE:
+ /* remove port from both DSA and EDSA entries */
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
+ MVPP2_PRS_EDSA);
+ mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+ break;
+ default:
+ if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+ return EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Install (or update) the default flow entry for 'port' and restrict
+ * it to that port via the tcam port map.
+ * Returns 0 on success or an error value.
+ */
+int
+mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+ struct mvpp2_prs_entry *pe;
+ int tid;
+
+ pe = mvpp2_prs_flow_find(port->sc, port->sc_id);
+ if (pe == NULL) {
+ /* flow entries are allocated from the top of the table */
+ tid = mvpp2_prs_tcam_first_free(port->sc,
+ MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
+ if (pe == NULL)
+ return ENOMEM;
+
+ mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+ pe->index = tid;
+ /* store the port id as the flow id and finish parsing */
+ mvpp2_prs_sram_ai_update(pe, port->sc_id,
+ MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+ mvpp2_prs_shadow_set(port->sc, pe->index, MVPP2_PRS_LU_FLOWS);
+ }
+
+ mvpp2_prs_tcam_port_map_set (pe,(1 << port->sc_id));
+ mvpp2_prs_hw_write(port->sc, pe);
+ free(pe, M_TEMP, sizeof(*pe));
+ return 0;
+}
+
+/*
+ * Write one classifier flow table entry: select the index, then store
+ * the three data words.
+ */
+void
+mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
+{
+ mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+ mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+}
+
+/*
+ * Write one classifier lookup ID table entry: select the way/lkpid,
+ * then store the data word.
+ */
+void
+mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
+{
+ uint32_t val;
+
+ val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+ mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
+ mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/*
+ * Enable the classifier and clear its flow table and (both ways of)
+ * its lookup ID table.
+ */
+void
+mvpp2_cls_init(struct mvpp2_softc *sc)
+{
+ struct mvpp2_cls_lookup_entry le;
+ struct mvpp2_cls_flow_entry fe;
+ int index;
+
+ mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /*
+ * clear the whole data array; the previous code passed the word
+ * count (not the byte size) to memset and left most of fe.data
+ * uninitialized.
+ */
+ memset(&fe.data, 0, sizeof(fe.data));
+ for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+ fe.index = index;
+ mvpp2_cls_flow_write(sc, &fe);
+ }
+ le.data = 0;
+ for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+ le.lkpid = index;
+ le.way = 0;
+ mvpp2_cls_lookup_write(sc, &le);
+ le.way = 1;
+ mvpp2_cls_lookup_write(sc, &le);
+ }
+}
+
+/*
+ * Set up the default classifier behaviour for 'port': way 0, the
+ * port's first rx queue as the default queue, and all lookup engines
+ * disabled.
+ */
+void
+mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+ struct mvpp2_cls_lookup_entry le;
+ uint32_t val;
+
+ /* set way for the port */
+ val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
+ mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);
+
+ /*
+ * pick the entry to be accessed in lookup ID decoding table
+ * according to the way and lkpid.
+ */
+ le.lkpid = port->sc_id;
+ le.way = 0;
+ le.data = 0;
+
+ /* set initial CPU queue for receiving packets */
+ le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+ le.data |= (port->sc_id * 32);
+
+ /* disable classification engines */
+ le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ /* update lookup ID table entry */
+ mvpp2_cls_lookup_write(port->sc, &le);
+}
+
+/* select the rx queue that receives oversize packets for this port */
+void
+mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+ uint32_t rxq;
+
+ rxq = (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK;
+ mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
+ rxq);
+}
diff --git a/sys/dev/fdt/if_mvppreg.h b/sys/dev/fdt/if_mvppreg.h
new file mode 100644
index 00000000000..4a3e3d5f2c7
--- /dev/null
+++ b/sys/dev/fdt/if_mvppreg.h
@@ -0,0 +1,2077 @@
+/* $OpenBSD: if_mvppreg.h,v 1.1 2020/06/25 12:09:11 patrick Exp $ */
+/*
+ * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
+ * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Copyright (C) 2016 Marvell International Ltd.
+ *
+ * Marvell BSD License Option
+ *
+ * If you received this File from Marvell, you may opt to use, redistribute
+ * and/or modify this File under the following licensing terms.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Marvell nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MVPP2_LIB_HW__
+#define __MVPP2_LIB_HW__
+
+#ifndef BIT
+#define BIT(nr) (1 << (nr))
+#endif
+
+/* PP2v2 registers offsets */
+#define MVPP22_SMI_OFFSET 0x1200
+#define MVPP22_MPCS_OFFSET 0x7000
+#define MVPP22_MPCS_REG_SIZE 0x1000
+#define MVPP22_XPCS_OFFSET 0x7400
+#define MVPP22_XPCS_REG_SIZE 0x1000
+#define MVPP22_GMAC_OFFSET 0x7e00
+#define MVPP22_GMAC_REG_SIZE 0x1000
+#define MVPP22_XLG_OFFSET 0x7f00
+#define MVPP22_XLG_REG_SIZE 0x1000
+#define MVPP22_RFU1_OFFSET 0x318000
+#define MVPP22_ADDR_SPACE_SIZE 0x10000
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
+#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
+#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
+#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
+#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
+#define MVPP2_POOL_BUF_SIZE_OFFSET 5
+#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
+#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
+#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
+#define MVPP2_RXQ_POOL_SHORT_OFFS 20
+#define MVPP2_RXQ_POOL_SHORT_MASK 0xf00000
+#define MVPP2_RXQ_POOL_LONG_OFFS 24
+#define MVPP2_RXQ_POOL_LONG_MASK 0xf000000
+#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
+#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
+#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
+#define MVPP2_PRS_PORT_LU_MAX 0xf
+#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
+#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
+#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
+#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
+#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG 0x1800
+#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG 0x1810
+#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG 0x1814
+#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
+#define MVPP2_CLS_LKP_TBL_REG 0x1818
+#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
+#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG 0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
+#define MVPP2_RXQ_NUM_NEW_OFFSET 16
+#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
+#define MVPP2_RXQ_THRESH_REG 0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET 0
+#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
+#define MVPP2_RXQ_INDEX_REG 0x2050
+#define MVPP2_TXQ_NUM_REG 0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
+#define MVPP22_TXQ_DESC_ADDR_HIGH_REG 0x20a8
+#define MVPP22_TXQ_DESC_ADDR_HIGH_MASK 0xff
+#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
+#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
+#define MVPP2_TXQ_THRESH_REG 0x2094
+#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
+#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff
+#define MVPP2_TXQ_INDEX_REG 0x2098
+#define MVPP2_TXQ_PREF_BUF_REG 0x209c
+#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
+#define MVPP2_TXQ_PENDING_REG 0x20a0
+#define MVPP2_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
+/* parenthesize the macro argument so expressions expand safely */
+#define MVPP22_TXQ_SENT_REG(txq)		(0x3e00 + 4 * ((txq) - 128))
+#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
+#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE 0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT 7
+#define MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT 8
+#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xff
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
+#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
+#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
+#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
+#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
+#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff8
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
+#define MVPP2_BM_START_MASK BIT(0)
+#define MVPP2_BM_STOP_MASK BIT(1)
+#define MVPP2_BM_STATE_MASK BIT(4)
+#define MVPP2_BM_LOW_THRESH_OFFS 8
+#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
+#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << MVPP2_BM_LOW_THRESH_OFFS)
+#define MVPP2_BM_HIGH_THRESH_OFFS 16
+#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
+#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
+#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
+#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
+#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
+#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
+#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
+#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
+#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
+#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG 0x64c0
+#define MVPP2_BM_MC_RLS_REG 0x64c4
+#define MVPP2_BM_MC_ID_MASK 0xfff
+#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
+
+#define MVPP22_BM_PHY_VIRT_HIGH_ALLOC_REG 0x6444
+#define MVPP22_BM_PHY_HIGH_ALLOC_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_ALLOC_OFFSET 8
+#define MVPP22_BM_VIRT_HIGH_ALLOC_MASK 0xff00
+
+#define MVPP22_BM_PHY_VIRT_HIGH_RLS_REG 0x64c4
+
+#define MVPP22_BM_PHY_HIGH_RLS_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_RLS_OFFST 8
+
+#define MVPP22_BM_POOL_BASE_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_HIGH_MASK 0xff
+#define MVPP2_BM_PRIO_CTRL_REG 0x6800
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
+#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
+#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
+#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
+#define MVPP2_TXP_SCHED_MTU_REG 0x801c
+#define MVPP2_TXP_MTU_MAX 0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
+#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
+#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
+#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG 0x8800
+#define MVPP2_TX_PORT_FLUSH_REG 0x8810
+#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE 0x24
+#define MVPP2_SRC_ADDR_HIGH 0x28
+#define MVPP2_PHY_AN_CFG0_REG 0x34
+#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * 0x400 + (port) * 0x400)
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+#define MVPP2_ISR_SUM_MASK_REG 0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG 0x0
+#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
+#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
+#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
+#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG 0x4
+#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
+#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
+#define MVPP2_GMAC_PCS_LB_EN_BIT 6
+#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
+#define MVPP2_GMAC_SA_LOW_OFFS 7
+#define MVPP2_GMAC_CTRL_2_REG 0x8
+#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
+#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
+#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
+#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
+#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
+#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
+#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
+#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10)
+#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
+#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+/* Port Interrupts */
+#define MV_GMAC_INTERRUPT_CAUSE_REG (0x0020)
+#define MV_GMAC_INTERRUPT_MASK_REG (0x0024)
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK (0x1 << MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary */
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_REG (0x00A0)
+#define MV_GMAC_INTERRUPT_SUM_MASK_REG (0x00A4)
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK (0x1 << MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS)
+
+/* Port Mac Control0 */
+#define MVPP2_PORT_CTRL0_REG (0x0000)
+#define MVPP2_PORT_CTRL0_PORTEN_OFFS 0
+#define MVPP2_PORT_CTRL0_PORTEN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_PORTEN_OFFS)
+
+#define MVPP2_PORT_CTRL0_PORTTYPE_OFFS 1
+#define MVPP2_PORT_CTRL0_PORTTYPE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_PORTTYPE_OFFS)
+
+#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS 2
+#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS)
+
+#define MVPP2_PORT_CTRL0_COUNT_EN_OFFS 15
+#define MVPP2_PORT_CTRL0_COUNT_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_COUNT_EN_OFFS)
+
+/* Port Mac Control1 */
+#define MVPP2_PORT_CTRL1_REG (0x0004)
+#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS 0
+#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS)
+
+#define MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS 1
+#define MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS)
+
+#define MVPP2_PORT_CTRL1_MGMII_MODE_OFFS 2
+#define MVPP2_PORT_CTRL1_MGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_MGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS 3
+#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS 4
+#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS)
+
+#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS 5
+#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS)
+
+#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS 6
+#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS)
+
+#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS 7
+#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_MASK \
+ (0x000000ff << MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS)
+
+#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS 15
+#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS)
+
+/* Port Mac Control2 */
+#define MVPP2_PORT_CTRL2_REG (0x0008)
+#define MVPP2_PORT_CTRL2_SGMII_MODE_OFFS 0
+#define MVPP2_PORT_CTRL2_SGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_FC_MODE_OFFS 1
+#define MVPP2_PORT_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MVPP2_PORT_CTRL2_FC_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_PCS_EN_OFFS 3
+#define MVPP2_PORT_CTRL2_PCS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PCS_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_RGMII_MODE_OFFS 4
+#define MVPP2_PORT_CTRL2_RGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_RGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_DIS_PADING_OFFS 5
+#define MVPP2_PORT_CTRL2_DIS_PADING_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_DIS_PADING_OFFS)
+
+#define MVPP2_PORT_CTRL2_PORTMACRESET_OFFS 6
+#define MVPP2_PORT_CTRL2_PORTMACRESET_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PORTMACRESET_OFFS)
+
+#define MVPP2_PORT_CTRL2_TX_DRAIN_OFFS 7
+#define MVPP2_PORT_CTRL2_TX_DRAIN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_TX_DRAIN_OFFS)
+
+#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS 8
+#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS)
+
+#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS 9
+#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS 10
+#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS 11
+#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS 12
+#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_MASK \
+ (0x00000003 << MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS)
+
+#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS 14
+#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS)
+
+#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS 15
+#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS)
+
+/* Port Auto-negotiation Configuration */
+#define MVPP2_PORT_AUTO_NEG_CFG_REG (0x000c)
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS 0
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS 1
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS 2
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS 3
+#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS 4
+#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS 5
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS 6
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS 7
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS 9
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS 10
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS 11
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS 12
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS 13
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS 14
+#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS 15
+#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS)
+
+/* Port Status0 */
+#define MVPP2_PORT_STATUS0_REG (0x0010)
+#define MVPP2_PORT_STATUS0_LINKUP_OFFS 0
+#define MVPP2_PORT_STATUS0_LINKUP_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_LINKUP_OFFS)
+
+#define MVPP2_PORT_STATUS0_GMIISPEED_OFFS 1
+#define MVPP2_PORT_STATUS0_GMIISPEED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_GMIISPEED_OFFS)
+
+#define MVPP2_PORT_STATUS0_MIISPEED_OFFS 2
+#define MVPP2_PORT_STATUS0_MIISPEED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_MIISPEED_OFFS)
+
+#define MVPP2_PORT_STATUS0_FULLDX_OFFS 3
+#define MVPP2_PORT_STATUS0_FULLDX_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_FULLDX_OFFS)
+
+#define MVPP2_PORT_STATUS0_RXFCEN_OFFS 4
+#define MVPP2_PORT_STATUS0_RXFCEN_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_RXFCEN_OFFS)
+
+#define MVPP2_PORT_STATUS0_TXFCEN_OFFS 5
+#define MVPP2_PORT_STATUS0_TXFCEN_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_TXFCEN_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS 6
+#define MVPP2_PORT_STATUS0_PORTRXPAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS 7
+#define MVPP2_PORT_STATUS0_PORTTXPAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS 8
+#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS 9
+#define MVPP2_PORT_STATUS0_PORTBUFFULL_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS)
+
+#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS 10
+#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS)
+
+#define MVPP2_PORT_STATUS0_ANDONE_OFFS 11
+#define MVPP2_PORT_STATUS0_ANDONE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_ANDONE_OFFS)
+
+#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS 12
+#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS)
+
+#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS 13
+#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS)
+
+#define MVPP2_PORT_STATUS0_SYNCOK_OFFS 14
+#define MVPP2_PORT_STATUS0_SYNCOK_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SYNCOK_OFFS)
+
+#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS 15
+#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS)
+
+/* Port Serial Parameters Configuration */
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REG (0x0014)
+#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS 0
+#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS 1
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS 2
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS 3
+#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS 4
+#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS 5
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS 6
+#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_MASK \
+ (0x0000003f << MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS 12
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS 13
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS 14
+#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS 15
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS)
+
+/* Port Fifo Configuration 0 */
+#define MVPP2_PORT_FIFO_CFG_0_REG (0x0018)
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS 0
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS 8
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS)
+
+/* Port Fifo Configuration 1 */
+#define MVPP2_PORT_FIFO_CFG_1_REG (0x001c)
+#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS 0
+#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_MASK \
+ (0x0000003f << MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS 15
+#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS)
+
+/* Port Serdes Configuration0 */
+#define MVPP2_PORT_SERDES_CFG0_REG (0x0028)
+#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS 0
+#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS 1
+#define MVPP2_PORT_SERDES_CFG0_PU_TX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS 2
+#define MVPP2_PORT_SERDES_CFG0_PU_RX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS 3
+#define MVPP2_PORT_SERDES_CFG0_PU_PLL_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS 4
+#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS 5
+#define MVPP2_PORT_SERDES_CFG0_TESTEN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS 6
+#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS 7
+#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS 8
+#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS 9
+#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS 10
+#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS 11
+#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS 12
+#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS 13
+#define MVPP2_PORT_SERDES_CFG0_OUTAMP_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS 14
+#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS 15
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS)
+
+/* Port Serdes Configuration1 */
+#define MVPP2_PORT_SERDES_CFG1_REG		(0x002c)
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS	0
+/*
+ * Reference the OFFS name actually defined above; the previous
+ * expansion used a never-defined MVPP2_GMAC_PORT_..._OFFS spelling,
+ * which would fail to compile if this mask were ever used.
+ */
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_MASK	\
+		(0x00000001 << MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS 1
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS 2
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_MASK \
+ (0x00000003 << MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS 4
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS 5
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS 6
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS 7
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS 8
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_MASK \
+ (0x0000001f << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS 13
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS 14
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS 15
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS)
+
+/* Port Serdes Configuration2 */
+#define MVPP2_PORT_SERDES_CFG2_REG (0x0030)
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS 0
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_MASK \
+ (0x0000ffff << MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS)
+
+/* Port Serdes Configuration3 */
+#define MVPP2_PORT_SERDES_CFG3_REG (0x0034)
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS 0
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_MASK \
+ (0x0000ffff << MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS)
+
+/* Port Prbs Status */
+#define MVPP2_PORT_PRBS_STATUS_REG (0x0038)
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS 0
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_MASK \
+ (0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS)
+
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS 1
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_MASK \
+ (0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS)
+
+/* Port Prbs Error Counter */
+#define MVPP2_PORT_PRBS_ERR_CNTR_REG (0x003c)
+#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS 0
+#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_MASK \
+ (0x0000ffff << MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS)
+
+/* Port Status1 */
+#define MVPP2_PORT_STATUS1_REG (0x0040)
+#define MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS 0
+#define MVPP2_PORT_STATUS1_MEDIAACTIVE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS)
+
+/* Port Mib Counters Control */
+#define MVPP2_PORT_MIB_CNTRS_CTRL_REG (0x0044)
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS 0
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS 1
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS 2
+#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS 3
+#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS 5
+#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 6
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 7
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Port Mac Control3 */
+#define MVPP2_PORT_CTRL3_REG (0x0048)
+#define MVPP2_PORT_CTRL3_BUF_SIZE_OFFS 0
+#define MVPP2_PORT_CTRL3_BUF_SIZE_MASK \
+ (0x0000003f << MVPP2_PORT_CTRL3_BUF_SIZE_OFFS)
+
+#define MVPP2_PORT_CTRL3_IPG_DATA_OFFS 6
+#define MVPP2_PORT_CTRL3_IPG_DATA_MASK \
+ (0x000001ff << MVPP2_PORT_CTRL3_IPG_DATA_OFFS)
+
+#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS 15
+#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS)
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Port Mac Control4 */
+#define MVPP2_PORT_CTRL4_REG (0x0090)
+#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS 0
+#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS)
+
+#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS 1
+#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS)
+
+#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS 2
+#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_FC_EN_RX_OFFS 3
+#define MVPP2_PORT_CTRL4_FC_EN_RX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_RX_OFFS)
+
+#define MVPP2_PORT_CTRL4_FC_EN_TX_OFFS 4
+#define MVPP2_PORT_CTRL4_FC_EN_TX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_TX_OFFS)
+
+#define MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS 5
+#define MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS)
+
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS 6
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS)
+
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS 7
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS)
+
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS 8
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS 9
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS 10
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_MASK \
+ (0x0000003f << MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS)
+
+/* XPCS registers */
+
+/* Global Configuration 0 */
+#define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0
+#define MVPP22_XPCS_PCSRESET BIT(0)
+#define MVPP22_XPCS_PCSMODE_OFFS 3
+#define MVPP22_XPCS_PCSMODE_MASK (0x3 << MVPP22_XPCS_PCSMODE_OFFS)
+#define MVPP22_XPCS_LANEACTIVE_OFFS 5
+#define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << MVPP22_XPCS_LANEACTIVE_OFFS)
+
+/* MPCS registers */
+
+#define MVPP22_MPCS40G_COMMON_CONTROL 0x14
+#define MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK BIT(10)
+
+#define MVPP22_MPCS_CLOCK_RESET 0x14c
+#define MVPP22_MPCS_TX_SD_CLK_RESET_MASK BIT(0)
+#define MVPP22_MPCS_RX_SD_CLK_RESET_MASK BIT(1)
+#define MVPP22_MPCS_MAC_CLK_RESET_MASK BIT(2)
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS 4
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_MASK (0x7 << MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS)
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT (0x1 << MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS)
+#define MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK BIT(11)
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) (((index) < (q)->LastDesc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC 1000
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 64
+
+/*
+ * The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically by zeroes on
+ * the RX side. Those two bytes being at the front of the Ethernet
+ * header, they allow to have the IP header aligned on a 4 bytes
+ * boundary automatically: the hardware skips those two bytes on its
+ * own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ 8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ 8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ 4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD 64
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD 32
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK 64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE 256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE 32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+/* TX FIFO constants */
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
+#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
+#define MVPP2_TX_FIFO_THRESHOLD_10KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP2_TX_FIFO_THRESHOLD_3KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+
+#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE 16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK BIT(0)
+
+/* SD1 Control1 */
+#define SD1_CONTROL_1_REG (0x148)
+
+#define SD1_CONTROL_XAUI_EN_OFFSET 28
+#define SD1_CONTROL_XAUI_EN_MASK (0x1 << SD1_CONTROL_XAUI_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI0_L23_EN_OFFSET 27
+#define SD1_CONTROL_RXAUI0_L23_EN_MASK (0x1 << SD1_CONTROL_RXAUI0_L23_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI1_L45_EN_OFFSET 26
+#define SD1_CONTROL_RXAUI1_L45_EN_MASK (0x1 << SD1_CONTROL_RXAUI1_L45_EN_OFFSET)
+
+/* System Soft Reset 1 */
+#define MV_GOP_SOFT_RESET_1_REG (0x108)
+
+#define NETC_GOP_SOFT_RESET_OFFSET 6
+#define NETC_GOP_SOFT_RESET_MASK (0x1 << NETC_GOP_SOFT_RESET_OFFSET)
+
+/* Ports Control 0 */
+#define MV_NETCOMP_PORTS_CONTROL_0 (0x110)
+
+#define NETC_CLK_DIV_PHASE_OFFSET 31
+#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFSET)
+
+#define NETC_GIG_RX_DATA_SAMPLE_OFFSET 29
+#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << NETC_GIG_RX_DATA_SAMPLE_OFFSET)
+
+#define NETC_BUS_WIDTH_SELECT_OFFSET 1
+#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << NETC_BUS_WIDTH_SELECT_OFFSET)
+
+#define NETC_GOP_ENABLE_OFFSET 0
+#define NETC_GOP_ENABLE_MASK (0x1 << NETC_GOP_ENABLE_OFFSET)
+
+/* Ports Control 1 */
+#define MV_NETCOMP_PORTS_CONTROL_1 (0x114)
+
+#define NETC_PORT_GIG_RF_RESET_OFFSET(port) (28 + port)
+#define NETC_PORT_GIG_RF_RESET_MASK(port) (0x1 << NETC_PORT_GIG_RF_RESET_OFFSET(port))
+
+#define NETC_PORTS_ACTIVE_OFFSET(port) (0 + port)
+#define NETC_PORTS_ACTIVE_MASK(port) (0x1 << NETC_PORTS_ACTIVE_OFFSET(port))
+
+/* Ports Status */
+#define MV_NETCOMP_PORTS_STATUS (0x11C)
+#define NETC_PORTS_STATUS_OFFSET(port) (0 + port)
+#define NETC_PORTS_STATUS_MASK(port) (0x1 << NETC_PORTS_STATUS_OFFSET(port))
+
+/* Networking Complex Control 0 */
+#define MV_NETCOMP_CONTROL_0 (0x120)
+
+#define NETC_GBE_PORT1_MII_MODE_OFFSET 2
+#define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << NETC_GBE_PORT1_MII_MODE_OFFSET)
+
+#define NETC_GBE_PORT1_SGMII_MODE_OFFSET 1
+#define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << NETC_GBE_PORT1_SGMII_MODE_OFFSET)
+
+#define NETC_GBE_PORT0_SGMII_MODE_OFFSET 0
+#define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << NETC_GBE_PORT0_SGMII_MODE_OFFSET)
+
+/* Port Mac Control0 */
+#define MV_XLG_PORT_MAC_CTRL0_REG ( 0x0000)
+#define MV_XLG_MAC_CTRL0_PORTEN_OFFS 0
+#define MV_XLG_MAC_CTRL0_PORTEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PORTEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MACRESETN_OFFS 1
+#define MV_XLG_MAC_CTRL0_MACRESETN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MACRESETN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS 2
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS 3
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS 5
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXFCEN_OFFS 7
+#define MV_XLG_MAC_CTRL0_RXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXFCEN_OFFS 8
+#define MV_XLG_MAC_CTRL0_TXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_TXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS 9
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS 10
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS 11
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS 13
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS 14
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS 15
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+/* Port Mac Control1 */
+#define MV_XLG_PORT_MAC_CTRL1_REG (0x0004)
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS 0
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS)
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_DEFAULT 0x1400
+
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS 13
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS 14
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS 15
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS)
+
+/* Port Mac Control2 */
+#define MV_XLG_PORT_MAC_CTRL2_REG (0x0008)
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS 0
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_MASK \
+ (0x000000ff << MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS)
+
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS 8
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS 9
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS)
+
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS 10
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS 13
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FC_MODE_OFFS 14
+#define MV_XLG_MAC_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL2_FC_MODE_OFFS)
+
+/* Port Status */
+#define MV_XLG_MAC_PORT_STATUS_REG (0x000c)
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS 1
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS 2
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS 3
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS 4
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS 5
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS 6
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS 7
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS 8
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS)
+
+/* Port Fifos Thresholds Configuration */
+#define MV_XLG_PORT_FIFOS_THRS_CFG_REG (0x0010)
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS 0
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS 5
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS 11
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS)
+
+/* Port Mac Control3 */
+#define MV_XLG_PORT_MAC_CTRL3_REG (0x001c)
+#define MV_XLG_MAC_CTRL3_BUFSIZE_OFFS 0
+#define MV_XLG_MAC_CTRL3_BUFSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_CTRL3_BUFSIZE_OFFS)
+
+#define MV_XLG_MAC_CTRL3_XTRAIPG_OFFS 6
+#define MV_XLG_MAC_CTRL3_XTRAIPG_MASK \
+ (0x0000007f << MV_XLG_MAC_CTRL3_XTRAIPG_OFFS)
+
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS 13
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC \
+ (0x00000000 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_10G \
+ (0x00000001 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+
+/* Port Per Prio Flow Control Status */
+#define MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG (0x0020)
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS)
+
+/* Debug Bus Status */
+#define MV_XLG_DEBUG_BUS_STATUS_REG (0x0024)
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS 0
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_MASK \
+ (0x0000ffff << MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS)
+
+/* Port Metal Fix */
+#define MV_XLG_PORT_METAL_FIX_REG (0x002c)
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS 0
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS 1
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS 2
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS 3
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS 4
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS 5
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS 6
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS 7
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS 8
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS 9
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_MASK \
+ (0x0000000f << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS 13
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_MASK \
+ (0x00000007 << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS)
+
+/* Xg Mib Counters Control */
+#define MV_XLG_MIB_CNTRS_CTRL_REG (0x0030)
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS 0
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS 1
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS 2
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS 3
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS 5
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_MASK \
+ (0x0000003f << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 11
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 12
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Cn/ccfc Timer%i */
+#define MV_XLG_CNCCFC_TIMERI_REG(t) ((0x0038 + (t) * 4))
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS 0
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_MASK \
+ (0x0000ffff << MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS)
+
+/* Ppfc Control */
+#define MV_XLG_MAC_PPFC_CTRL_REG (0x0060)
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS 0
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS)
+
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS 9
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS)
+
+/* Fc Dsa Tag 0 */
+#define MV_XLG_MAC_FC_DSA_TAG_0_REG (0x0068)
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS)
+
+/* Fc Dsa Tag 1 */
+#define MV_XLG_MAC_FC_DSA_TAG_1_REG (0x006c)
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS)
+
+/* Fc Dsa Tag 2 */
+#define MV_XLG_MAC_FC_DSA_TAG_2_REG (0x0070)
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS)
+
+/* Fc Dsa Tag 3 */
+#define MV_XLG_MAC_FC_DSA_TAG_3_REG (0x0074)
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS)
+
+/* Dic Budget Compensation */
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG (0x0080)
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS 0
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_MASK \
+ (0x0000ffff << MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS)
+
+/* Port Mac Control4 */
+#define MV_XLG_PORT_MAC_CTRL4_REG (0x0084)
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS 0
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS)
+
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS 1
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS 2
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS 3
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS)
+
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS 4
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS 5
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS 6
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS 7
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_USE_XPCS_OFFS 8
+#define MV_XLG_MAC_CTRL4_USE_XPCS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_USE_XPCS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS 9
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS 10
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS 12
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS)
+
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK 14
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK)
+
+/* Port Mac Control5 */
+#define MV_XLG_PORT_MAC_CTRL5_REG (0x0088)
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS 0
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_MASK \
+ (0x0000000f << MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS 4
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS 7
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS 10
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS)
+
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS 13
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS)
+
+/* External Control */
+#define MV_XLG_MAC_EXT_CTRL_REG (0x0090)
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS 0
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS 1
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS 2
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS 3
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS 4
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS 5
+/*
+ * Generic per-bit OFFS/MASK pairs for the XLG MAC External Control and
+ * Macro Control registers, ported from EDK2.  The function of the
+ * individual bits is not documented here; consult the Armada 7K/8K
+ * functional specification before using any of them.  Note the naming
+ * switch from EXTERNAL_CTRLn to EXT_CTRL_n at bit 10 (upstream artifact).
+ */
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS 6
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS 7
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS 8
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS 9
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS 10
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS 11
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS 12
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS 13
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS 14
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS 15
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS)
+
+/* Macro Control */
+#define MV_XLG_MAC_MACRO_CTRL_REG (0x0094)
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS 0
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS 1
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS 2
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS 3
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS 4
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS 5
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS 6
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS 7
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS 8
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS 9
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS 10
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS 11
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS 12
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS 13
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS 14
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS 15
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS)
+
+/*
+ * DIC PPM IPG Reduce register.
+ * NOTE(review): same offset (0x0094) as MV_XLG_MAC_MACRO_CTRL_REG
+ * above -- one of the two definitions looks suspect; confirm against
+ * the Armada 7K/8K register specification.
+ */
+#define MV_XLG_MAC_DIC_PPM_IPG_REDUCE_REG (0x0094)
+
+/* Port Interrupt Cause (bit 0: summary, bit 1: link change) */
+#define MV_XLG_INTERRUPT_CAUSE_REG (0x0014)
+/* Port Interrupt Mask (gates the cause bits above) */
+#define MV_XLG_INTERRUPT_MASK_REG (0x0018)
+#define MV_XLG_SUMMARY_INTERRUPT_OFFSET 0
+#define MV_XLG_SUMMARY_INTERRUPT_MASK \
+ (0x1 << MV_XLG_SUMMARY_INTERRUPT_OFFSET)
+#define MV_XLG_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_INTERRUPT_LINK_CHANGE_MASK \
+ (0x1 << MV_XLG_INTERRUPT_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary Cause (separate XLG and GIG link-change bits) */
+#define MV_XLG_EXTERNAL_INTERRUPT_CAUSE_REG (0x0058)
+/* Port Interrupt Summary Mask */
+#define MV_XLG_EXTERNAL_INTERRUPT_MASK_REG (0x005C)
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_OFFS 1
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK \
+ (0x1 << MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_OFFS)
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_OFFS 2
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK \
+ (0x1 << MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_OFFS)
+
+
+/* All PPv2.2 addresses are 40-bit */
+#define MVPP22_ADDR_HIGH_SIZE 8 /* width of the "high" address part */
+#define MVPP22_ADDR_HIGH_MASK ((1<<MVPP22_ADDR_HIGH_SIZE) - 1)
+#define MVPP22_ADDR_MASK (0xFFFFFFFFFF)
+
+/* Desc addr shift */
+#define MVPP21_DESC_ADDR_SHIFT 0 /*Applies to RXQ, AGGR_TXQ*/
+#define MVPP22_DESC_ADDR_SHIFT 8 /*Applies to RXQ, AGGR_TXQ*/
+
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+
+/* Field offsets within the *_ATTR registers above */
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+
+/* Field offsets within the *_CODE registers above */
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+
+/* AXI cache attribute encodings */
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
+
+/* AXI shareability domain encodings */
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+
+/* PHY address register (PHYAD is a 5-bit field) */
+#define MV_SMI_PHY_ADDRESS_REG(n) (0xC + 0x4 * (n))
+#define MV_SMI_PHY_ADDRESS_PHYAD_OFFS 0
+#define MV_SMI_PHY_ADDRESS_PHYAD_MASK \
+ (0x1F << MV_SMI_PHY_ADDRESS_PHYAD_OFFS)
+
+
+/* Marvell tag types */
+enum Mvpp2TagType {
+ MVPP2_TAG_TYPE_NONE = 0, /* untagged */
+ MVPP2_TAG_TYPE_MH = 1, /* Marvell header */
+ MVPP2_TAG_TYPE_DSA = 2, /* DSA switch tag */
+ MVPP2_TAG_TYPE_EDSA = 3, /* extended DSA switch tag */
+ MVPP2_TAG_TYPE_VLAN = 4, /* 802.1Q VLAN tag */
+ MVPP2_TAG_TYPE_LAST = 5
+};
+
+
+/*
+ * Parser constants.  The parser TCAM/SRAM has 256 entries; each TCAM
+ * entry is 6 32-bit words, each SRAM entry 4 words (see the entry
+ * unions below).
+ */
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_PRS_TCAM_WORDS 6
+#define MVPP2_PRS_SRAM_WORDS 4
+#define MVPP2_PRS_FLOW_ID_SIZE 64
+#define MVPP2_PRS_FLOW_ID_MASK 0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
+#define MVPP2_PRS_IPV4_HEAD 0x40 /* version nibble == 4 */
+#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
+#define MVPP2_PRS_IPV4_MC 0xe0 /* 224.0.0.0/4 first byte */
+#define MVPP2_PRS_IPV4_MC_MASK 0xf0
+#define MVPP2_PRS_IPV4_BC_MASK 0xff
+#define MVPP2_PRS_IPV4_IHL 0x5 /* minimal (20-byte) IPv4 header */
+#define MVPP2_PRS_IPV4_IHL_MASK 0xf
+#define MVPP2_PRS_IPV6_MC 0xff /* ff00::/8 first byte */
+#define MVPP2_PRS_IPV6_MC_MASK 0xff
+#define MVPP2_PRS_IPV6_HOP_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX 100
+
+/*
+ * Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_PORT_MASK 0xff
+#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs) (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) (((offs) * 2) - ((offs) % 2) + 2)
+#define MVPP2_PRS_TCAM_AI_BYTE 16
+#define MVPP2_PRS_TCAM_PORT_BYTE 17
+#define MVPP2_PRS_TCAM_LU_BYTE 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD 5
+/*
+ * Tcam entries ID.
+ * Entries FIRST_FREE_TID..LAST_FREE_TID are handed out dynamically;
+ * the named entries below occupy fixed slots at the top of the
+ * 256-entry table.
+ */
+#define MVPP2_PE_DROP_ALL 0
+#define MVPP2_PE_FIRST_FREE_TID 1
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+
+/*
+ * Sram structure.
+ * The MVPP2_PRS_SRAM_*_OFFS values below are bit offsets into the
+ * 128-bit (4 x 32-bit word) SRAM entry.
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS 0
+#define MVPP2_PRS_SRAM_RI_WORD 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_SHIFT_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_OFFS 73
+#define MVPP2_PRS_SRAM_UDF_BITS 8
+#define MVPP2_PRS_SRAM_UDF_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
+#define MVPP2_PRS_SRAM_AI_OFFS 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
+#define MVPP2_PRS_SRAM_AI_MASK 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
+
+/*
+ * Sram result info bits assignment.
+ * Note: the *_NONE, *_UCAST and *_UN values expand to the bitwise
+ * complement of their field mask (all bits set outside the field),
+ * not to a plain bit pattern like their siblings -- take care when
+ * combining them with other RI flags.
+ */
+#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
+#define MVPP2_PRS_RI_DSA_MASK 0x2
+#define MVPP2_PRS_RI_VLAN_MASK 0xc
+#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
+#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK 0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
+#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4 BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6 BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
+#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_UDF3_MASK 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
+#define MVPP2_PRS_RI_L4_TCP BIT(22)
+#define MVPP2_PRS_RI_L4_UDP BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI 0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED 1
+#define MVPP2_PRS_UNTAGGED 0
+#define MVPP2_PRS_EDSA 1
+#define MVPP2_PRS_DSA 0
+
+
+/* MAC entries, shadow udf (user-defined offset classification) */
+enum Mvpp2PrsUdf {
+ MVPP2_PRS_UDF_MAC_DEF,
+ MVPP2_PRS_UDF_MAC_RANGE,
+ MVPP2_PRS_UDF_L2_DEF,
+ MVPP2_PRS_UDF_L2_DEF_COPY,
+ MVPP2_PRS_UDF_L2_USER,
+};
+
+
+/*
+ * Lookup ID: identifies the parser stage an entry belongs to
+ * (stored in the TCAM LU field, see MVPP2_PRS_TCAM_LU_BYTE).
+ */
+enum Mvpp2PrsLookup {
+ MVPP2_PRS_LU_MH, /* Marvell header */
+ MVPP2_PRS_LU_MAC,
+ MVPP2_PRS_LU_DSA,
+ MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_L2,
+ MVPP2_PRS_LU_PPPOE,
+ MVPP2_PRS_LU_IP4,
+ MVPP2_PRS_LU_IP6,
+ MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_LAST,
+};
+
+
+/* L3 cast enum: unicast/multicast/broadcast classification */
+enum Mvpp2PrsL3Cast {
+ MVPP2_PRS_L3_UNI_CAST,
+ MVPP2_PRS_L3_MULTI_CAST,
+ MVPP2_PRS_L3_BROAD_CAST
+};
+
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE 512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE 64
+
+/* BM cookie (32 bits) definition: pool id at bit 8, cpu id at bit 24 */
+#define MVPP2_BM_COOKIE_POOL_OFFS 8
+#define MVPP2_BM_COOKIE_CPU_OFFS 24
+
+
+/*
+ * The MVPP2_TX_DESC and MVPP2_RX_DESC structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design.
+ * MVPP2_TXD_* are bits in the TX descriptor "command" word,
+ * MVPP2_RXD_* are bits in the RX descriptor "status" word.
+ */
+#define MVPP2_TXD_L3_OFF_SHIFT 0
+#define MVPP2_TXD_IP_HLEN_SHIFT 8
+#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE BIT(23)
+#define MVPP2_TXD_L4_UDP BIT(24)
+#define MVPP2_TXD_L3_IP6 BIT(26)
+#define MVPP2_TXD_L_DESC BIT(28)
+#define MVPP2_TXD_F_DESC BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_OVERRUN BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS 16
+#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
+#define MVPP2_RXD_L4_TCP BIT(25)
+#define MVPP2_RXD_L4_UDP BIT(26)
+#define MVPP2_RXD_L3_IP4 BIT(28)
+#define MVPP2_RXD_L3_IP6 BIT(30)
+#define MVPP2_RXD_BUF_HDR BIT(31)
+
+
+/*
+ * Transmit DMA descriptor (32 bytes).  Field order and widths are
+ * fixed by the hardware -- do not reorder or repack.
+ */
+struct mvpp2_tx_desc {
+ uint32_t command; /* Options used by HW for packet transmitting.*/
+ uint8_t packet_offset; /* the offset from the buffer beginning */
+ uint8_t phys_txq; /* destination queue ID */
+ uint16_t data_size; /* data size of transmitted packet in bytes */
+ uint64_t rsrvd_hw_cmd1; /* HwCmd (BM, PON, PNC) */
+ uint64_t buf_phys_addr_hw_cmd2; /* buffer DMA address + hw cmd bits */
+ uint64_t buf_cookie_bm_qset_hw_cmd3; /* buffer cookie + BM qset bits */
+};
+
+
+/*
+ * Receive DMA descriptor (32 bytes).  Field order and widths are
+ * fixed by the hardware -- do not reorder or repack.
+ */
+struct mvpp2_rx_desc {
+ uint32_t status; /* info about received packet */
+ uint16_t reserved1; /* ParserInfo (for future use, PnC) */
+ uint16_t data_size; /* size of received packet in bytes */
+ uint16_t rsrvd_gem; /* GemPortId (for future use, PON) */
+ uint16_t rsrvd_l4_csum; /* CsumL4 (for future use, PnC) */
+ uint32_t rsrvd_timestamp;
+ uint64_t buf_phys_addr_key_hash; /* buffer DMA address + key hash */
+ uint64_t buf_cookie_bm_qset_cls_info; /* buffer cookie + cls info */
+};
+
+
+/* Raw parser TCAM entry: 6 x 32-bit words, also byte-addressable. */
+union mvpp2_prs_tcam_entry {
+ uint32_t word[MVPP2_PRS_TCAM_WORDS];
+ uint8_t byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+/* Raw parser SRAM entry: 4 x 32-bit words, also byte-addressable. */
+union mvpp2_prs_sram_entry {
+ uint32_t word[MVPP2_PRS_SRAM_WORDS];
+ uint8_t byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+/* One parser entry: table index, TCAM match key and SRAM action. */
+struct mvpp2_prs_entry {
+ uint32_t index;
+ union mvpp2_prs_tcam_entry tcam;
+ union mvpp2_prs_sram_entry sram;
+};
+
+
+/* Software bookkeeping kept per parser TCAM entry. */
+struct mvpp2_prs_shadow {
+ int valid;
+ int finish;
+
+ /* Lookup ID */
+ int32_t lu;
+
+ /* User defined offset */
+ int32_t udf;
+
+ /* Result info */
+ uint32_t ri;
+ uint32_t ri_mask;
+};
+
+
+/* Classifier flow table entry (3 data words, see table size above). */
+struct mvpp2_cls_flow_entry {
+ uint32_t index;
+ uint32_t data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+/* Classifier lookup table entry. */
+struct mvpp2_cls_lookup_entry {
+ uint32_t lkpid;
+ uint32_t way;
+ uint32_t data;
+};
+
+
+/*
+ * Buffer header; presumably prepended by hardware when a packet spans
+ * multiple buffers (cf. MVPP2_RXD_BUF_HDR) -- confirm against the spec.
+ * NOTE(review): the CamelCase fields and ALL-CAPS typedef name are EDK2
+ * residue and differ from the snake_case style used elsewhere in this
+ * header; renaming would touch all users, so it is left as-is.
+ */
+typedef struct {
+ uint32_t NextBuffPhysAddr;
+ uint32_t NextBuffVirtAddr;
+ uint16_t ByteCount;
+ uint16_t info;
+ uint8_t reserved1; /* BmQset (for future use, BM) */
+} MVPP2_BUFF_HDR;
+
+
+/*
+ * Buffer header info bits: multicast ID in bits 11:0, "last buffer"
+ * flag in bit 12.  The accessor macros fully parenthesize their
+ * argument so expressions such as MVPP2_B_HDR_INFO_IS_LAST(a | b)
+ * expand correctly.
+ */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* SerDes */
+#define MVPP2_SFI_LANE_COUNT 1
+
+/* Net Complex topology: bit-mask values, describing MAC/lane wiring */
+enum MvNetcTopology {
+ MV_NETC_GE_MAC0_RXAUI_L23 = BIT(0),
+ MV_NETC_GE_MAC0_RXAUI_L45 = BIT(1),
+ MV_NETC_GE_MAC0_XAUI = BIT(2),
+ MV_NETC_GE_MAC2_SGMII = BIT(3),
+ MV_NETC_GE_MAC3_SGMII = BIT(4),
+ MV_NETC_GE_MAC3_RGMII = BIT(5),
+};
+
+/* Net Complex initialization phases */
+enum MvNetcPhase {
+ MV_NETC_FIRST_PHASE,
+ MV_NETC_SECOND_PHASE,
+};
+
+enum MvNetcSgmiiXmiMode {
+ MV_NETC_GBE_SGMII,
+ MV_NETC_GBE_XMII,
+};
+
+enum MvNetcMiiMode {
+ MV_NETC_GBE_RGMII,
+ MV_NETC_GBE_MII,
+};
+
+/* SerDes lane pair selection */
+enum MvNetcLanes {
+ MV_NETC_LANE_23,
+ MV_NETC_LANE_45,
+};
+
+
+/* Port related */
+enum MvReset {
+ RESET,
+ UNRESET
+};
+
+enum Mvpp2Command {
+ MVPP2_START, /* Start */
+ MVPP2_STOP, /* Stop */
+ MVPP2_PAUSE, /* Pause */
+ MVPP2_RESTART /* Restart */
+};
+
+enum MvPortDuplex {
+ MV_PORT_DUPLEX_AN, /* auto-negotiated */
+ MV_PORT_DUPLEX_HALF,
+ MV_PORT_DUPLEX_FULL
+};
+
+
+/*
+ * L2 and L3 protocol macros.
+ * NOTE(review): these are sequential driver-internal indices (0..13),
+ * NOT on-wire protocol numbers -- e.g. MV_IPPR_TCP is 0 while the IANA
+ * protocol number for TCP is 6.  Do not confuse them with the system
+ * IPPROTO_xx or ETHERTYPE_xx values.
+ */
+#define MV_IPPR_TCP 0
+#define MV_IPPR_UDP 1
+#define MV_IPPR_IPIP 2
+#define MV_IPPR_ICMPV6 3
+#define MV_IPPR_IGMP 4
+#define MV_ETH_P_IP 5
+#define MV_ETH_P_IPV6 6
+#define MV_ETH_P_PPP_SES 7
+#define MV_ETH_P_ARP 8
+#define MV_ETH_P_8021Q 9
+#define MV_ETH_P_8021AD 10
+#define MV_ETH_P_EDSA 11
+#define MV_PPP_IP 12
+#define MV_PPP_IPV6 13
+
+#endif /* __MVPP2_LIB_HW__ */