author     Jonathan Matthew <jmatthew@cvs.openbsd.org>    2020-03-02 01:59:02 +0000
committer  Jonathan Matthew <jmatthew@cvs.openbsd.org>    2020-03-02 01:59:02 +0000
commit     f8c560f2e0e90c20af00417f6da165e071e2aac5 (patch)
tree       03cd277af8e28230f0172b10e64a880b12cb8544
parent     83eeb1348bb81b380a99b5f9bd957a7d4bd60a7a (diff)
Update ix(4) from freebsd to add support for X553 controllers.
Tested on 82599 (sfp+) and X540 (baseT) by me and Hrvoje Popovski, and on X553 by sthen@ and abieber@, and possibly more via snapshots.
ok sthen@ mikeb@
-rw-r--r--  sys/dev/pci/if_ix.c        217
-rw-r--r--  sys/dev/pci/if_ix.h          4
-rw-r--r--  sys/dev/pci/ixgbe.c        800
-rw-r--r--  sys/dev/pci/ixgbe.h         40
-rw-r--r--  sys/dev/pci/ixgbe_82598.c   41
-rw-r--r--  sys/dev/pci/ixgbe_82599.c   37
-rw-r--r--  sys/dev/pci/ixgbe_phy.c    506
-rw-r--r--  sys/dev/pci/ixgbe_type.h   936
-rw-r--r--  sys/dev/pci/ixgbe_x540.c   179
-rw-r--r--  sys/dev/pci/ixgbe_x550.c  2440
10 files changed, 4055 insertions, 1145 deletions
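
For readers skimming the diff below: the core of the X553 support is a new ixgbe_mac_X550EM_a MAC type plus a per-MAC register-offset table hung off hw->mvals, so common code can keep using one register name while the X550EM_a parts map it to different offsets. The following standalone sketch is illustrative only and is not part of the commit; the device-ID values and helper names here are placeholders, not the real PCI IDs or driver symbols.

#include <stdint.h>
#include <stdio.h>

enum mac_type { MAC_UNKNOWN, MAC_X550EM_X, MAC_X550EM_A };

#define DEV_ID_X550EM_A_KR	0x0001	/* placeholder, not the real PCI id */
#define DEV_ID_X550EM_A_SFP	0x0002	/* placeholder */
#define DEV_ID_X550EM_A_10G_T	0x0003	/* placeholder */

/* stands in for the real IXGBE_MVALS_INIT(_X550EM_a) offset table */
static const uint32_t mvals_X550EM_a[] = { 0 };

struct hw {
	uint16_t	 device_id;
	enum mac_type	 mac_type;
	const uint32_t	*mvals;
};

static int
set_mac_type(struct hw *hw)
{
	switch (hw->device_id) {
	case DEV_ID_X550EM_A_KR:
	case DEV_ID_X550EM_A_SFP:
	case DEV_ID_X550EM_A_10G_T:
		/* every X550EM_a variant shares one MAC type and mvals table */
		hw->mac_type = MAC_X550EM_A;
		hw->mvals = mvals_X550EM_a;
		return (0);
	default:
		return (-1);
	}
}

int
main(void)
{
	struct hw hw = { .device_id = DEV_ID_X550EM_A_SFP };

	if (set_mac_type(&hw) == 0)
		printf("selected mac type %d\n", hw.mac_type);
	return (0);
}

In the patch itself this corresponds to the new ixgbe_mvals_X550EM_a[] table and the IXGBE_DEV_ID_X550EM_A_* cases added to ixgbe_set_mac_type() in ixgbe.c, with register accesses routed through the *_BY_MAC(hw) macros (IXGBE_EEC_BY_MAC, IXGBE_SWSM_BY_MAC, IXGBE_FACTPS_BY_MAC, IXGBE_FWSM_BY_MAC).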
diff --git a/sys/dev/pci/if_ix.c b/sys/dev/pci/if_ix.c
index 8ac16db5102..ef4bff865d1 100644
--- a/sys/dev/pci/if_ix.c
+++ b/sys/dev/pci/if_ix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.c,v 1.160 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: if_ix.c,v 1.161 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
@@ -85,6 +85,15 @@ const struct pci_matchid ixgbe_devices[] = {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR_L },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP_N },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_10G_T },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L }
};
/*********************************************************************
@@ -158,7 +167,6 @@ uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
void ixgbe_setup_vlan_hw_support(struct ix_softc *);
/* Support for pluggable optic modules */
-void ixgbe_setup_optics(struct ix_softc *);
void ixgbe_handle_mod(struct ix_softc *);
void ixgbe_handle_msf(struct ix_softc *);
void ixgbe_handle_phy(struct ix_softc *);
@@ -281,9 +289,6 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
goto err_late;
}
- /* Detect and set physical type */
- ixgbe_setup_optics(sc);
-
bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
IXGBE_ETH_LENGTH_OF_ADDRESS);
@@ -865,8 +870,8 @@ ixgbe_config_gpie(struct ix_softc *sc)
}
if (sc->hw.mac.type == ixgbe_mac_X540 ||
- hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
- hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ sc->hw.mac.type == ixgbe_mac_X550EM_x ||
+ sc->hw.mac.type == ixgbe_mac_X550EM_a) {
/*
* Thermal Failure Detection (X540)
* Link Detection (X552 SFP+, X552/X557-AT)
@@ -906,6 +911,7 @@ ixgbe_config_delay_values(struct ix_softc *sc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
tmp = IXGBE_DV_X540(frame, frame);
break;
default:
@@ -921,6 +927,7 @@ ixgbe_config_delay_values(struct ix_softc *sc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
tmp = IXGBE_LOW_DV_X540(frame);
break;
default:
@@ -1101,6 +1108,7 @@ void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
{
struct ix_softc *sc = ifp->if_softc;
+ uint64_t layer;
ifmr->ifm_active = IFM_ETHER;
ifmr->ifm_status = IFM_AVALID;
@@ -1108,47 +1116,110 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
INIT_DEBUGOUT("ixgbe_media_status: begin");
ixgbe_update_link_status(sc);
- if (LINK_STATE_IS_UP(ifp->if_link_state)) {
- ifmr->ifm_status |= IFM_ACTIVE;
+ if (!LINK_STATE_IS_UP(ifp->if_link_state))
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ layer = sc->phy_layer;
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
+ layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
+ layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
+ layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_100_FULL:
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_10_FULL:
+ ifmr->ifm_active |= IFM_10_T | IFM_FDX;
+ break;
+ }
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
+ layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
+ switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_SFP_CU | IFM_FDX;
+ break;
+ }
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
+ switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
- switch (sc->optics) {
- case IFM_10G_SR: /* multi-speed fiber */
- ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
- break;
- case IFM_10G_LR: /* multi-speed fiber */
- ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
- break;
- default:
- ifmr->ifm_active |= sc->optics | IFM_FDX;
- break;
- }
+ ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
break;
+ }
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
+ layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
+ switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ifmr->ifm_active |= sc->optics | IFM_FDX;
+ ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
break;
}
-
- switch (sc->hw.fc.current_mode) {
- case ixgbe_fc_tx_pause:
- ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
+ switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
break;
- case ixgbe_fc_rx_pause:
- ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
+ }
+ }
+ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
+ switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
break;
- case ixgbe_fc_full:
- ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
- IFM_ETH_TXPAUSE;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
break;
- default:
- ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
- IFM_ETH_TXPAUSE);
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
break;
}
+ } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
+ layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
+ layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
+ switch (sc->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
+ break;
+ }
+ }
+
+ switch (sc->hw.fc.current_mode) {
+ case ixgbe_fc_tx_pause:
+ ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
+ break;
+ case ixgbe_fc_full:
+ ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
+ IFM_ETH_TXPAUSE;
+ break;
+ default:
+ ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
+ IFM_ETH_TXPAUSE);
+ break;
}
}
@@ -1178,23 +1249,37 @@ ixgbe_media_change(struct ifnet *ifp)
case IFM_AUTO:
case IFM_10G_T:
speed |= IXGBE_LINK_SPEED_100_FULL;
- case IFM_10G_SR: /* KR, too */
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IFM_10G_SR:
+ case IFM_10G_KR:
case IFM_10G_LR:
- case IFM_10G_CX4: /* KX4 */
+ case IFM_10G_LRM:
+ case IFM_10G_CX4:
+ case IFM_10G_KX4:
speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ break;
case IFM_10G_SFP_CU:
speed |= IXGBE_LINK_SPEED_10GB_FULL;
break;
case IFM_1000_T:
speed |= IXGBE_LINK_SPEED_100_FULL;
+ speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ break;
case IFM_1000_LX:
case IFM_1000_SX:
- case IFM_1000_CX: /* KX */
+ case IFM_1000_CX:
+ case IFM_1000_KX:
speed |= IXGBE_LINK_SPEED_1GB_FULL;
break;
case IFM_100_TX:
speed |= IXGBE_LINK_SPEED_100_FULL;
break;
+ case IFM_10_T:
+ speed |= IXGBE_LINK_SPEED_10_FULL;
+ break;
default:
return (EINVAL);
}
@@ -1512,44 +1597,6 @@ ixgbe_identify_hardware(struct ix_softc *sc)
/*********************************************************************
*
- * Determine optic type
- *
- **********************************************************************/
-void
-ixgbe_setup_optics(struct ix_softc *sc)
-{
- struct ixgbe_hw *hw = &sc->hw;
- int layer;
-
- layer = hw->mac.ops.get_supported_physical_layer(hw);
-
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
- sc->optics = IFM_10G_T;
- else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
- sc->optics = IFM_1000_T;
- else if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
- sc->optics = IFM_100_TX;
- else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
- layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
- sc->optics = IFM_10G_SFP_CU;
- else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR ||
- layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
- sc->optics = IFM_10G_LR;
- else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
- sc->optics = IFM_10G_SR;
- else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
- layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
- sc->optics = IFM_10G_CX4;
- else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
- sc->optics = IFM_1000_SX;
- else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
- sc->optics = IFM_1000_LX;
- else
- sc->optics = IFM_AUTO;
-}
-
-/*********************************************************************
- *
* Setup the Legacy or MSI Interrupt handler
*
**********************************************************************/
@@ -1699,9 +1746,10 @@ void
ixgbe_add_media_types(struct ix_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- int layer;
+ uint64_t layer;
- layer = hw->mac.ops.get_supported_physical_layer(hw);
+ sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw);
+ layer = sc->phy_layer;
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
@@ -1728,11 +1776,13 @@ ixgbe_add_media_types(struct ix_softc *sc)
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
- ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
- ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
- ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
+ ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
@@ -2682,10 +2732,10 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum &= ~IXGBE_RXCSUM_PCSD;
+ ixgbe_initialize_rss_mapping(sc);
+
/* Setup RSS */
if (sc->num_queues > 1) {
- ixgbe_initialize_rss_mapping(sc);
-
/* RSS and RX IPP Checksum are mutually exclusive */
rxcsum |= IXGBE_RXCSUM_PCSD;
}
@@ -2716,6 +2766,7 @@ ixgbe_initialize_rss_mapping(struct ix_softc *sc)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
table_size = 512;
break;
default:
@@ -3061,6 +3112,7 @@ ixgbe_enable_intr(struct ix_softc *sc)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
mask |= IXGBE_EIMS_ECC;
/* MAC thermal sensor is automatically enabled */
mask |= IXGBE_EIMS_TS;
@@ -3183,6 +3235,7 @@ ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3251,8 +3304,6 @@ ixgbe_handle_mod(struct ix_softc *sc)
sc->dev.dv_xname);
return;
}
- /* Set the optics type so system reports correctly */
- ixgbe_setup_optics(sc);
ixgbe_handle_msf(sc);
}
diff --git a/sys/dev/pci/if_ix.h b/sys/dev/pci/if_ix.h
index 27e772143dd..e50cf217476 100644
--- a/sys/dev/pci/if_ix.h
+++ b/sys/dev/pci/if_ix.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.h,v 1.36 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: if_ix.h,v 1.37 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
@@ -244,7 +244,7 @@ struct ix_softc {
uint32_t shadow_vfta[IXGBE_VFTA_SIZE];
/* Info about the interface */
- uint64_t optics;
+ uint64_t phy_layer;
uint32_t fc; /* local flow ctrl setting */
uint16_t max_frame_size;
uint16_t num_segs;
diff --git a/sys/dev/pci/ixgbe.c b/sys/dev/pci/ixgbe.c
index d34519a8d06..3ac139f6023 100644
--- a/sys/dev/pci/ixgbe.c
+++ b/sys/dev/pci/ixgbe.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe.c,v 1.25 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe.c,v 1.26 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,10 +33,11 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 299200 2016-05-06 22:54:56Z pfg $*/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 299200 2016-05-06 22:54:56Z pfg $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_common.c 326022 2017-11-20 19:36:21Z pfg $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 326022 2017-11-20 19:36:21Z pfg $*/
#include <dev/pci/ixgbe.h>
+#include <dev/pci/ixgbe_type.h>
#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
@@ -61,15 +63,10 @@ int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
-int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg,
- uint32_t lp_reg, uint32_t adv_sym, uint32_t adv_asm,
- uint32_t lp_sym, uint32_t lp_asm);
int32_t prot_autoc_read_generic(struct ixgbe_hw *, bool *, uint32_t *);
int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
-int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
-
/* MBX */
int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
@@ -85,6 +82,27 @@ int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
uint16_t vf_number);
+#define IXGBE_EMPTY_PARAM
+
+static const uint32_t ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM)
+};
+
+static const uint32_t ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X540)
+};
+
+static const uint32_t ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550)
+};
+
+static const uint32_t ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550EM_x)
+};
+
+static const uint32_t ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(_X550EM_a)
+};
/**
* ixgbe_init_ops_generic - Inits function ptrs
@@ -96,7 +114,7 @@ int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
struct ixgbe_mac_info *mac = &hw->mac;
- uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ uint32_t eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
DEBUGFUNC("ixgbe_init_ops_generic");
@@ -148,6 +166,7 @@ int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.disable_mc = ixgbe_disable_mc_generic;
mac->ops.clear_vfta = NULL;
mac->ops.set_vfta = NULL;
+ mac->ops.set_vlvf = NULL;
mac->ops.init_uta_tables = NULL;
mac->ops.enable_rx = ixgbe_enable_rx_generic;
mac->ops.disable_rx = ixgbe_disable_rx_generic;
@@ -155,11 +174,15 @@ int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* Flow Control */
mac->ops.fc_enable = ixgbe_fc_enable_generic;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg;
/* Link */
mac->ops.get_link_capabilities = NULL;
mac->ops.setup_link = NULL;
mac->ops.check_link = NULL;
+ mac->ops.dmac_config = NULL;
+ mac->ops.dmac_update_tcs = NULL;
+ mac->ops.dmac_config_tcs = NULL;
return IXGBE_SUCCESS;
}
@@ -185,16 +208,30 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
- hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
- /* if link is down, assume supported */
- if (link_up)
- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ /* flow control autoneg black list */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ supported = FALSE;
+ break;
+ default:
+ hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
TRUE : FALSE;
- else
- supported = TRUE;
+ else
+ supported = TRUE;
+ }
+
break;
case ixgbe_media_type_backplane:
- supported = TRUE;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
+ supported = FALSE;
+ else
+ supported = TRUE;
break;
case ixgbe_media_type_copper:
/* only some copper devices support flow control autoneg */
@@ -206,6 +243,9 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
supported = TRUE;
break;
default:
@@ -391,8 +431,9 @@ out:
**/
int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- int32_t ret_val = IXGBE_SUCCESS;
+ int32_t ret_val;
uint32_t ctrl_ext;
+ uint16_t device_caps;
DEBUGFUNC("ixgbe_start_hw_generic");
@@ -416,15 +457,32 @@ int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
if (hw->mac.ops.setup_fc) {
ret_val = hw->mac.ops.setup_fc(hw);
- if (ret_val != IXGBE_SUCCESS)
- goto out;
+ if (ret_val != IXGBE_SUCCESS) {
+ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
+ return ret_val;
+ }
+ }
+
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = FALSE;
+ else
+ hw->need_crosstalk_fix = TRUE;
+ break;
+ default:
+ hw->need_crosstalk_fix = FALSE;
+ break;
}
/* Clear adapter stopped flag */
hw->adapter_stopped = FALSE;
-out:
- return ret_val;
+ return IXGBE_SUCCESS;
}
/**
@@ -485,11 +543,14 @@ int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
- if (status == IXGBE_SUCCESS) {
+ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
/* Start the HW */
status = hw->mac.ops.start_hw(hw);
}
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
+
return status;
}
@@ -656,7 +717,8 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw,
{
struct ixgbe_mac_info *mac = &hw->mac;
- hw->bus.type = ixgbe_bus_type_pci_express;
+ if (hw->bus.type == ixgbe_bus_type_unknown)
+ hw->bus.type = ixgbe_bus_type_pci_express;
switch (link_status & IXGBE_PCI_LINK_WIDTH) {
case IXGBE_PCI_LINK_WIDTH_1:
@@ -719,13 +781,15 @@ int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
* ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
* @hw: pointer to the HW structure
*
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
+ * Determines the LAN function id by reading memory-mapped registers and swaps
+ * the port value if requested, and set MAC instance for devices that share
+ * CS4227.
**/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
uint32_t reg;
+ uint16_t ee_ctrl_4;
DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
@@ -734,9 +798,16 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
bus->lan_id = bus->func;
/* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -804,6 +875,9 @@ int32_t ixgbe_led_on_generic(struct ixgbe_hw *hw, uint32_t index)
DEBUGFUNC("ixgbe_led_on_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
@@ -824,6 +898,9 @@ int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index)
DEBUGFUNC("ixgbe_led_off_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
@@ -860,7 +937,7 @@ int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* Check for EEPROM present first.
* If not present leave as none
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (eec & IXGBE_EEC_PRES) {
eeprom->type = ixgbe_eeprom_spi;
@@ -1272,14 +1349,14 @@ int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
status = IXGBE_ERR_SWFW_SYNC;
if (status == IXGBE_SUCCESS) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Request EEPROM Access */
eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (eec & IXGBE_EEC_GNT)
break;
usec_delay(5);
@@ -1288,7 +1365,7 @@ int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Release if grant not acquired */
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
DEBUGOUT("Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1299,7 +1376,7 @@ int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
if (status == IXGBE_SUCCESS) {
/* Clear CS and SK */
eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1329,7 +1406,7 @@ int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = IXGBE_SUCCESS;
break;
@@ -1354,7 +1431,7 @@ int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
status = IXGBE_SUCCESS;
}
@@ -1362,17 +1439,17 @@ int32_t ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the SWESMBI bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
/* Set the SW EEPROM semaphore bit to request access */
swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
/*
* If we set the bit successfully then we got the
* semaphore.
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (swsm & IXGBE_SWSM_SWESMBI)
break;
@@ -1469,15 +1546,15 @@ void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_standby_eeprom");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Toggle CS to flush commands */
eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1497,7 +1574,7 @@ void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/*
* Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1518,7 +1595,7 @@ void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
else
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
@@ -1535,13 +1612,14 @@ void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, uint16_t data,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
}
/**
* ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
* @hw: pointer to hardware structure
+ * @count: number of bits to shift
**/
uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
{
@@ -1558,7 +1636,7 @@ uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
* the value of the "DO" bit. During this "shifting in" process the
* "DI" bit should always be clear.
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
@@ -1566,7 +1644,7 @@ uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count)
data = data << 1;
ixgbe_raise_eeprom_clk(hw, &eec);
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec &= ~(IXGBE_EEC_DI);
if (eec & IXGBE_EEC_DO)
@@ -1592,7 +1670,7 @@ void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
* (setting the SK bit), then delay
*/
*eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1600,7 +1678,7 @@ void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
/**
* ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
* @hw: pointer to hardware structure
- * @eecd: EECD's current value
+ * @eec: EEC's current value
**/
void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
{
@@ -1611,7 +1689,7 @@ void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec)
* delay
*/
*eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
}
@@ -1626,19 +1704,19 @@ void ixgbe_release_eeprom(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_release_eeprom");
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eec |= IXGBE_EEC_CS; /* Pull CS high */
eec &= ~IXGBE_EEC_SK; /* Lower SCK */
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
usec_delay(1);
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1950,10 +2028,11 @@ int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* clear VMDq pool/queue selection for RAR 0 */
- hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
hw->addr_ctrl.overflow_promisc = 0;
hw->addr_ctrl.rar_used_count = 1;
@@ -1982,6 +2061,7 @@ int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* ixgbe_add_uc_addr - Adds a secondary unicast address.
* @hw: pointer to hardware structure
* @addr: new address
+ * @vmdq: VMDq "set" or "pool" index
*
* Adds it to unused receive address register or goes into promiscuous mode.
**/
@@ -2056,7 +2136,7 @@ int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr)
/**
* ixgbe_set_mta - Set bit-vector in multicast table
* @hw: pointer to hardware structure
- * @hash_value: Multicast address hash value
+ * @mc_addr: Multicast address
*
* Sets the bit-vector in the multicast table.
**/
@@ -2212,7 +2292,7 @@ int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
+ hw->mac.ops.fc_autoneg(hw);
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
@@ -2596,6 +2676,7 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
int32_t status = IXGBE_SUCCESS;
uint32_t i, poll;
+ uint16_t value;
DEBUGFUNC("ixgbe_disable_pcie_master");
@@ -2603,7 +2684,8 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ IXGBE_REMOVED(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -2634,8 +2716,10 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
poll = ixgbe_pcie_timeout_poll(hw);
for (i = 0; i < poll; i++) {
usec_delay(100);
- if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
- IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
+ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -2758,6 +2842,7 @@ int32_t ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
/**
* prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
* @hw: pointer to hardware structure
+ * @locked: bool to indicate whether the SW/FW lock was taken
* @reg_val: Value we read from AUTOC
*
* The default case requires no protection so just to the register read.
@@ -2794,7 +2879,7 @@ int32_t prot_autoc_write_generic(struct ixgbe_hw *hw, uint32_t reg_val,
**/
int32_t ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
- int secrxreg;
+ uint32_t secrxreg;
DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
@@ -2841,6 +2926,9 @@ int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
DEBUGFUNC("ixgbe_blink_led_start_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link must be up to auto-blink the LEDs;
* Force it if link is down.
@@ -2886,6 +2974,9 @@ int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index)
DEBUGFUNC("ixgbe_blink_led_stop_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val != IXGBE_SUCCESS)
goto out;
@@ -2929,6 +3020,7 @@ uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -2938,6 +3030,8 @@ uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
@@ -3041,6 +3135,9 @@ int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmd
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto done;
+
if (!mpsar_lo && !mpsar_hi)
goto done;
@@ -3121,72 +3218,72 @@ int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if
+ * vlanid not found
+ *
*
* return the VLVF index where this VLAN id should be placed
*
**/
-int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
+int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan, bool vlvf_bypass)
{
- uint32_t bits = 0;
- uint32_t first_empty_slot = 0;
- int32_t regindex;
+ int32_t regindex, first_empty_slot;
+ uint32_t bits;
/* short cut the special case */
if (vlan == 0)
return 0;
- /*
- * Search for the vlan id in the VLVF entries. Save off the first empty
- * slot found along the way
- */
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ /* if vlvf_bypass is set we don't want to use an empty slot, we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= IXGBE_VLVF_VIEN;
+
+ /* Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
+ */
+ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !(first_empty_slot))
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
}
- /*
- * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
- * in the VLVF. Else use the first empty VLVF register for this
- * vlan id.
- */
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
- "No space in VLVF.\n");
- regindex = IXGBE_ERR_NO_SPACE;
- }
- }
+ /* If we are here then we didn't find the VLAN. Return first empty
+ * slot we found during our search, else error.
+ */
+ if (!first_empty_slot)
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
- return regindex;
+ return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
/**
* ixgbe_set_vfta_generic - Set VLAN filter table
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
- bool vlan_on)
+ bool vlan_on, bool vlvf_bypass)
{
- int32_t regindex;
- uint32_t bitindex;
- uint32_t vfta;
- uint32_t targetbit;
- int32_t ret_val = IXGBE_SUCCESS;
- bool vfta_changed = FALSE;
+ uint32_t regidx, vfta_delta, vfta;
+ int32_t ret_val;
DEBUGFUNC("ixgbe_set_vfta_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/*
@@ -3201,33 +3298,33 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
* bits[11-5]: which register
* bits[4-0]: which bit in the register
*/
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- targetbit = (1 << bitindex);
- vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
- if (vlan_on) {
- if (!(vfta & targetbit)) {
- vfta |= targetbit;
- vfta_changed = TRUE;
- }
- } else {
- if ((vfta & targetbit)) {
- vfta &= ~targetbit;
- vfta_changed = TRUE;
- }
- }
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
+
+ /*
+ * vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update the vfta using an XOR
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
/* Part 2
* Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
*/
- ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
- &vfta_changed);
- if (ret_val != IXGBE_SUCCESS)
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
+ vfta, vlvf_bypass);
+ if (ret_val != IXGBE_SUCCESS) {
+ if (vlvf_bypass)
+ goto vfta_update;
return ret_val;
+ }
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+vfta_update:
+ /* Update VFTA now that we are ready for traffic */
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
return IXGBE_SUCCESS;
}
@@ -3236,21 +3333,25 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
* ixgbe_set_vlvf_generic - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified bit in VLVF table.
**/
int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
- bool vlan_on, bool *vfta_changed)
+ bool vlan_on, uint32_t *vfta_delta, uint32_t vfta,
+ bool vlvf_bypass)
{
- uint32_t vt;
+ uint32_t bits;
+ int32_t vlvf_index;
DEBUGFUNC("ixgbe_set_vlvf_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/* If VT Mode is set
@@ -3260,83 +3361,60 @@ int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (vt & IXGBE_VT_CTL_VT_ENABLE) {
- int32_t vlvf_index;
- uint32_t bits;
-
- vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
- if (vlvf_index < 0)
- return vlvf_index;
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
+ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
+ return IXGBE_SUCCESS;
- /*
- * If there are still bits set in the VLVFB registers
- * for the VLAN ID indicated we need to see if the
- * caller is requesting that we clear the VFTA entry bit.
- * If the caller has requested that we clear the VFTA
- * entry bit but there are still pools/VFs using this VLAN
- * ID entry then ignore the request. We're not worried
- * about the case where we're turning the VFTA VLAN ID
- * entry bit on, only when requested to turn it off as
- * there may be multiple pools and/or VFs using the
- * VLAN ID entry. In that case we cannot clear the
- * VFTA bit until all pools/VFs using that VLAN ID have also
- * been cleared. This will be indicated by "bits" being
- * zero.
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0)
+ return vlvf_index;
+
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
+
+ /* set the pool bit */
+ bits |= 1 << (vind % 32);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (vind % 32);
+
+ if (!bits &&
+ !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
+ /* Clear VFTA first, then disable VLVF. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
*/
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- if ((!vlan_on) && (vfta_changed != NULL)) {
- /* someone wants to clear the vfta entry
- * but some pools/VFs are still using it.
- * Ignore it. */
- *vfta_changed = FALSE;
- }
- } else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ if (*vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
+
+ return IXGBE_SUCCESS;
}
+ /* If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ *vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
+
return IXGBE_SUCCESS;
}
@@ -3365,6 +3443,32 @@ int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
+{
+
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return FALSE;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
* ixgbe_check_mac_link_generic - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -3381,6 +3485,35 @@ int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *spee
DEBUGFUNC("ixgbe_check_mac_link_generic");
+ /* If Crosstalk fix enabled do the sanity check of making sure
+ * the SFP+ cage is full.
+ */
+ if (ixgbe_need_crosstalk_fix(hw)) {
+ uint32_t sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP0;
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = FALSE;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = FALSE;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return IXGBE_SUCCESS;
+ }
+ }
+
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -3422,11 +3555,17 @@ int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *spee
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
@@ -3452,50 +3591,68 @@ int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps
}
/**
- * ixgbe_host_interface_command - Issue command to manageability block
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+uint8_t ixgbe_calculate_checksum(uint8_t *buffer, uint32_t length)
+{
+ uint32_t i;
+ uint8_t sum = 0;
+
+ DEBUGFUNC("ixgbe_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (uint8_t) (0 - sum);
+}
+
+/**
+ * ixgbe_hic_unlocked - Issue command to manageability block unlocked
* @hw: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
+ * @buffer: command to write and where the return status will be placed
* @length: length of buffer, must be multiple of 4 bytes
* @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
*
- * Communicates with the manageability block. On success return IXGBE_SUCCESS
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ *
+ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ * by the caller.
**/
-int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
- uint32_t length, uint32_t timeout,
- bool return_data)
+int32_t ixgbe_hic_unlocked(struct ixgbe_hw *hw, uint32_t *buffer, uint32_t length,
+ uint32_t timeout)
{
- uint32_t hicr, i, bi, fwsts;
- uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
- uint16_t buf_len;
+ uint32_t hicr, i, fwsts;
uint16_t dword_len;
- DEBUGFUNC("ixgbe_host_interface_command");
+ DEBUGFUNC("ixgbe_hic_unlocked");
- if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
+ if (!(hicr & IXGBE_HICR_EN)) {
DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(uint32_t))) != 0) {
+ if (length % sizeof(uint32_t)) {
DEBUGOUT("Buffer length failure, not aligned to dword");
return IXGBE_ERR_INVALID_ARGUMENT;
}
@@ -3520,14 +3677,62 @@ int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
}
/* Check command completion */
- if ((timeout != 0 && i == timeout) ||
+ if ((timeout && i == timeout) ||
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
- DEBUGOUT("Command has failed with no status valid.\n");
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "Command has failed with no status valid.\n");
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (TRUE) or not (FALSE)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ *
+ * Communicates with the manageability block. On success return IXGBE_SUCCESS
+ * else returns semaphore error when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
+ uint32_t length, uint32_t timeout, bool return_data)
+{
+ uint32_t hdr_size = sizeof(struct ixgbe_hic_hdr);
+ struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
+ uint16_t buf_len;
+ int32_t status;
+ uint32_t bi;
+ uint32_t dword_len;
+
+ DEBUGFUNC("ixgbe_host_interface_command");
+
+ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
+
+ status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+ if (status)
+ goto rel_out;
+
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@@ -3538,14 +3743,29 @@ int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
IXGBE_FLEX_MNG, bi));
}
- /* If there is any thing in data position pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- return 0;
+ /*
+ * If there is any thing in data position pull it in
+ * Read Flash command requires reading buffer length from
+ * two byes instead of one byte
+ */
+ if (resp->cmd == 0x30) {
+ for (; bi < dword_len + 2; bi++) {
+ buffer[bi] = letoh32(IXGBE_READ_REG_ARRAY(hw,
+ IXGBE_FLEX_MNG, bi));
+ }
+ buf_len = (((uint16_t)(resp->cmd_or_resp.ret_status) << 3)
+ & 0xF00) | resp->buf_len;
+ hdr_size += (2 << 2);
+ } else {
+ buf_len = resp->buf_len;
+ }
+ if (!buf_len)
+ goto rel_out;
if (length < buf_len + hdr_size) {
DEBUGOUT("Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
@@ -3557,7 +3777,10 @@ int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
IXGBE_FLEX_MNG, bi));
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
/**
@@ -3600,6 +3823,8 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
for (i = 0; i < poll; i++) {
usec_delay(100);
value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (IXGBE_REMOVED(hw->hw_addr))
+ goto out;
if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -3621,10 +3846,21 @@ out:
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
+ uint32_t pfdtxgswc;
uint32_t rxctrl;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = TRUE;
+ } else {
+ hw->mac.set_lben = FALSE;
+ }
+ }
rxctrl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
}
@@ -3632,10 +3868,20 @@ void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
+ uint32_t pfdtxgswc;
uint32_t rxctrl;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.set_lben) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = FALSE;
+ }
+ }
}
/**
@@ -3649,9 +3895,9 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
if (hw->mac.type < ixgbe_mac_82599EB)
return FALSE;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
- fwsm &= IXGBE_FWSM_MODE_MASK;
- return fwsm == IXGBE_FWSM_FW_MODE_PT;
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
+
+ return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}
/**
@@ -3664,7 +3910,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
uint32_t fwsm, manc, factps;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return FALSE;
@@ -3673,7 +3919,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
return FALSE;
if (hw->mac.type <= ixgbe_mac_X540) {
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (factps & IXGBE_FACTPS_MNGCG)
return FALSE;
}
@@ -3703,8 +3949,6 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
/* Mask off requested but non-supported speeds */
- if (!hw->mac.ops.get_link_capabilities)
- return IXGBE_NOT_IMPLEMENTED;
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
if (status != IXGBE_SUCCESS)
return status;
@@ -3718,14 +3962,6 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
speedcnt++;
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber_fixed:
@@ -3780,14 +4016,6 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If we already have link at this speed, just jump out */
- status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
- if (status != IXGBE_SUCCESS)
- return status;
-
- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
- goto out;
-
/* Set the module link speed */
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber_fixed:
@@ -3958,7 +4186,10 @@ int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw)
status = ixgbe_init_ops_X550(hw);
break;
case ixgbe_mac_X550EM_x:
- status = ixgbe_init_ops_X550EM(hw);
+ status = ixgbe_init_ops_X550EM_x(hw);
+ break;
+ case ixgbe_mac_X550EM_a:
+ status = ixgbe_init_ops_X550EM_a(hw);
break;
default:
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
@@ -3982,8 +4213,13 @@ int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_set_mac_type\n");
- if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID)
+ if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported vendor id: %x", hw->vendor_id);
return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw->mvals = ixgbe_mvals_base;
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
@@ -4019,43 +4255,50 @@ int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
- case IXGBE_DEV_ID_82599_VF:
- case IXGBE_DEV_ID_82599_VF_HV:
- hw->mac.type = ixgbe_mac_82599_vf;
- break;
- case IXGBE_DEV_ID_X540_VF:
- case IXGBE_DEV_ID_X540_VF_HV:
- hw->mac.type = ixgbe_mac_X540_vf;
- break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
+ hw->mvals = ixgbe_mvals_X540;
break;
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
hw->mac.type = ixgbe_mac_X550;
+ hw->mvals = ixgbe_mvals_X550;
break;
case IXGBE_DEV_ID_X550EM_X_KX4:
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
hw->mac.type = ixgbe_mac_X550EM_x;
+ hw->mvals = ixgbe_mvals_X550EM_x;
break;
- case IXGBE_DEV_ID_X550_VF:
- case IXGBE_DEV_ID_X550_VF_HV:
- hw->mac.type = ixgbe_mac_X550_vf;
- break;
- case IXGBE_DEV_ID_X550EM_X_VF:
- case IXGBE_DEV_ID_X550EM_X_VF_HV:
- hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ hw->mac.type = ixgbe_mac_X550EM_a;
+ hw->mvals = ixgbe_mvals_X550EM_a;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Unsupported device id: %x",
+ hw->device_id);
break;
}
+ DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, ret_val);
return ret_val;
}
@@ -4110,6 +4353,9 @@ int32_t ixgbe_identify_phy(struct ixgbe_hw *hw)
/**
* ixgbe_check_link - Get link and speed status
* @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: TRUE when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
*
* Reads the links register to determine if link is up and the current speed
**/
@@ -4350,6 +4596,10 @@ int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
usec_delay(mbx->usec_delay);
}
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox message timedout", mbx_id);
+
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@@ -4378,6 +4628,10 @@ int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
usec_delay(mbx->usec_delay);
}
+ if (countdown == 0)
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Polling for VF%d mailbox ack timedout", mbx_id);
+
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@@ -4557,6 +4811,7 @@ int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
@@ -4594,6 +4849,10 @@ int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = IXGBE_SUCCESS;
+ else
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to obtain mailbox lock for VF%d", vf_number);
+
return ret_val;
}
@@ -4690,6 +4949,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
hw->mac.type != ixgbe_mac_X540)
return;
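
[Editor's note] The FWSM/FACTPS reads above now go through _BY_MAC() accessors, which index a per-MAC table of register offsets (hw->mvals) selected in ixgbe_set_mac_type(); one code path can then serve 82599, X540, X550 and the new X550EM_a parts whose NVM/management registers moved. A minimal sketch of that dispatch, using simplified stand-in types and illustrative offsets rather than the driver's real tables:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in index constants; the driver uses IXGBE_<REG>_IDX values. */
	enum { SKETCH_FWSM_IDX, SKETCH_FACTPS_IDX, SKETCH_IDX_COUNT };

	struct sketch_hw {
		const uint32_t	*mvals;		/* per-MAC register offset table */
	};

	/* Offsets below are illustrative placeholders only. */
	static const uint32_t sketch_mvals_base[SKETCH_IDX_COUNT] = {
		[SKETCH_FWSM_IDX]	= 0x10148,
		[SKETCH_FACTPS_IDX]	= 0x10150,
	};
	static const uint32_t sketch_mvals_x550em_a[SKETCH_IDX_COUNT] = {
		[SKETCH_FWSM_IDX]	= 0x15f74,
		[SKETCH_FACTPS_IDX]	= 0x15fec,
	};

	#define SKETCH_FWSM_BY_MAC(hw)	((hw)->mvals[SKETCH_FWSM_IDX])

	int
	main(void)
	{
		struct sketch_hw hw_base = { sketch_mvals_base };
		struct sketch_hw hw_x550em_a = { sketch_mvals_x550em_a };

		/* Same call site, different register offset per MAC generation. */
		printf("base FWSM offset:     0x%05x\n",
		    (unsigned)SKETCH_FWSM_BY_MAC(&hw_base));
		printf("X550EM_a FWSM offset: 0x%05x\n",
		    (unsigned)SKETCH_FWSM_BY_MAC(&hw_x550em_a));
		return 0;
	}
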
diff --git a/sys/dev/pci/ixgbe.h b/sys/dev/pci/ixgbe.h
index 47580974412..5c3b7400d28 100644
--- a/sys/dev/pci/ixgbe.h
+++ b/sys/dev/pci/ixgbe.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe.h,v 1.28 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe.h,v 1.29 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
@@ -32,8 +32,9 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/* FreeBSD: src/sys/dev/ixgbe/ixgbe_osdep.h 251964 Jun 18 21:28:19 2013 UTC */
-/* FreeBSD: src/sys/dev/ixgbe/ixgbe_common.h 251964 Jun 18 21:28:19 2013 UTC */
+/* FreeBSD: src/sys/dev/ixgbe/ixgbe_osdep.h 326022 2017-11-20 19:36:21Z pfg $*/
+/* FreeBSD: src/sys/dev/ixgbe/ixgbe_common.h 326022 2017-11-20 19:36:21Z pfg $*/
+
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -151,6 +152,8 @@ extern void ixgbe_write_pci_cfg(struct ixgbe_hw *, uint32_t, uint16_t);
bus_space_write_4(((struct ixgbe_osdep *)(a)->back)->os_memt, \
((struct ixgbe_osdep *)(a)->back)->os_memh, (reg + ((offset) << 2)), value)
+#define IXGBE_REMOVED(a) (0)
+
/* MAC Operations */
uint16_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -158,10 +161,6 @@ int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-int32_t ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, uint32_t *pba_num);
-int32_t ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, uint8_t *pba_num,
- uint32_t pba_num_size);
-int32_t ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, uint32_t *pba_num_size);
int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr);
int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -215,9 +214,9 @@ int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmd
int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq);
int32_t ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan,
- uint32_t vind, bool vlan_on);
+ uint32_t vind, bool vlan_on, bool);
int32_t ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
- bool vlan_on, bool *vfta_changed);
+ bool vlan_on, uint32_t*, uint32_t, bool);
int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
@@ -230,6 +229,7 @@ int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw,
int32_t ixgbe_host_interface_command(struct ixgbe_hw *hw, uint32_t *buffer,
uint32_t length, uint32_t timeout,
bool return_data);
+int32_t ixgbe_hic_unlocked(struct ixgbe_hw *, uint32_t *buffer, uint32_t length, uint32_t timeout);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -243,6 +243,9 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed);
+int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg, uint32_t lp_reg,
+ uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm);
+
int32_t ixgbe_init_shared_code(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw);
@@ -250,6 +253,8 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_X540(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_X550(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
+int32_t ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+int32_t ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
int32_t ixgbe_set_mac_type(struct ixgbe_hw *hw);
int32_t ixgbe_init_hw(struct ixgbe_hw *hw);
@@ -262,8 +267,6 @@ int32_t ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
int32_t ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-int32_t ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *autoneg);
int32_t ixgbe_set_rar(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
uint32_t vmdq, uint32_t enable_addr);
@@ -316,7 +319,7 @@ bool ixgbe_is_sfp(struct ixgbe_hw *hw);
int32_t ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
int32_t ixgbe_identify_module_generic(struct ixgbe_hw *hw);
int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-int32_t ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+uint64_t ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
int32_t ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
uint16_t *list_offset,
@@ -334,6 +337,19 @@ int32_t ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
uint8_t *eeprom_data);
int32_t ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
uint8_t eeprom_data);
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+int32_t ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t *val, bool lock);
+int32_t ixgbe_read_i2c_combined_generic(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t *val);
+int32_t ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *, uint8_t addr,
+ uint16_t reg, uint16_t *val);
+int32_t ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t val, bool lock);
+int32_t ixgbe_write_i2c_combined_generic(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t val);
+int32_t ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *, uint8_t addr,
+ uint16_t reg, uint16_t val);
/* MBX */
int32_t ixgbe_read_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
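
[Editor's note] The combined-I2C prototypes added above follow a locked/unlocked wrapper pattern: a single *_int() worker takes a lock flag, and two thin wrappers decide whether the SWFW semaphore is taken or the caller already holds it. A sketch of that shape, with hypothetical names and a stubbed semaphore rather than the driver's functions:

	#include <stdint.h>
	#include <stdio.h>

	static int locked;			/* stand-in for the SWFW semaphore */

	static int32_t
	sketch_read_int(uint8_t addr, uint16_t reg, uint16_t *val, int take_lock)
	{
		if (take_lock)
			locked = 1;		/* acquire_swfw_sync() would go here */
		*val = (uint16_t)(addr ^ reg);	/* pretend hardware access */
		if (take_lock)
			locked = 0;		/* release_swfw_sync() would go here */
		return 0;
	}

	/* Normal entry point: takes and releases the semaphore itself. */
	static int32_t
	sketch_read(uint8_t addr, uint16_t reg, uint16_t *val)
	{
		return sketch_read_int(addr, reg, val, 1);
	}

	/* For callers that already hold the semaphore across several accesses. */
	static int32_t
	sketch_read_unlocked(uint8_t addr, uint16_t reg, uint16_t *val)
	{
		return sketch_read_int(addr, reg, val, 0);
	}

	int
	main(void)
	{
		uint16_t v;

		sketch_read(0x40, 0x0002, &v);
		sketch_read_unlocked(0x40, 0x0002, &v);
		printf("read 0x%04x (lock flag now %d)\n", v, locked);
		return 0;
	}
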
diff --git a/sys/dev/pci/ixgbe_82598.c b/sys/dev/pci/ixgbe_82598.c
index 65f4ade9417..372c41cb0dc 100644
--- a/sys/dev/pci/ixgbe_82598.c
+++ b/sys/dev/pci/ixgbe_82598.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe_82598.c,v 1.17 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_82598.c,v 1.18 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,7 +33,8 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 292674 2015-12-23 22:45:17Z sbruno $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 331224 2018-03-19 20:55:05Z erj $*/
+
#include <dev/pci/ixgbe.h>
#include <dev/pci/ixgbe_type.h>
@@ -63,20 +65,20 @@ int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+
int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw);
-void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan,
- uint32_t vind, bool vlan_on);
+ uint32_t vind, bool vlan_on, bool vlvf_bypass);
int32_t ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
int32_t ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t *val);
int32_t ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, uint32_t reg, uint8_t val);
int32_t ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, uint8_t dev_addr,
uint8_t byte_offset, uint8_t *eeprom_data);
int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
- uint8_t *eeprom_data);
-uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+ uint8_t *eeprom_data);
+uint64_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
@@ -160,6 +162,7 @@ int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = ixgbe_set_vfta_82598;
+ mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
/* Flow Control */
@@ -171,7 +174,7 @@ int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+ mac->max_msix_vectors = 0 /*ixgbe_get_pcie_msix_count_generic(hw)*/;
/* SFP+ Module */
phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
@@ -254,7 +257,7 @@ out:
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
- * Disables relaxed ordering, then set pcie completion timeout
+ * Disables relaxed ordering, then sets the PCIe completion timeout
*
**/
int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw)
@@ -555,6 +558,7 @@ out:
/**
* ixgbe_start_mac_link_82598 - Configures MAC link settings
* @hw: pointer to hardware structure
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
*
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
@@ -747,7 +751,7 @@ int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_mac_link_82598");
/* Check to see if speed passed in is supported. */
- ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
+ hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN)
@@ -1000,11 +1004,12 @@ int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
* @vlan: VLAN id to write to VLAN filter
* @vind: VMDq output index that maps queue to VLAN id in VFTA
* @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @vlvf_bypass: boolean flag - unused
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
- bool vlan_on)
+ bool vlan_on, bool vlvf_bypass)
{
uint32_t regindex;
uint32_t bitindex;
@@ -1209,9 +1214,9 @@ int32_t ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, uint8_t byte_offset,
*
* Determines physical layer capabilities of the current configuration.
**/
-uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+uint64_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
- uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ uint64_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
uint32_t pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1252,14 +1257,8 @@ uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- else { /* XAUI */
- if (autoc & IXGBE_AUTOC_KX_SUPP)
- physical_layer |=
- IXGBE_PHYSICAL_LAYER_1000BASE_KX;
- if (autoc & IXGBE_AUTOC_KX4_SUPP)
- physical_layer |=
- IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
- }
+ else /* XAUI */
+ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
break;
case IXGBE_AUTOC_LMS_KX4_AN:
case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
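
[Editor's note] The get_supported_physical_layer functions above change their return type from uint32_t to uint64_t; the value is a bitmask of IXGBE_PHYSICAL_LAYER_* capability flags, presumably widened so additional layer bits fit. A small sketch of building and testing such a mask, with illustrative bit positions that are not the driver's definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative capability bits; the driver defines IXGBE_PHYSICAL_LAYER_*. */
	#define SK_PHYS_LAYER_UNKNOWN		0ULL
	#define SK_PHYS_LAYER_10GBASE_T		(1ULL << 0)
	#define SK_PHYS_LAYER_1000BASE_T	(1ULL << 1)
	#define SK_PHYS_LAYER_10GBASE_KX4	(1ULL << 10)
	#define SK_PHYS_LAYER_2500BASE_KX	(1ULL << 33)	/* needs 64 bits */

	static uint64_t
	sketch_supported_layers(int has_10g_t, int has_kx4)
	{
		uint64_t physical_layer = SK_PHYS_LAYER_UNKNOWN;

		if (has_10g_t)
			physical_layer |= SK_PHYS_LAYER_10GBASE_T |
			    SK_PHYS_LAYER_1000BASE_T;
		if (has_kx4)
			physical_layer |= SK_PHYS_LAYER_10GBASE_KX4;
		return physical_layer;
	}

	int
	main(void)
	{
		uint64_t layers = sketch_supported_layers(1, 0);

		if (layers & SK_PHYS_LAYER_10GBASE_T)
			printf("10GBASE-T supported (mask 0x%016llx)\n",
			    (unsigned long long)layers);
		return 0;
	}
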
diff --git a/sys/dev/pci/ixgbe_82599.c b/sys/dev/pci/ixgbe_82599.c
index e2d8b31cf03..92e0d87e55b 100644
--- a/sys/dev/pci/ixgbe_82599.c
+++ b/sys/dev/pci/ixgbe_82599.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe_82599.c,v 1.18 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_82599.c,v 1.19 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,7 +33,8 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 292674 2015-12-23 22:45:17Z sbruno $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82599.c 326022 2017-11-20 19:36:21Z pfg $*/
+
#include <dev/pci/ixgbe.h>
#include <dev/pci/ixgbe_type.h>
@@ -71,7 +73,7 @@ int32_t ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg,
int32_t ixgbe_start_hw_82599(struct ixgbe_hw *hw);
int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
int32_t ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+uint64_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
int32_t ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, uint32_t regval);
int32_t prot_autoc_read_82599(struct ixgbe_hw *, bool *locked, uint32_t *reg_val);
int32_t prot_autoc_write_82599(struct ixgbe_hw *, uint32_t reg_val, bool locked);
@@ -304,7 +306,7 @@ int32_t prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
/**
* prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
* @hw: pointer to hardware structure
- * @reg_val: value to write to AUTOC
+ * @autoc: value to write to AUTOC
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous proc_autoc_read_82599.
*
@@ -362,7 +364,7 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_82599");
- ret_val = ixgbe_init_phy_ops_generic(hw);
+ ixgbe_init_phy_ops_generic(hw);
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
@@ -380,7 +382,7 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599;
mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599;
mac->ops.start_hw = ixgbe_start_hw_82599;
-
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
mac->ops.prot_autoc_read = prot_autoc_read_82599;
mac->ops.prot_autoc_write = prot_autoc_write_82599;
@@ -409,6 +411,9 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = 0 /*ixgbe_get_pcie_msix_count_generic(hw)*/;
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
+
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
@@ -602,10 +607,11 @@ void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
uint16_t ee_ctrl_2 = 0;
DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
- ixgbe_read_eeprom_82599(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+ if (hw->eeprom.ops.read)
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- if (!ixgbe_mng_present(hw) &&
- (ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT)) {
+ if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -920,7 +926,7 @@ int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
- status = ixgbe_get_link_capabilities_82599(hw, &link_capabilities,
+ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
if (status)
goto out;
@@ -1158,7 +1164,8 @@ mac_reset_top:
* Likewise if we support WoL we don't want change the
* LMS state.
*/
- if (hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw))
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
+ hw->wol_enabled)
hw->mac.orig_autoc =
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
@@ -1317,9 +1324,9 @@ int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+uint64_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
- uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ uint64_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
uint32_t autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
uint32_t pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -1623,6 +1630,7 @@ reset_pipeline_out:
* ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -1680,6 +1688,7 @@ release_i2c_access:
* ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
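
[Editor's note] The prot_autoc_read_82599()/prot_autoc_write_82599() pair referenced above hides the SW/FW locking needed around AUTOC: the read reports whether it had to take the lock, and the matching write is told so it can release it. A sketch of that handoff, with hypothetical names and a stubbed lock rather than the driver's implementation:

	#include <stdint.h>
	#include <stdio.h>

	static int sw_fw_lock;			/* stand-in for the SW/FW semaphore */
	static uint32_t autoc_reg = 0x1234;	/* stand-in register contents */

	static int32_t
	sketch_autoc_read(int *locked, uint32_t *reg_val)
	{
		/* Pretend this MAC needs the lock for a consistent read. */
		sw_fw_lock = 1;
		*locked = 1;			/* tell the caller we hold it */
		*reg_val = autoc_reg;
		return 0;
	}

	static int32_t
	sketch_autoc_write(uint32_t reg_val, int locked)
	{
		autoc_reg = reg_val;
		if (locked)			/* release only if the read took it */
			sw_fw_lock = 0;
		return 0;
	}

	int
	main(void)
	{
		uint32_t val;
		int locked = 0;

		sketch_autoc_read(&locked, &val);
		val |= 0x1;			/* modify link mode bits, say */
		sketch_autoc_write(val, locked);
		printf("AUTOC now 0x%08x, lock held: %d\n", val, sw_fw_lock);
		return 0;
	}
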
diff --git a/sys/dev/pci/ixgbe_phy.c b/sys/dev/pci/ixgbe_phy.c
index 11f066c8e7b..4021522f120 100644
--- a/sys/dev/pci/ixgbe_phy.c
+++ b/sys/dev/pci/ixgbe_phy.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe_phy.c,v 1.21 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_phy.c,v 1.22 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,9 +33,10 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_phy.c 303032 2016-07-19 17:31:48Z sbruno $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_phy.c 331224 2018-03-19 20:55:05Z erj $*/
#include <dev/pci/ixgbe.h>
+#include <dev/pci/ixgbe_type.h>
void ixgbe_i2c_start(struct ixgbe_hw *hw);
void ixgbe_i2c_stop(struct ixgbe_hw *hw);
@@ -47,7 +49,6 @@ void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, uint32_t *i2cctl);
void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, uint32_t *i2cctl);
int32_t ixgbe_set_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl, bool data);
bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl);
-void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -86,8 +87,8 @@ static int32_t ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, uint8_t *byte)
/**
* ixgbe_ones_comp_byte_add - Perform one's complement addition
- * @add1 - addend 1
- * @add2 - addend 2
+ * @add1: addend 1
+ * @add2: addend 2
*
* Returns one's complement 8-bit sum.
*/
@@ -114,7 +115,7 @@ int32_t ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, uint8_t addr,
bool lock)
{
uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
uint8_t csum_byte;
uint8_t high_bits;
@@ -122,8 +123,6 @@ int32_t ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, uint8_t addr,
uint8_t reg_high;
uint8_t csum;
- if (hw->mac.type >= ixgbe_mac_X550)
- max_retry = 3;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
@@ -210,6 +209,7 @@ int32_t ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, uint8_t ad
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE);
}
+
/**
* ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
* @hw: pointer to the hardware structure
@@ -350,6 +350,42 @@ int32_t ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_probe_phy - Probe a single address for a PHY
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to probe
+ *
+ * Returns TRUE if PHY found
+ */
+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, uint16_t phy_addr)
+{
+ uint16_t ext_ability = 0;
+
+ if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
+ return FALSE;
+ }
+
+ if (ixgbe_get_phy_id(hw))
+ return FALSE;
+
+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability &
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
+ hw->phy.type = ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type = ixgbe_phy_generic;
+ }
+
+ return TRUE;
+}
+
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -358,8 +394,7 @@ int32_t ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
int32_t ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
int32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
- uint32_t phy_addr;
- uint16_t ext_ability = 0;
+ uint16_t phy_addr;
DEBUGFUNC("ixgbe_identify_phy_generic");
@@ -370,45 +405,33 @@ int32_t ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
}
- if (hw->phy.type == ixgbe_phy_unknown) {
- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
- if (ixgbe_validate_phy_addr(hw, phy_addr)) {
- hw->phy.addr = phy_addr;
- ixgbe_get_phy_id(hw);
- hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &ext_ability);
- if (ext_ability &
- (IXGBE_MDIO_PHY_10GBASET_ABILITY |
- IXGBE_MDIO_PHY_1000BASET_ABILITY))
- hw->phy.type =
- ixgbe_phy_cu_unknown;
- else
- hw->phy.type =
- ixgbe_phy_generic;
- }
+ if (hw->phy.type != ixgbe_phy_unknown)
+ return IXGBE_SUCCESS;
- status = IXGBE_SUCCESS;
- break;
- }
- }
+ if (hw->phy.nw_mng_if_sel) {
+ phy_addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ if (ixgbe_probe_phy(hw, phy_addr))
+ return IXGBE_SUCCESS;
+ else
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
- /* Certain media types do not have a phy so an address will not
- * be found and the code will take this path. Caller has to
- * decide if it is an error or not.
- */
- if (status != IXGBE_SUCCESS) {
- hw->phy.addr = 0;
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_probe_phy(hw, phy_addr)) {
+ status = IXGBE_SUCCESS;
+ break;
}
- } else {
- status = IXGBE_SUCCESS;
}
+ /* Certain media types do not have a phy so an address will not
+ * be found and the code will take this path. Caller has to
+ * decide if it is an error or not.
+ */
+ if (status != IXGBE_SUCCESS)
+ hw->phy.addr = 0;
+
return status;
}
@@ -444,6 +467,7 @@ int32_t ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
/**
* ixgbe_validate_phy_addr - Determines phy address is valid
* @hw: pointer to hardware structure
+ * @phy_addr: PHY address
*
**/
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, uint32_t phy_addr)
@@ -460,6 +484,8 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, uint32_t phy_addr)
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = TRUE;
+ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
+
return valid;
}
@@ -489,12 +515,15 @@ int32_t ixgbe_get_phy_id(struct ixgbe_hw *hw)
hw->phy.revision =
(uint32_t)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
+ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
return status;
}
/**
* ixgbe_get_phy_type_from_id - Get the phy type
- * @hw: pointer to hardware structure
+ * @phy_id: PHY ID information
*
**/
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(uint32_t phy_id)
@@ -507,7 +536,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(uint32_t phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID1:
case X550_PHY_ID2:
case X550_PHY_ID3:
case X540_PHY_ID:
@@ -520,14 +548,17 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(uint32_t phy_id)
phy_type = ixgbe_phy_nl;
break;
case X557_PHY_ID:
+ case X557_PHY_ID2:
phy_type = ixgbe_phy_x550em_ext_t;
break;
+ case IXGBE_M88E1500_E_PHY_ID:
+ case IXGBE_M88E1543_E_PHY_ID:
+ phy_type = ixgbe_phy_ext_1g_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
}
-
- DEBUGOUT1("phy type found is %d\n", phy_type);
return phy_type;
}
@@ -573,11 +604,30 @@ int32_t ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
- usec_delay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ usec_delay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
}
}
@@ -596,6 +646,7 @@ out:
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
int32_t ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, uint32_t reg_addr,
@@ -621,12 +672,13 @@ int32_t ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, uint32_t reg_addr,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
+ break;
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -656,6 +708,7 @@ int32_t ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, uint32_t reg_addr,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -675,6 +728,7 @@ int32_t ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, uint32_t reg_addr,
* using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
int32_t ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, uint32_t reg_addr,
@@ -685,13 +739,12 @@ int32_t ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, uint32_t reg_addr,
DEBUGFUNC("ixgbe_read_phy_reg_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
- status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
- phy_data);
- hw->mac.ops.release_swfw_sync(hw, gssr);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
@@ -787,7 +840,7 @@ int32_t ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, uint32_t reg_addr,
DEBUGFUNC("ixgbe_write_phy_reg_generic");
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
- status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
@@ -814,91 +867,63 @@ int32_t ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
- if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
- /* Set or unset auto-negotiation 5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_5GB_FULL)
- autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
- /* Set or unset auto-negotiation 2.5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_2_5GB_FULL)
- autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
}
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
- autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
- IXGBE_MII_100BASE_T_ADVERTISE_HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
/* Blocked by MNG FW so don't reset PHY */
if (ixgbe_check_reset_blocked(hw))
@@ -920,6 +945,7 @@ int32_t ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
+ * @autoneg_wait_to_complete: unused
**/
int32_t ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -948,6 +974,9 @@ int32_t ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_100_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed & IXGBE_LINK_SPEED_10_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
@@ -985,6 +1014,7 @@ int32_t ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
break;
default:
@@ -1019,6 +1049,8 @@ int32_t ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
/**
* ixgbe_check_phy_link_tnx - Determine link and speed status
* @hw: pointer to hardware structure
+ * @speed: current link speed
+ * @link_up: TRUE if link is up, FALSE otherwise
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
@@ -1582,7 +1614,7 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
/*
- * We do not limit the definition of "supported SPF modules"
+ * We do not limit the definition of "supported SFP modules"
* to the vendor/make whitelist.
*/
status = IXGBE_SUCCESS;
@@ -1606,9 +1638,9 @@ err_read_i2c_eeprom:
*
* Determines physical layer capabilities of the current SFP.
*/
-int32_t ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+uint64_t ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
{
- uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ uint64_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
uint8_t comp_codes_10g = 0;
uint8_t comp_codes_1g = 0;
@@ -1827,7 +1859,7 @@ int32_t ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_qsfp_unknown;
/*
- * We do not limit the definition of "supported SPF modules"
+ * We do not limit the definition of "supported QSFP modules"
* to the vendor/make whitelist.
*/
status = IXGBE_SUCCESS;
@@ -2006,7 +2038,9 @@ bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, uint8_t offset, uint8_t addr)
* ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
+ * @lock: TRUE to take and release the semaphore
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
@@ -2096,6 +2130,7 @@ fail:
* ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2112,6 +2147,7 @@ int32_t ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
* ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
+ * @dev_addr: address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface at
@@ -2128,6 +2164,7 @@ int32_t ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, uint8_t byte_o
* ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
+ * @lock: TRUE to take and release the semaphore
*
@@ -2199,6 +2236,7 @@ fail:
* ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -2215,6 +2253,7 @@ int32_t ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
* ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
+ * @dev_addr: address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface at
@@ -2236,16 +2275,11 @@ int32_t ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, uint8_t byte_
**/
void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- uint32_t i2cctl;
+ uint32_t i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
DEBUGFUNC("ixgbe_i2c_start");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_X550);
- i2cctl |= IXGBE_I2C_BB_EN_X550;
- } else
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw);
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -2276,16 +2310,13 @@ void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- uint32_t i2cctl;
+ uint32_t i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ uint32_t data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ uint32_t clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+ uint32_t bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_i2c_stop");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x)
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_X550);
- else
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -2298,12 +2329,10 @@ void ixgbe_i2c_stop(struct ixgbe_hw *hw)
/* bus free time between stop and start (4.7us)*/
usec_delay(IXGBE_I2C_T_BUF);
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl &= ~IXGBE_I2C_BB_EN_X550;
- i2cctl |= IXGBE_I2C_DATA_OE_N_EN_X550 |
- IXGBE_I2C_CLK_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_X550, i2cctl);
+ if (bb_en_bit || data_oe_bit || clk_oe_bit) {
+ i2cctl &= ~bb_en_bit;
+ i2cctl |= data_oe_bit | clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
}
}
@@ -2356,16 +2385,10 @@ int32_t ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, uint8_t data)
}
/* Release SDA line (set high) */
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_X550);
- i2cctl |= IXGBE_I2C_DATA_OUT_X550 | IXGBE_I2C_DATA_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_X550, i2cctl);
- } else {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
- i2cctl |= IXGBE_I2C_DATA_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
- }
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -2379,24 +2402,20 @@ int32_t ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, uint8_t data)
**/
int32_t ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
+ uint32_t data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
int32_t status = IXGBE_SUCCESS;
uint32_t i = 0;
- uint32_t i2cctl, i2cctl_reg;
+ uint32_t i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
uint32_t timeout = 10;
bool ack = 1;
DEBUGFUNC("ixgbe_get_i2c_ack");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl_reg = IXGBE_I2CCTL_X550;
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
- i2cctl |= IXGBE_I2C_DATA_OUT_X550 | IXGBE_I2C_DATA_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, i2cctl_reg, i2cctl);
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
- } else {
- i2cctl_reg = IXGBE_I2CCTL;
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
}
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -2406,7 +2425,7 @@ int32_t ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that ACK in I2C spec is
* transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ack = ixgbe_get_i2c_data(hw, &i2cctl);
usec_delay(1);
@@ -2436,28 +2455,23 @@ int32_t ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
int32_t ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- uint32_t i2cctl, i2cctl_reg;
+ uint32_t i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ uint32_t data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_clock_in_i2c_bit");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl_reg = IXGBE_I2CCTL_X550;
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
- i2cctl |= IXGBE_I2C_DATA_OUT_X550 | IXGBE_I2C_DATA_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, i2cctl_reg, i2cctl);
+ if (data_oe_bit) {
+ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
- } else {
- i2cctl_reg = IXGBE_I2CCTL;
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
}
-
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
usec_delay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
*data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -2478,16 +2492,10 @@ int32_t ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
int32_t ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
int32_t status;
- uint32_t i2cctl;
+ uint32_t i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
DEBUGFUNC("ixgbe_clock_out_i2c_bit");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x)
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_X550);
- else
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
-
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == IXGBE_SUCCESS) {
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -2520,30 +2528,28 @@ int32_t ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
**/
void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, uint32_t *i2cctl)
{
- uint32_t i2cctl_clk_in, i2cctl_reg;
+ uint32_t clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
uint32_t i = 0;
uint32_t timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+ uint32_t i2cctl_r = 0;
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl_reg = IXGBE_I2CCTL_X550;
- *i2cctl |= IXGBE_I2C_CLK_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, i2cctl_reg, *i2cctl);
- *i2cctl |= IXGBE_I2C_CLK_OUT_X550;
- i2cctl_clk_in = IXGBE_I2C_CLK_IN_X550;
- } else {
- i2cctl_reg = IXGBE_I2CCTL;
- *i2cctl |= IXGBE_I2C_CLK_OUT;
- i2cctl_clk_in = IXGBE_I2C_CLK_IN;
+ DEBUGFUNC("ixgbe_raise_i2c_clk");
+
+ if (clk_oe_bit) {
+ *i2cctl |= clk_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
}
for (i = 0; i < timeout; i++) {
- IXGBE_WRITE_REG(hw, i2cctl_reg, *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
usec_delay(IXGBE_I2C_T_RISE);
- if (IXGBE_READ_REG(hw, i2cctl_reg) & i2cctl_clk_in)
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
break;
}
}
@@ -2560,15 +2566,10 @@ void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, uint32_t *i2cctl)
{
DEBUGFUNC("ixgbe_lower_i2c_clk");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- *i2cctl &= ~(IXGBE_I2C_CLK_OUT_X550 |
- IXGBE_I2C_CLK_OE_N_EN_X550);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_X550, *i2cctl);
- } else {
- *i2cctl &= ~IXGBE_I2C_CLK_OUT;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
- }
+ *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw));
+ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -2586,28 +2587,18 @@ void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, uint32_t *i2cctl)
**/
int32_t ixgbe_set_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl, bool data)
{
- uint32_t i2cctl_reg;
+ uint32_t data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
int32_t status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_i2c_data");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl_reg = IXGBE_I2CCTL_X550;
- if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT_X550;
- else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT_X550;
- *i2cctl &= ~IXGBE_I2C_DATA_OE_N_EN_X550;
- } else {
- i2cctl_reg = IXGBE_I2CCTL;
- if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT;
- else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT;
- }
+ if (data)
+ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ else
+ *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw));
+ *i2cctl &= ~data_oe_bit;
- IXGBE_WRITE_REG(hw, i2cctl_reg, *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
@@ -2615,15 +2606,14 @@ int32_t ixgbe_set_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl, bool data)
if (!data) /* Can't verify data in this case */
return IXGBE_SUCCESS;
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- *i2cctl |= IXGBE_I2C_DATA_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, i2cctl_reg, *i2cctl);
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
}
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, i2cctl_reg);
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
status = IXGBE_ERR_I2C;
ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
@@ -2644,22 +2634,19 @@ int32_t ixgbe_set_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl, bool data)
**/
bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, uint32_t *i2cctl)
{
- uint32_t i2cctl_data_in;
bool data;
+ uint32_t data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw);
DEBUGFUNC("ixgbe_get_i2c_data");
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x) {
- i2cctl_data_in = IXGBE_I2C_DATA_IN_X550;
- *i2cctl |= IXGBE_I2C_DATA_OE_N_EN_X550;
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_X550, *i2cctl);
+ if (data_oe_bit) {
+ *i2cctl |= data_oe_bit;
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
usec_delay(IXGBE_I2C_T_FALL);
- } else
- i2cctl_data_in = IXGBE_I2C_DATA_IN;
+ }
- if (*i2cctl & i2cctl_data_in)
+ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
data = 1;
else
data = 0;
@@ -2682,12 +2669,7 @@ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_i2c_bus_clear");
ixgbe_i2c_start(hw);
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x)
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_X550);
- else
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
ixgbe_set_i2c_data(hw, &i2cctl, 1);
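
[Editor's note] The combined I2C accessors in this file protect each transfer with a one's-complement checksum: ixgbe_ones_comp_byte_add() sums bytes with the carry folded back in, and the complement of the running sum is sent as the final byte, so summing everything the same way on the other end lands on 0xFF. A stand-alone worked sketch of that arithmetic (not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* One's-complement 8-bit add: fold the carry back into the low byte. */
	static uint8_t
	ones_comp_byte_add(uint8_t add1, uint8_t add2)
	{
		uint16_t sum = add1 + add2;

		sum = (sum & 0xff) + (sum >> 8);
		return (uint8_t)sum;
	}

	int
	main(void)
	{
		uint16_t reg = 0x01a4;		/* example register address */
		uint8_t reg_high, csum, check;

		/* High bits of the address, with bit 0 marking a read (as above). */
		reg_high = ((reg >> 7) & 0xfe) | 1;
		csum = ones_comp_byte_add(reg_high, reg & 0xff);
		csum = (uint8_t)~csum;		/* transmitted checksum byte */

		/* Receiver: summing the same bytes plus the checksum yields 0xff. */
		check = ones_comp_byte_add(ones_comp_byte_add(reg_high, reg & 0xff),
		    csum);
		printf("reg_high=0x%02x csum=0x%02x check=0x%02x\n",
		    reg_high, csum, check);
		return 0;
	}
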
diff --git a/sys/dev/pci/ixgbe_type.h b/sys/dev/pci/ixgbe_type.h
index 32a79d26bc5..d98c6e4e0b0 100644
--- a/sys/dev/pci/ixgbe_type.h
+++ b/sys/dev/pci/ixgbe_type.h
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe_type.h,v 1.34 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_type.h,v 1.35 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,13 +33,50 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_type.h 299200 2016-05-06 22:54:56Z pfg $*/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_phy.h 292674 2015-12-23 22:45:17Z sbruno $*/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.h 283883 2015-06-01 17:43:34Z jfv $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_type.h 331224 2018-03-19 20:55:05Z erj $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_phy.h 326022 2017-11-20 19:36:21Z pfg $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.h 326022 2017-11-20 19:36:21Z pfg $*/
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - IXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - IXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where a timeout occurs, a lock cannot be obtained, or data
+ * is not received within the time limit.
+ *
+ * - IXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - IXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing
+ * something. The category is not intended for errors due to bad arguments, or
+ * due to unsupported features. It should be used when a state occurs which
+ * prevents action but is not a serious issue.
+ *
+ * - IXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected the
+ * argument is wrong or incorrect.
+ *
+ * - IXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -70,11 +108,11 @@
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
-#define IXGBE_SUBDEV_ID_82599_SFP_LOM 0x06EE
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -95,24 +133,47 @@
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
#define IXGBE_DEV_ID_X550T1 0x15D1
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
+#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA
+#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_CAT(r,m) IXGBE_##r##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)])
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL 0x00028
+#define IXGBE_I2CCTL_82599 0x00028
+#define IXGBE_I2CCTL IXGBE_I2CCTL_82599
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599
#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
#define IXGBE_PHY_GPIO 0x00028
#define IXGBE_MAC_GPIO 0x00030
#define IXGBE_PHYINT_STATUS0 0x00100
@@ -125,18 +186,44 @@
#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
-#define IXGBE_EERD 0x10014
-#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
+#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC
+#define IXGBE_EEC_X550 IXGBE_EEC
+#define IXGBE_EEC_X550EM_x IXGBE_EEC
+#define IXGBE_EEC_X550EM_a 0x15FF8
+#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC)
+
+#define IXGBE_EERD 0x10014
+#define IXGBE_EEWR 0x10018
+
+#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA
+#define IXGBE_FLA_X550 IXGBE_FLA
+#define IXGBE_FLA_X550EM_x IXGBE_FLA
+#define IXGBE_FLA_X550EM_a 0x15F68
+#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
+
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
#define IXGBE_FLMNGCTL 0x10118
#define IXGBE_FLMNGDATA 0x1011C
#define IXGBE_FLMNGCNT 0x10120
#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
-#define IXGBE_SRAMREL 0x10210
+
+#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC
+#define IXGBE_GRC_X550 IXGBE_GRC
+#define IXGBE_GRC_X550EM_x IXGBE_GRC
+#define IXGBE_GRC_X550EM_a 0x15F64
+#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL
+#define IXGBE_SRAMREL_X550EM_a 0x15F6C
+#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
+
#define IXGBE_PHYDBG 0x10218
/* General Receive Control */
@@ -148,20 +235,97 @@
/* I2CCTL Bit Masks */
#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN
#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT
#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN
#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT
#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN
#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN
#define IXGBE_I2C_BB_EN_X550 0x00000100
-#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+#define IXGBE_I2C_CLK_OE_N_EN 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+
+#define NVM_OROM_OFFSET 0x17
+#define NVM_OROM_BLK_LOW 0x83
+#define NVM_OROM_BLK_HI 0x84
+#define NVM_OROM_PATCH_MASK 0xFF
+#define NVM_OROM_SHIFT 8
+
+#define NVM_VER_MASK 0x00FF /* version mask */
+#define NVM_VER_SHIFT 8 /* version bit shift */
+#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW 0x2D /* version low order word */
+#define NVM_ETK_OFF_HI 0x2E /* version high order word */
+#define NVM_ETK_SHIFT 16 /* high version word shift */
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETK_VALID 0x8000
+#define NVM_INVALID_PTR 0xFFFF
+#define NVM_VER_SIZE 32 /* version string size */
+
+struct ixgbe_nvm_version {
+ uint32_t etk_id;
+ uint8_t nvm_major;
+ uint16_t nvm_minor;
+ uint8_t nvm_id;
+
+ bool oem_valid;
+ uint8_t oem_major;
+ uint8_t oem_minor;
+ uint16_t oem_release;
+
+ bool or_valid;
+ uint8_t or_major;
+ uint16_t or_build;
+ uint8_t or_patch;
+
+};
+
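/*
 * Editorial sketch (not part of this commit): composing the eTrack ID
 * from the two EEPROM words named above.  hw->eeprom.ops.read() is the
 * existing EEPROM accessor; error handling is omitted for brevity.
 */
uint16_t etk_lo = 0, etk_hi = 0;
struct ixgbe_nvm_version nvm_ver;

hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_lo);
hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_hi);
if (etk_lo != NVM_VER_INVALID && etk_hi != NVM_VER_INVALID)
	nvm_ver.etk_id = etk_lo | ((uint32_t)etk_hi << NVM_ETK_SHIFT);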
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
#define IXGBE_EICS 0x00808
@@ -328,6 +492,13 @@
#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */
#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
+/* Registers for setting up RSS on X550 with SRIOV
+ * _p - pool number (0..63)
+ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA)
+ */
+#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4))
+#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40))
+#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40))
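/*
 * Worked example (editorial): RSS key word 2 of pool 5 lives at
 * IXGBE_PFVFRSSRK(2, 5) = 0x018000 + (2 * 4) + (5 * 0x40) = 0x18148.
 */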
/* Flow Director registers */
#define IXGBE_FDIRCTRL 0x0EE00
@@ -418,6 +589,12 @@
#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
+/* masks for accessing VXLAN and GENEVE UDP ports */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+
#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
/* Ext Flexible Host Filter Table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
@@ -425,7 +602,6 @@
/* Four Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
-
/* Six Flexible Filters are supported */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6
/* Eight Flexible Filters are supported */
@@ -523,6 +699,13 @@
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
/* Power Management */
+/* DMA Coalescing configuration */
+struct ixgbe_dmac_config {
+ uint16_t watchdog_timer; /* usec units */
+ uint32_t link_speed;
+ uint8_t fcoe_tc;
+ uint8_t num_tcs;
+};
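/*
 * Editorial sketch (not part of this commit): populating the DMA
 * coalescing configuration before invoking the dmac_config hook added
 * to ixgbe_mac_operations later in this header.  The field values are
 * illustrative only.
 */
hw->mac.dmac_config.watchdog_timer = 1000;	/* usec */
hw->mac.dmac_config.link_speed = IXGBE_LINK_SPEED_10GB_FULL;
hw->mac.dmac_config.fcoe_tc = 0;
hw->mac.dmac_config.num_tcs = 1;
if (hw->mac.ops.dmac_config != NULL)
	hw->mac.ops.dmac_config(hw);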
/*
* DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed.
@@ -565,8 +748,6 @@
#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */
#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */
-
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -704,7 +885,6 @@
#define IXGBE_RTTBCNRTT 0x05150
#define IXGBE_RTTBCNRD 0x0498C
-
/* FCoE DMA Context Registers */
/* FCoE Direct DMA Context */
#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10))
@@ -883,7 +1063,7 @@
#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
+#define IXGBE_LSWFW 0x15F14
#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
#define IXGBE_BMCIPVAL 0x05060
#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
@@ -925,17 +1105,65 @@
#define IXGBE_PCIEPIPEDAT 0x11008
#define IXGBE_GSCL_1 0x11010
#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1
+#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2
#define IXGBE_GSCL_3 0x11018
#define IXGBE_GSCL_4 0x1101C
#define IXGBE_GSCN_0 0x11020
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0
+#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1
+#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2
+#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3
#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550 0x11800
+#define IXGBE_GSCL_2_X550 0x11804
+#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550 0x11820
+#define IXGBE_GSCN_1_X550 0x11824
+#define IXGBE_GSCN_2_X550 0x11828
+#define IXGBE_GSCN_3_X550 0x1182C
+#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550
+#define IXGBE_FACTPS_X550EM_a 0x15FEC
+#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS)
+
#define IXGBE_PCIEANACTL 0x11040
#define IXGBE_SWSM 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM
+#define IXGBE_SWSM_X550 IXGBE_SWSM
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM
+#define IXGBE_SWSM_X550EM_a 0x15F70
+#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM)
+
#define IXGBE_FWSM 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM
+#define IXGBE_FWSM_X550 IXGBE_FWSM
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM
+#define IXGBE_FWSM_X550EM_a 0x15F74
+#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM)
+
#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC
+#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78
+#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC)
+
#define IXGBE_GSSR 0x10160
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
@@ -947,14 +1175,40 @@
#define IXGBE_GSCL_6_82599 0x11034
#define IXGBE_GSCL_7_82599 0x11038
#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599
+#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599
+#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599
+#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599
#define IXGBE_PHYADR_82599 0x11040
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
+#define IXGBE_CIAA 0x11088
+#define IXGBE_CIAD 0x1108C
+#define IXGBE_CIAA_82599 IXGBE_CIAA
+#define IXGBE_CIAD_82599 IXGBE_CIAD
+#define IXGBE_CIAA_X540 IXGBE_CIAA
+#define IXGBE_CIAD_X540 IXGBE_CIAD
+#define IXGBE_GSCL_5_X550 0x11810
+#define IXGBE_GSCL_6_X550 0x11814
+#define IXGBE_GSCL_7_X550 0x11818
+#define IXGBE_GSCL_8_X550 0x1181C
+#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550
#define IXGBE_CIAA_X550 0x11508
#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
+#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
+#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
+#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD)
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1098,6 +1352,7 @@
#define IXGBE_XPCSS 0x04290
#define IXGBE_MFLCN 0x04294
#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
#define IXGBE_MACS 0x0429C
#define IXGBE_AUTOC 0x042A0
#define IXGBE_LINKS 0x042A4
@@ -1284,6 +1539,7 @@
#define IXGBE_CORECTL_WRITE_CMD 0x00010000
/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1349,6 +1605,7 @@
#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
+#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */
#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
@@ -1408,16 +1665,17 @@
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID1 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
+#define X557_PHY_ID2 0x01540250
#define AQ_FW_REV 0x20
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -1438,13 +1696,26 @@
#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
#define IXGBE_GPIE_EIAME 0x40000000
#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_LLI_DELAY_SHIFT 7
#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_GPIE_LLI_DELAY_SHIFT 7
#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
@@ -1474,7 +1745,16 @@ enum {
};
/* Transmit Flow Control status */
-#define IXGBE_TFCS_TXON 0x00000001
+#define IXGBE_TFCS_TXON 0x00000001
+#define IXGBE_TFCS_TXOFF 0x00000001
+#define IXGBE_TFCS_TXOFF0 0x00000100
+#define IXGBE_TFCS_TXOFF1 0x00000200
+#define IXGBE_TFCS_TXOFF2 0x00000400
+#define IXGBE_TFCS_TXOFF3 0x00000800
+#define IXGBE_TFCS_TXOFF4 0x00001000
+#define IXGBE_TFCS_TXOFF5 0x00002000
+#define IXGBE_TFCS_TXOFF6 0x00004000
+#define IXGBE_TFCS_TXOFF7 0x00008000
/* TCP Timer */
#define IXGBE_TCPTIMER_KS 0x00000100
@@ -1512,6 +1792,8 @@ enum {
#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
@@ -1611,6 +1893,22 @@ enum {
#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */
#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EIMS_GPI_SDP0_X540 IXGBE_EICR_GPI_SDP0_X540 /* deprecated */
+#define IXGBE_EIMS_GPI_SDP1_X540 IXGBE_EICR_GPI_SDP1_X540 /* deprecated */
+#define IXGBE_EIMS_GPI_SDP2_X540 IXGBE_EICR_GPI_SDP2_X540 /* deprecated */
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -1628,10 +1926,10 @@ enum {
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP0_X540 IXGBE_EICR_GPI_SDP0_X540
-#define IXGBE_EICS_GPI_SDP1_X540 IXGBE_EICR_GPI_SDP1_X540
-#define IXGBE_EICS_GPI_SDP2_X540 IXGBE_EICR_GPI_SDP2_X540
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1650,10 +1948,10 @@ enum {
#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP0_X540 IXGBE_EICR_GPI_SDP0_X540
-#define IXGBE_EIMS_GPI_SDP1_X540 IXGBE_EICR_GPI_SDP1_X540
-#define IXGBE_EIMS_GPI_SDP2_X540 IXGBE_EICR_GPI_SDP2_X540
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1670,11 +1968,11 @@ enum {
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP0_X540 IXGBE_EICR_GPI_SDP0_X540
-#define IXGBE_EIMC_GPI_SDP1_X540 IXGBE_EICR_GPI_SDP1_X540
-#define IXGBE_EIMC_GPI_SDP2_X540 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
+#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw)
+#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw)
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
@@ -1683,6 +1981,7 @@ enum {
#define IXGBE_EIMS_ENABLE_MASK ( \
IXGBE_EIMS_RTX_QUEUE | \
IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
IXGBE_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
@@ -1936,6 +2235,7 @@ enum {
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -1981,6 +2281,7 @@ enum {
#define IXGBE_GSSR_FLASH_SM 0x0010
#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -2023,6 +2324,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2050,7 +2354,9 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2120,6 +2426,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2323,6 +2630,7 @@ enum {
#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */
#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -2494,6 +2802,7 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */
#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
@@ -2546,6 +2855,68 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
+#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
+#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P)))
+#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P)))
+#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P)))
+#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P)))
+#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P)))
+#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P)))
+#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P)))
+#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P)))
+#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P)))
+#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P)))
+#define IXGBE_PVTEITR(P) (((P) < 24) ? (0x00820 + ((P) * 4)) : \
+ (0x012300 + (((P) - 24) * 4)))
+#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P)))
+#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P)))
+#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P)))
+#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P)))
+#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \
+ : (0x0D000 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \
+ : (0x0D004 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \
+ : (0x0D008 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \
+ : (0x0D010 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \
+ : (0x0D018 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \
+ : (0x0D028 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \
+ : (0x0D014 + (0x40 * ((P) - 64))))
+#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
+#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
+#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
+#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? (0x0100C + (0x40 * (P))) \
+ : (0x0D00C + (0x40 * ((P) - 64))))
+#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+
+#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
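/*
 * Worked example (editorial): with 2 queues per pool, VF 3, queue 1,
 * IXGBE_PVFTDWBALn(2, 3, 1) = IXGBE_PVFTDWBAL(7)
 *                           = 0x06038 + (0x40 * 7) = 0x061F8.
 */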
/* Little Endian defines */
#ifndef __le16
@@ -2675,6 +3046,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -2687,10 +3059,72 @@ enum ixgbe_fdir_pballoc_type {
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0xA
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
#define FW_INT_PHY_REQ_CMD 0xB
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
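/*
 * Editorial sketch (not part of this commit): composing the first data
 * word of a FW_PHY_ACT_SETUP_LINK activity from the flags above,
 * advertising 10G and 1G with RX/TX pause and autonegotiation enabled.
 */
uint32_t setup_link = FW_PHY_ACT_LINK_SPEED_10G |
    FW_PHY_ACT_LINK_SPEED_1G |
    (FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) |
    FW_PHY_ACT_SETUP_LINK_AN;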
/* Host Interface Command Structures */
@@ -2702,26 +3136,26 @@ struct ixgbe_hic_hdr {
uint8_t ret_status;
} cmd_or_resp;
uint8_t checksum;
-};
+} __packed __aligned(4);
struct ixgbe_hic_hdr2_req {
uint8_t cmd;
uint8_t buf_lenh;
uint8_t buf_lenl;
uint8_t checksum;
-};
+} __packed __aligned(4);
struct ixgbe_hic_hdr2_rsp {
uint8_t cmd;
uint8_t buf_lenl;
- uint8_t buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
+ uint8_t buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
uint8_t checksum;
-};
+} __packed __aligned(4);
union ixgbe_hic_hdr2 {
struct ixgbe_hic_hdr2_req req;
struct ixgbe_hic_hdr2_rsp rsp;
-};
+} __packed __aligned(4);
struct ixgbe_hic_drv_info {
struct ixgbe_hic_hdr hdr;
@@ -2732,7 +3166,17 @@ struct ixgbe_hic_drv_info {
uint8_t ver_maj;
uint8_t pad; /* end spacing to ensure length is mult. of dword */
uint16_t pad2; /* end spacing to ensure length is mult. of dword2 */
-};
+} __packed __aligned(4);
+
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ uint8_t port_num;
+ uint8_t ver_sub;
+ uint8_t ver_build;
+ uint8_t ver_min;
+ uint8_t ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+} __packed __aligned(4);
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
@@ -2742,7 +3186,7 @@ struct ixgbe_hic_read_shadow_ram {
uint16_t pad2;
uint16_t data;
uint16_t pad3;
-};
+} __packed __aligned(4);
struct ixgbe_hic_write_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -2751,46 +3195,66 @@ struct ixgbe_hic_write_shadow_ram {
uint16_t pad2;
uint16_t data;
uint16_t pad3;
-};
+} __packed __aligned(4);
struct ixgbe_hic_disable_rxen {
struct ixgbe_hic_hdr hdr;
uint8_t port_number;
uint8_t pad2;
uint16_t pad3;
-};
+} __packed __aligned(4);
+
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ uint8_t port_number;
+ uint8_t command_type;
+ uint16_t pad;
+} __packed __aligned(4);
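/*
 * Editorial sketch (not part of this commit): filling a PHY token
 * request as the structure above suggests.  FW_CEM_CMD_RESERVED and
 * FW_DEFAULT_CHECKSUM are assumed to be the usual host-interface
 * header constants defined elsewhere in this header.
 */
struct ixgbe_hic_phy_token_req token_cmd;

token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
token_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
token_cmd.port_number = hw->bus.lan_id;
token_cmd.command_type = FW_PHY_TOKEN_REQ;
token_cmd.pad = 0;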
struct ixgbe_hic_internal_phy_req {
struct ixgbe_hic_hdr hdr;
uint8_t port_number;
uint8_t command_type;
- uint16_t address;
+ __be16 address;
uint16_t rsv1;
- uint32_t write_data;
+ __be32 write_data;
uint16_t pad;
-};
+} __packed __aligned(4);
struct ixgbe_hic_internal_phy_resp {
struct ixgbe_hic_hdr hdr;
- uint32_t read_data;
-};
+ __be32 read_data;
+} __packed __aligned(4);
+
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ uint8_t port_number;
+ uint8_t pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+} __packed __aligned(4);
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+} __packed __aligned(4);
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
- uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
- __le16 length; /* Data buffer length */
- uint8_t cso; /* Checksum offset */
- uint8_t cmd; /* Descriptor control */
+ __le16 length; /* Data buffer length */
+ uint8_t cso; /* Checksum offset */
+ uint8_t cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
- uint8_t status; /* Descriptor status */
- uint8_t css; /* Checksum start */
+ uint8_t status; /* Descriptor status */
+ uint8_t css; /* Checksum start */
__le16 vlan;
} fields;
} upper;
@@ -2813,10 +3277,10 @@ union ixgbe_adv_tx_desc {
/* Receive Descriptor - Legacy */
struct ixgbe_legacy_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- uint8_t status; /* Descriptor status */
- uint8_t errors; /* Descriptor Errors */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ uint8_t status; /* Descriptor status */
+ uint8_t errors; /* Descriptor Errors */
__le16 vlan;
};
@@ -2899,6 +3363,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */
#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */
#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
@@ -2921,12 +3386,14 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */
#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */
#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */
-
+/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */
+#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26
/* Autonegotiation advertised speeds */
typedef uint32_t ixgbe_autoneg_advertised;
/* Link speed */
typedef uint32_t ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0002
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -2939,24 +3406,26 @@ typedef uint32_t ixgbe_link_speed;
IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
-typedef uint32_t ixgbe_physical_layer;
+typedef uint64_t ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_LX 0x8000
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_LX 0x20000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3103,6 +3572,37 @@ union ixgbe_atr_hash_dword {
};
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(_IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
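/*
 * Editorial sketch (not part of this commit): how the *_BY_MAC()
 * indirection resolves.  The array below shows how a MAC-specific
 * value table could be generated with IXGBE_MVALS_INIT(); the real
 * tables are built in the shared code, so this instance is
 * illustrative only.
 */
static const uint32_t ixgbe_mvals_sketch_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(_X550EM_a)
};

/*
 * With hw->mvals pointing at such a table, IXGBE_EEC_BY_MAC(hw)
 * expands to hw->mvals[IXGBE_EEC_IDX], i.e. 0x15FF8 on X550EM_a
 * rather than the legacy 0x10010.
 */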
/*
* Unavailable: The FCoE Boot Option ROM is not present in the flash.
* Disabled: Present; boot order is not set for any targets on the port.
@@ -3130,8 +3630,10 @@ enum ixgbe_mac_type {
ixgbe_mac_X540_vf,
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
+ ixgbe_mac_X550EM_a,
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_X550EM_a_vf,
ixgbe_num_macs
};
@@ -3279,7 +3781,8 @@ struct ixgbe_bus_info {
enum ixgbe_bus_type type;
uint16_t func;
- uint16_t lan_id;
+ uint8_t lan_id;
+ uint16_t instance_id;
};
/* Flow control parameters */
@@ -3379,9 +3882,8 @@ struct ixgbe_hw_stats {
struct ixgbe_hw;
/* iterator type for walking multicast address lists */
-typedef uint8_t* (*ixgbe_mc_addr_itr)(struct ixgbe_hw *hw,
- uint8_t **mc_addr_ptr,
- uint32_t *vmdq);
+typedef uint8_t* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, uint8_t **mc_addr_ptr,
+ uint32_t *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -3399,10 +3901,12 @@ struct ixgbe_mac_operations {
int32_t (*start_hw)(struct ixgbe_hw *);
int32_t (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- uint32_t (*get_supported_physical_layer)(struct ixgbe_hw *);
+ uint64_t (*get_supported_physical_layer)(struct ixgbe_hw *);
int32_t (*get_mac_addr)(struct ixgbe_hw *, uint8_t *);
+ int32_t (*get_device_caps)(struct ixgbe_hw *, uint16_t *);
int32_t (*stop_adapter)(struct ixgbe_hw *);
int32_t (*get_bus_info)(struct ixgbe_hw *);
+ int32_t (*negotiate_api_version)(struct ixgbe_hw *, int);
void (*set_lan_id)(struct ixgbe_hw *);
int32_t (*read_analog_reg8)(struct ixgbe_hw*, uint32_t, uint8_t*);
int32_t (*write_analog_reg8)(struct ixgbe_hw*, uint32_t, uint8_t);
@@ -3412,6 +3916,7 @@ struct ixgbe_mac_operations {
int32_t (*enable_sec_rx_path)(struct ixgbe_hw *);
int32_t (*acquire_swfw_sync)(struct ixgbe_hw *, uint32_t);
void (*release_swfw_sync)(struct ixgbe_hw *, uint32_t);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
int32_t (*prot_autoc_read)(struct ixgbe_hw *, bool *, uint32_t *);
int32_t (*prot_autoc_write)(struct ixgbe_hw *, uint32_t, bool);
@@ -3423,7 +3928,7 @@ struct ixgbe_mac_operations {
int32_t (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
int32_t (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
int32_t (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
/* LED */
@@ -3434,18 +3939,22 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
int32_t (*set_rar)(struct ixgbe_hw *, uint32_t, uint8_t *, uint32_t, uint32_t);
+ int32_t (*set_uc_addr)(struct ixgbe_hw *, uint32_t, uint8_t *);
int32_t (*clear_rar)(struct ixgbe_hw *, uint32_t);
int32_t (*insert_mac_addr)(struct ixgbe_hw *, uint8_t *, uint32_t);
int32_t (*set_vmdq)(struct ixgbe_hw *, uint32_t, uint32_t);
int32_t (*clear_vmdq)(struct ixgbe_hw *, uint32_t, uint32_t);
int32_t (*init_rx_addrs)(struct ixgbe_hw *);
int32_t (*update_mc_addr_list)(struct ixgbe_hw *, uint8_t *, uint32_t,
- ixgbe_mc_addr_itr, bool clear);
+ ixgbe_mc_addr_itr, bool clear);
+ int32_t (*update_xcast_mode)(struct ixgbe_hw *, int);
int32_t (*enable_mc)(struct ixgbe_hw *);
int32_t (*disable_mc)(struct ixgbe_hw *);
int32_t (*clear_vfta)(struct ixgbe_hw *);
- int32_t (*set_vfta)(struct ixgbe_hw *, uint32_t, uint32_t, bool);
- int32_t (*set_vlvf)(struct ixgbe_hw *, uint32_t, uint32_t, bool, bool *);
+ int32_t (*set_vfta)(struct ixgbe_hw *, uint32_t, uint32_t, bool, bool);
+ int32_t (*set_vlvf)(struct ixgbe_hw *, uint32_t, uint32_t, bool, uint32_t *, uint32_t,
+ bool);
+ int32_t (*set_rlpml)(struct ixgbe_hw *, uint16_t);
int32_t (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
@@ -3453,13 +3962,20 @@ struct ixgbe_mac_operations {
/* Flow Control */
int32_t (*fc_enable)(struct ixgbe_hw *);
int32_t (*setup_fc)(struct ixgbe_hw *);
+ void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
-
- /* Misc */
- void (*stop_mac_link_on_d3)(struct ixgbe_hw *);
+ void (*stop_mac_link_on_d3)(struct ixgbe_hw *);
+ void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
+ unsigned int);
+ int32_t (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int32_t (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int32_t (*dmac_config)(struct ixgbe_hw *hw);
+ int32_t (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee);
+ int32_t (*read_iosf_sb_reg)(struct ixgbe_hw *, uint32_t, uint32_t, uint32_t *);
+ int32_t (*write_iosf_sb_reg)(struct ixgbe_hw *, uint32_t, uint32_t, uint32_t);
};
struct ixgbe_phy_operations {
@@ -3481,19 +3997,36 @@ struct ixgbe_phy_operations {
int32_t (*read_i2c_eeprom)(struct ixgbe_hw *, uint8_t , uint8_t *);
int32_t (*write_i2c_eeprom)(struct ixgbe_hw *, uint8_t, uint8_t);
void (*i2c_bus_clear)(struct ixgbe_hw *);
- int32_t (*read_i2c_combined)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t *val);
+ /* deprecated */
+ int32_t (*read_i2c_combined)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t *val);
int32_t (*write_i2c_combined)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t val);
- int32_t (*check_overtemp)(struct ixgbe_hw *);
- int32_t (*set_phy_power)(struct ixgbe_hw *, bool on);
- int32_t (*handle_lasi)(struct ixgbe_hw *hw);
int32_t (*read_i2c_combined_unlocked)(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
uint16_t *value);
int32_t (*write_i2c_combined_unlocked)(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
uint16_t value);
+ /* end of deprecated section */
+ int32_t (*check_overtemp)(struct ixgbe_hw *);
+ int32_t (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int32_t (*enter_lplu)(struct ixgbe_hw *);
+ int32_t (*handle_lasi)(struct ixgbe_hw *hw);
int32_t (*read_i2c_byte_unlocked)(struct ixgbe_hw *, uint8_t offset, uint8_t addr,
- uint8_t *value);
+ uint8_t *value);
int32_t (*write_i2c_byte_unlocked)(struct ixgbe_hw *, uint8_t offset, uint8_t addr,
- uint8_t value);
+ uint8_t value);
+};
+
+struct ixgbe_link_operations {
+ int32_t (*read_link)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t *val);
+ int32_t (*read_link_unlocked)(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t *val);
+ int32_t (*write_link)(struct ixgbe_hw *, uint8_t addr, uint16_t reg, uint16_t val);
+ int32_t (*write_link_unlocked)(struct ixgbe_hw *, uint8_t addr, uint16_t reg,
+ uint16_t val);
+};
+
+struct ixgbe_link_info {
+ struct ixgbe_link_operations ops;
+ uint8_t addr;
};
struct ixgbe_eeprom_info {
@@ -3525,11 +4058,13 @@ struct ixgbe_mac_info {
uint32_t orig_autoc;
bool get_link_status;
uint32_t orig_autoc2;
- uint32_t max_msix_vectors;
- int msix_vectors_from_pcie;
+ uint16_t max_msix_vectors;
+ bool arc_subsystem_valid;
bool orig_link_settings_stored;
bool autotry_restart;
uint8_t flags;
+ struct ixgbe_dmac_config dmac_config;
+ bool set_lben;
uint32_t max_link_up_time;
};
@@ -3546,6 +4081,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -3554,7 +4091,6 @@ struct ixgbe_phy_info {
uint32_t nw_mng_if_sel;
};
-/* MBX */
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
#define IXGBE_ERR_MBX -100
@@ -3598,6 +4134,23 @@ struct ixgbe_phy_info {
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
@@ -3611,6 +4164,19 @@ struct ixgbe_phy_info {
/* mailbox API, version 1.1 VF requests */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+/* mailbox API, version 1.2 VF requests */
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+
+/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
+};
+
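/*
 * Editorial sketch (not part of this commit): a VF driver requesting
 * all-multicast reception through the update_xcast_mode hook added to
 * ixgbe_mac_operations above, which sends IXGBE_VF_UPDATE_XCAST_MODE
 * over the mailbox on its behalf.
 */
if (hw->mac.ops.update_xcast_mode != NULL)
	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);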
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
@@ -3640,7 +4206,6 @@ struct ixgbe_phy_info {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-/* end MBX */
struct ixgbe_mbx_operations {
void (*init_params)(struct ixgbe_hw *hw);
@@ -3678,9 +4243,11 @@ struct ixgbe_hw {
struct ixgbe_addr_filter_info addr_ctrl;
struct ixgbe_fc_info fc;
struct ixgbe_phy_info phy;
+ struct ixgbe_link_info link;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
+ const uint32_t *mvals;
uint16_t device_id;
uint16_t vendor_id;
uint16_t subsystem_device_id;
@@ -3689,8 +4256,15 @@ struct ixgbe_hw {
bool adapter_stopped;
int api_version;
bool force_full_reset;
+ bool allow_unsupported_sfp;
+ bool wol_enabled;
+ bool need_crosstalk_fix;
};
+#define ixgbe_call_func(hw, func, params, error) \
+ (func != NULL) ? func params : error
+
+
/* Error Codes */
#define IXGBE_SUCCESS 0
#define IXGBE_ERR_EEPROM -1
@@ -3726,43 +4300,176 @@ struct ixgbe_hw {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_OUT_OF_MEM -34
+#define IXGBE_BYPASS_FW_WRITE_FAILURE -35
#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36
#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define BYPASS_PAGE_CTL0 0x00000000
+#define BYPASS_PAGE_CTL1 0x40000000
+#define BYPASS_PAGE_CTL2 0x80000000
+#define BYPASS_PAGE_M 0xc0000000
+#define BYPASS_WE 0x20000000
+
+#define BYPASS_AUTO 0x0
+#define BYPASS_NOP 0x0
+#define BYPASS_NORM 0x1
+#define BYPASS_BYPASS 0x2
+#define BYPASS_ISOLATE 0x3
+
+#define BYPASS_EVENT_MAIN_ON 0x1
+#define BYPASS_EVENT_AUX_ON 0x2
+#define BYPASS_EVENT_MAIN_OFF 0x3
+#define BYPASS_EVENT_AUX_OFF 0x4
+#define BYPASS_EVENT_WDT_TO 0x5
+#define BYPASS_EVENT_USR 0x6
+
+#define BYPASS_MODE_OFF_M 0x00000003
+#define BYPASS_STATUS_OFF_M 0x0000000c
+#define BYPASS_AUX_ON_M 0x00000030
+#define BYPASS_MAIN_ON_M 0x000000c0
+#define BYPASS_MAIN_OFF_M 0x00000300
+#define BYPASS_AUX_OFF_M 0x00000c00
+#define BYPASS_WDTIMEOUT_M 0x00003000
+#define BYPASS_WDT_ENABLE_M 0x00004000
+#define BYPASS_WDT_VALUE_M 0x00070000
+
+#define BYPASS_MODE_OFF_SHIFT 0
+#define BYPASS_STATUS_OFF_SHIFT 2
+#define BYPASS_AUX_ON_SHIFT 4
+#define BYPASS_MAIN_ON_SHIFT 6
+#define BYPASS_MAIN_OFF_SHIFT 8
+#define BYPASS_AUX_OFF_SHIFT 10
+#define BYPASS_WDTIMEOUT_SHIFT 12
+#define BYPASS_WDT_ENABLE_SHIFT 14
+#define BYPASS_WDT_TIME_SHIFT 16
+
+#define BYPASS_WDT_1 0x0
+#define BYPASS_WDT_1_5 0x1
+#define BYPASS_WDT_2 0x2
+#define BYPASS_WDT_3 0x3
+#define BYPASS_WDT_4 0x4
+#define BYPASS_WDT_8 0x5
+#define BYPASS_WDT_16 0x6
+#define BYPASS_WDT_32 0x7
+#define BYPASS_WDT_OFF 0xffff
+
+#define BYPASS_CTL1_TIME_M 0x01ffffff
+#define BYPASS_CTL1_VALID_M 0x02000000
+#define BYPASS_CTL1_OFFTRST_M 0x04000000
+#define BYPASS_CTL1_WDT_PET_M 0x08000000
+
+#define BYPASS_CTL1_VALID 0x02000000
+#define BYPASS_CTL1_OFFTRST 0x04000000
+#define BYPASS_CTL1_WDT_PET 0x08000000
+
+#define BYPASS_CTL2_DATA_M 0x000000ff
+#define BYPASS_CTL2_OFFSET_M 0x0000ff00
+#define BYPASS_CTL2_RW_M 0x00010000
+#define BYPASS_CTL2_HEAD_M 0x0ff00000
+
+#define BYPASS_CTL2_OFFSET_SHIFT 8
+#define BYPASS_CTL2_HEAD_SHIFT 20
+
+#define BYPASS_CTL2_RW 0x00010000
+
+struct ixgbe_bypass_eeprom {
+ uint32_t logs;
+ uint32_t clear_off;
+ uint8_t actions;
+};
+
+#define BYPASS_MAX_LOGS 43
+#define BYPASS_LOG_SIZE 5
+#define BYPASS_LOG_LINE_SIZE 37
+
+#define BYPASS_EEPROM_VER_ADD 0x02
+
+#define BYPASS_LOG_TIME_M 0x01ffffff
+#define BYPASS_LOG_TIME_VALID_M 0x02000000
+#define BYPASS_LOG_HEAD_M 0x04000000
+#define BYPASS_LOG_CLEAR_M 0x08000000
+#define BYPASS_LOG_EVENT_M 0xf0000000
+#define BYPASS_LOG_ACTION_M 0x03
+
+#define BYPASS_LOG_EVENT_SHIFT 28
+#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
+
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ (1 << 5)
-#define IXGBE_FUSES0_REV1 (1 << 6)
+#define IXGBE_FUSES0_REV_MASK (3 << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
+#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31)
+
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1)
+#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3)
+#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29)
+#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1)
+
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11)
+
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19)
#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
@@ -3796,7 +4503,19 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_NW_MNG_IF_SEL 0x00011178
-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2)
+#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
/* PHY */
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
@@ -3854,8 +4573,12 @@ struct ixgbe_hw {
#define IXGBE_CS4227 0xBE /* CS4227 address */
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -3908,18 +4631,13 @@ struct ixgbe_hw {
#ifndef IXGBE_SFP_DETECT_RETRIES
#define IXGBE_SFP_DETECT_RETRIES 10
-#endif
+#endif /* IXGBE_SFP_DETECT_RETRIES */
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-#define IXGBE_SFF_SFF_8472_REV_9_3 0x01
-#define IXGBE_SFF_SFF_8472_REV_9_5 0x02
-#define IXGBE_SFF_SFF_8472_REV_10_2 0x03
-#define IXGBE_SFF_SFF_8472_REV_10_4 0x04
-#define IXGBE_SFF_SFF_8472_REV_11_0 0x05
/* end PHY */
diff --git a/sys/dev/pci/ixgbe_x540.c b/sys/dev/pci/ixgbe_x540.c
index 4583ff10ac8..8c0cf9d6ae3 100644
--- a/sys/dev/pci/ixgbe_x540.c
+++ b/sys/dev/pci/ixgbe_x540.c
@@ -1,8 +1,9 @@
-/* $OpenBSD: ixgbe_x540.c,v 1.11 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_x540.c,v 1.12 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,7 +33,7 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x540.c 295093 2016-01-31 15:14:23Z smh $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x540.c 331224 2018-03-19 20:55:05Z erj $*/
#include <dev/pci/ixgbe.h>
#include <dev/pci/ixgbe_type.h>
@@ -49,12 +50,14 @@ int32_t ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
int32_t ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+int32_t ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
int32_t ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool link_up_wait_to_complete);
int32_t ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-uint32_t ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+uint64_t ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
int32_t ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
int32_t ixgbe_read_eerd_X540(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data);
@@ -65,6 +68,7 @@ int32_t ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
int32_t ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int32_t ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, uint32_t index);
int32_t ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, uint32_t index);
@@ -109,8 +113,10 @@ int32_t ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.read_analog_reg8 = NULL;
mac->ops.write_analog_reg8 = NULL;
mac->ops.start_hw = ixgbe_start_hw_X540;
+ mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
@@ -120,6 +126,7 @@ int32_t ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = ixgbe_set_vfta_generic;
+ mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
@@ -137,6 +144,14 @@ int32_t ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = 0 /*ixgbe_get_pcie_msix_count_generic(hw)*/;
+ /*
+ * FWSM register
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
+ & IXGBE_FWSM_MODE_MASK);
+
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* LEDs */
@@ -147,6 +162,23 @@ int32_t ixgbe_init_ops_X540(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: TRUE when autoneg or autotry is enabled
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ **/
+int32_t ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_get_media_type_X540 - Get media type
* @hw: pointer to hardware structure
*
@@ -183,6 +215,7 @@ int32_t ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
int32_t status;
uint32_t ctrl, i;
+ uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X540");
@@ -195,10 +228,17 @@ int32_t ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
@@ -210,7 +250,8 @@ mac_reset_top:
if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
- DEBUGOUT("Reset polling failed to complete.\n");
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
}
msec_delay(100);
@@ -272,9 +313,9 @@ out:
*
* Determines physical layer capabilities of the current configuration.
**/
-uint32_t ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+uint64_t ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
- uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ uint64_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
uint16_t ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
@@ -310,7 +351,7 @@ int32_t ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
eeprom_size = (uint16_t)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -388,7 +429,6 @@ int32_t ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
uint16_t length = 0;
uint16_t pointer = 0;
uint16_t word = 0;
- uint16_t checksum_last_word = IXGBE_EEPROM_CHECKSUM;
uint16_t ptr_start = IXGBE_PCIE_ANALOG_PTR;
/* Do not use hw->eeprom.ops.read because we do not want to take
@@ -398,14 +438,15 @@ int32_t ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
- /* Include 0x0-0x3F in the checksum */
- for (i = 0; i <= checksum_last_word; i++) {
+ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
+ * checksum itself
+ */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
- if (i != IXGBE_EEPROM_CHECKSUM)
- checksum += word;
+ checksum += word;
}
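The checksum hunk above replaces "loop up to and including the checksum word, then skip it" with "stop just before the checksum word"; both variants sum the words that precede IXGBE_EEPROM_CHECKSUM. A standalone sketch (with an invented word array standing in for the EEPROM, and 0x3F assumed for the checksum offset per the removed 0x0-0x3F comment) showing the two loop shapes are equivalent:

#include <assert.h>
#include <stdint.h>

#define EEPROM_CHECKSUM	0x3F	/* assumed checksum word offset */

int main(void)
{
	uint16_t word[EEPROM_CHECKSUM + 1];
	uint16_t old_sum = 0, new_sum = 0;
	int i;

	for (i = 0; i <= EEPROM_CHECKSUM; i++)
		word[i] = (uint16_t)(3 * i + 1);	/* arbitrary test data */

	/* old shape: iterate through the checksum word but skip it */
	for (i = 0; i <= EEPROM_CHECKSUM; i++)
		if (i != EEPROM_CHECKSUM)
			old_sum += word[i];

	/* new shape: stop before the checksum word */
	for (i = 0; i < EEPROM_CHECKSUM; i++)
		new_sum += word[i];

	assert(old_sum == new_sum);
	return 0;
}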
/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
@@ -497,7 +538,8 @@ int32_t ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
* calculated checksum
*/
if (read_checksum != checksum) {
- DEBUGOUT("Invalid EEPROM checksum\n");
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
status = IXGBE_ERR_EEPROM_CHECKSUM;
}
@@ -580,8 +622,8 @@ int32_t ixgbe_update_flash_X540(struct ixgbe_hw *hw)
goto out;
}
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_SUCCESS)
@@ -590,11 +632,11 @@ int32_t ixgbe_update_flash_X540(struct ixgbe_hw *hw)
DEBUGOUT("Flash update time out\n");
if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
@@ -623,13 +665,18 @@ int32_t ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
if (reg & IXGBE_EEC_FLUDONE) {
status = IXGBE_SUCCESS;
break;
}
msec_delay(5);
}
+
+ if (i == IXGBE_FLUDONE_ATTEMPTS)
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Flash update status polling timed out");
+
return status;
}
@@ -662,19 +709,24 @@ int32_t ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask)
swmask |= swi2c_mask;
fwmask |= swi2c_mask << 2;
+ if (hw->mac.type >= ixgbe_mac_X550)
+ timeout = 1000;
+
for (i = 0; i < timeout; i++) {
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
+ swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msec_delay(5);
return IXGBE_SUCCESS;
}
/* Firmware currently using resource (fwmask), hardware
@@ -685,22 +737,19 @@ int32_t ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask)
msec_delay(5);
}
- /* Failed to get SW only semaphore */
- if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- return IXGBE_ERR_SWFW_SYNC;
- }
-
/* If the resource is not released by the FW/HW the SW can assume that
* the FW/HW malfunctions. In that case the SW should set the SW bit(s)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ }
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
return IXGBE_SUCCESS;
@@ -712,15 +761,18 @@ int32_t ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask)
*/
if (swfw_sync & swmask) {
uint32_t rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
- IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
@@ -744,12 +796,12 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask)
swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msec_delay(5);
+ msec_delay(2);
}
/**
@@ -773,7 +825,7 @@ int32_t ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = IXGBE_SUCCESS;
break;
@@ -784,7 +836,7 @@ int32_t ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the REGSMP bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swsm & IXGBE_SWFW_REGSMP))
break;
@@ -796,14 +848,15 @@ int32_t ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- DEBUGOUT("REGSMP Software NVM semaphore not "
- "granted.\n");
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "REGSMP Software NVM semaphore not granted.\n");
ixgbe_release_swfw_sync_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
- DEBUGOUT("Software semaphore SMBI between device drivers "
- "not granted.\n");
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Software semaphore SMBI between device drivers "
+ "not granted.\n");
}
return status;
@@ -823,18 +876,48 @@ void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm);
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
/**
+ * ixgbe_init_swfw_sync_X540 - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ uint32_t rmask;
+
+ /* First try to grab the semaphore but we don't need to bother
+ * looking to see whether we got the lock or not since we do
+ * the same thing regardless of whether we got the lock or not.
+ * We got the lock - we release it.
+ * We timeout trying to get the lock - we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+
+ /* Acquire and release all software resources. */
+ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
+
+ rmask |= IXGBE_GSSR_I2C_MASK;
+ ixgbe_acquire_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_X540(hw, rmask);
+}
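The comment in ixgbe_init_swfw_sync_X540 above describes the recovery idiom: try to take the semaphore without caring whether the attempt succeeds, then release it, so a lock abandoned by a crashed owner is cleared either way. A generic sketch of that idiom (sem_try_acquire/sem_force_release are placeholders for the SWSM/SW_FW_SYNC register accesses, not driver functions):

#include <stdbool.h>

struct sem {
	bool locked;
};

/* placeholder primitives; the hardware versions poll SWSM/SW_FW_SYNC bits */
static bool sem_try_acquire(struct sem *s)
{
	if (s->locked)
		return false;	/* would time out in the real driver */
	s->locked = true;
	return true;
}

static void sem_force_release(struct sem *s)
{
	s->locked = false;
}

/* clear a semaphore that may have been left locked by a failed owner */
static void sem_reset(struct sem *s)
{
	(void)sem_try_acquire(s);	/* same follow-up either way */
	sem_force_release(s);
}

int main(void)
{
	struct sem s = { .locked = true };	/* simulate a stale lock */

	sem_reset(&s);
	return s.locked ? 1 : 0;		/* 0: lock is free again */
}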
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
@@ -851,6 +934,9 @@ int32_t ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, uint32_t index)
DEBUGFUNC("ixgbe_blink_led_start_X540");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
@@ -885,6 +971,9 @@ int32_t ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, uint32_t index)
uint32_t macc_reg;
uint32_t ledctl_reg;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
DEBUGFUNC("ixgbe_blink_led_stop_X540");
/* Restore the LED to its default value. */
diff --git a/sys/dev/pci/ixgbe_x550.c b/sys/dev/pci/ixgbe_x550.c
index ac762fe380d..60c071b7643 100644
--- a/sys/dev/pci/ixgbe_x550.c
+++ b/sys/dev/pci/ixgbe_x550.c
@@ -1,8 +1,8 @@
-/* $OpenBSD: ixgbe_x550.c,v 1.6 2020/02/28 05:22:53 deraadt Exp $ */
+/* $OpenBSD: ixgbe_x550.c,v 1.7 2020/03/02 01:59:01 jmatthew Exp $ */
/******************************************************************************
- Copyright (c) 2001-2015, Intel Corporation
+ Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,7 +32,7 @@
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 295093 2016-01-31 15:14:23Z smh $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x550.c 333870 2018-05-19 05:57:26Z mmacy $*/
#include <dev/pci/ixgbe.h>
#include <dev/pci/ixgbe_type.h>
@@ -41,6 +41,18 @@ extern int32_t ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
extern int32_t ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask);
extern void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, uint32_t mask);
+int32_t ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, uint32_t mask);
+void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, uint32_t mask);
+int32_t ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
+
+int32_t ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+
+int32_t ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
+int32_t ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
+int32_t ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
+
int32_t ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw);
int32_t ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw);
int32_t ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw);
@@ -57,14 +69,20 @@ int32_t ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
uint16_t *data);
int32_t ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset,
uint16_t *data);
-int32_t ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, uint16_t offset,
- uint16_t *data);
int32_t ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, uint16_t offset,
uint16_t data);
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool);
int32_t ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
uint32_t device_type, uint32_t data);
int32_t ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
uint32_t device_type, uint32_t *data);
+int32_t ixgbe_get_phy_token(struct ixgbe_hw *);
+int32_t ixgbe_put_phy_token(struct ixgbe_hw *);
+int32_t ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint32_t data);
+int32_t ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint32_t *data);
enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw);
int32_t ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw);
int32_t ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
@@ -77,15 +95,26 @@ int32_t ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
int32_t ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
int32_t ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
int32_t ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
-uint32_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+uint64_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
int32_t ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
+int32_t ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
int32_t ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, uint32_t mask);
void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, uint32_t mask);
int32_t ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
int32_t ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+int32_t ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint16_t *phy_data);
+int32_t ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint16_t phy_data);
+int32_t ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
+int32_t ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
+int32_t ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);
int32_t ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
int32_t ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -99,6 +128,7 @@ int32_t ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, uint32_t led_idx);
int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
+
/**
* ixgbe_init_ops_X550 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
@@ -115,6 +145,12 @@ int32_t ixgbe_init_ops_X550(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_ops_X550");
ret_val = ixgbe_init_ops_X540(hw);
+ mac->ops.dmac_config = ixgbe_dmac_config_X550;
+ mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
+ mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
+ mac->ops.setup_eee = NULL;
+ mac->ops.set_source_address_pruning =
+ ixgbe_set_source_address_pruning_X550;
eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
@@ -124,9 +160,18 @@ int32_t ixgbe_init_ops_X550(struct ixgbe_hw *hw)
eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
mac->ops.disable_rx = ixgbe_disable_rx_x550;
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->mac.ops.led_on = NULL;
+ hw->mac.ops.led_off = NULL;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
hw->mac.ops.led_on = ixgbe_led_on_t_X550em;
hw->mac.ops.led_off = ixgbe_led_off_t_X550em;
+ break;
+ default:
+ break;
}
return ret_val;
}
@@ -141,8 +186,7 @@ int32_t ixgbe_init_ops_X550(struct ixgbe_hw *hw)
**/
int32_t ixgbe_read_cs4227(struct ixgbe_hw *hw, uint16_t reg, uint16_t *value)
{
- return hw->phy.ops.read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
- value);
+ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
/**
@@ -155,8 +199,7 @@ int32_t ixgbe_read_cs4227(struct ixgbe_hw *hw, uint16_t reg, uint16_t *value)
**/
int32_t ixgbe_write_cs4227(struct ixgbe_hw *hw, uint16_t reg, uint16_t value)
{
- return hw->phy.ops.write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg,
- value);
+ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
/**
@@ -169,11 +212,14 @@ int32_t ixgbe_write_cs4227(struct ixgbe_hw *hw, uint16_t reg, uint16_t value)
**/
int32_t ixgbe_read_pe(struct ixgbe_hw *hw, uint8_t reg, uint8_t *value)
{
- int32_t status;
+ int32_t status = IXGBE_NOT_IMPLEMENTED;
- status = hw->phy.ops.read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (hw->phy.ops.read_i2c_byte_unlocked)
+ status = hw->phy.ops.read_i2c_byte_unlocked(hw, reg, IXGBE_PE,
+ value);
if (status != IXGBE_SUCCESS)
- DEBUGOUT1("port expander access failed with %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
return status;
}
@@ -187,11 +233,14 @@ int32_t ixgbe_read_pe(struct ixgbe_hw *hw, uint8_t reg, uint8_t *value)
**/
int32_t ixgbe_write_pe(struct ixgbe_hw *hw, uint8_t reg, uint8_t value)
{
- int32_t status;
+ int32_t status = IXGBE_NOT_IMPLEMENTED;
- status = hw->phy.ops.write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
+ if (hw->phy.ops.write_i2c_byte_unlocked)
+ status = hw->phy.ops.write_i2c_byte_unlocked(hw, reg, IXGBE_PE,
+ value);
if (status != IXGBE_SUCCESS)
- DEBUGOUT1("port expander access failed with %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "port expander access failed with %d\n", status);
return status;
}
@@ -255,14 +304,16 @@ int32_t ixgbe_reset_cs4227(struct ixgbe_hw *hw)
msec_delay(IXGBE_CS4227_CHECK_DELAY);
}
if (retry == IXGBE_CS4227_RETRIES) {
- DEBUGOUT("CS4227 reset did not complete.\n");
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset did not complete.");
return IXGBE_ERR_PHY;
}
status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
if (status != IXGBE_SUCCESS ||
!(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
- DEBUGOUT("CS4227 EEPROM did not load successfully.\n");
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 EEPROM did not load successfully.");
return IXGBE_ERR_PHY;
}
@@ -283,7 +334,8 @@ void ixgbe_check_cs4227(struct ixgbe_hw *hw)
for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
- DEBUGOUT1("semaphore failed with %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
msec_delay(IXGBE_CS4227_CHECK_DELAY);
continue;
}
@@ -308,7 +360,8 @@ void ixgbe_check_cs4227(struct ixgbe_hw *hw)
if (retry == IXGBE_CS4227_RETRIES) {
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
- DEBUGOUT1("semaphore failed with %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
return;
}
}
@@ -316,7 +369,8 @@ void ixgbe_check_cs4227(struct ixgbe_hw *hw)
/* Reset the CS4227. */
status = ixgbe_reset_cs4227(hw);
if (status != IXGBE_SUCCESS) {
- DEBUGOUT1("CS4227 reset failed: %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+ "CS4227 reset failed: %d", status);
goto out;
}
@@ -329,7 +383,8 @@ void ixgbe_check_cs4227(struct ixgbe_hw *hw)
msec_delay(10);
status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
if (status != IXGBE_SUCCESS) {
- DEBUGOUT1("semaphore failed with %d\n", status);
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
return;
}
@@ -367,39 +422,180 @@ void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*/
int32_t ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
- int32_t ret_val;
+ hw->mac.ops.set_lan_id(hw);
+
+ ixgbe_read_mng_if_sel_x550em(hw);
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
- hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
+ /* Fallthrough */
- ret_val = ixgbe_identify_module_generic(hw);
-
- /* Set PHY type none if no SFP detected */
- if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) {
- hw->phy.type = ixgbe_phy_none;
- return IXGBE_SUCCESS;
- }
- return ret_val;
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ return ixgbe_identify_module_generic(hw);
break;
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
- case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ hw->phy.type = ixgbe_phy_ext_1g_t;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+int32_t ixgbe_fw_phy_activity(struct ixgbe_hw *hw, uint16_t activity,
+ uint32_t (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ uint16_t retries = FW_PHY_ACT_RETRIES;
+ int32_t rc;
+ uint16_t i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = htole16(activity);
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ hic.cmd.data[i] = htobe32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, (uint32_t *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ TRUE);
+ if (rc != IXGBE_SUCCESS)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = betoh32(hic.rsp.data[i]);
+ return IXGBE_SUCCESS;
+ }
+ usec_delay(20);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
+
+static const struct {
+ uint16_t fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
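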
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+int32_t ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ uint32_t info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ uint16_t phy_speeds;
+ uint16_t phy_id_lo;
+ int32_t rc;
+ uint16_t i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+int32_t ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+int32_t ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ uint32_t setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
int32_t ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, uint32_t reg_addr,
uint32_t device_type, uint16_t *phy_data)
{
@@ -449,6 +645,7 @@ int32_t ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_internal;
mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
+
mac->ops.get_media_type = ixgbe_get_media_type_X550em;
mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
@@ -461,12 +658,25 @@ int32_t ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
- mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
- mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
-
/* PHY */
phy->ops.init = ixgbe_init_phy_ops_X550em;
- phy->ops.identify = ixgbe_identify_phy_x550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_fw;
+ phy->ops.set_phy_power = NULL;
+ phy->ops.get_firmware_version = NULL;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ phy->ops.set_phy_power = NULL;
+ break;
+ default:
+ phy->ops.identify = ixgbe_identify_phy_x550em;
+ }
+
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
phy->ops.set_phy_power = NULL;
@@ -483,6 +693,312 @@ int32_t ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+int32_t ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ uint32_t setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ int32_t rc;
+ uint16_t i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+int32_t ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_setup_eee_fw - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * This function controls EEE for firmware-based PHY implementations.
+ */
+int32_t ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
+{
+ if (!!hw->phy.eee_speeds_advertised == enable_eee)
+ return IXGBE_SUCCESS;
+ if (enable_eee)
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ else
+ hw->phy.eee_speeds_advertised = 0;
+ return hw->phy.ops.setup_link(hw);
+}
+
+/**
+* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers for MAC type X550EM_a.
+* Does not touch the hardware.
+**/
+int32_t ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ int32_t ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_a");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ } else {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
+ }
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
+
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ mac->ops.setup_fc = NULL;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
+ break;
+ case ixgbe_media_type_backplane:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
+ break;
+ default:
+ break;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_eee = ixgbe_setup_eee_fw;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ break;
+ default:
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers for MAC type X550EM_x.
+* Does not touch the hardware.
+**/
+int32_t ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_link_info *link = &hw->link;
+ int32_t ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_x");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
+ link->ops.read_link = ixgbe_read_i2c_combined_generic;
+ link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
+ link->ops.write_link = ixgbe_write_i2c_combined_generic;
+ link->ops.write_link_unlocked =
+ ixgbe_write_i2c_combined_generic_unlocked;
+ link->addr = IXGBE_CS4227;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) {
+ mac->ops.setup_fc = NULL;
+ mac->ops.setup_eee = NULL;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_dmac_config_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing. If enabling dmac, dmac is activated.
+ * When disabling dmac, the dmac enable bit is cleared.
+ **/
+int32_t ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
+{
+ uint32_t reg;
+
+ DEBUGFUNC("ixgbe_dmac_config_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ /* Disable DMA Coalescing if the watchdog timer is 0 */
+ if (!hw->mac.dmac_config.watchdog_timer)
+ goto out;
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Configure DMA Coalescing Control Register */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+
+ /* Set the watchdog timer in units of 40.96 usec */
+ reg &= ~IXGBE_DMACR_DMACWT_MASK;
+ reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
+
+ reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
+ reg |= IXGBE_DMACR_EN_MNG_IND;
+
+ /* Enable DMA coalescing after configuration */
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+out:
+ return IXGBE_SUCCESS;
+}
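ixgbe_dmac_config_X550 above programs the watchdog field in units of 40.96 microseconds, which is exactly what the *100/4096 scaling does (usec / 40.96 == usec * 100 / 4096). A standalone sketch of the conversion, assuming (as the code above suggests) that dmac_config.watchdog_timer is expressed in microseconds:

#include <stdint.h>
#include <stdio.h>

/* convert a watchdog interval in microseconds to 40.96 us units */
static uint32_t dmac_watchdog_field(uint32_t usec)
{
	return (usec * 100) / 4096;
}

int main(void)
{
	/* e.g. a 1000 us watchdog -> 24 (24 * 40.96 us ~ 983 us) */
	printf("%u\n", dmac_watchdog_field(1000));
	return 0;
}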
+
+/**
+ * ixgbe_dmac_config_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Configure DMA coalescing threshold per TC. The dmac enable bit must
+ * be cleared before configuring.
+ **/
+int32_t ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
+{
+ uint32_t tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
+
+ DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
+
+ /* Configure DMA coalescing enabled */
+ switch (hw->mac.dmac_config.link_speed) {
+ case IXGBE_LINK_SPEED_10_FULL:
+ case IXGBE_LINK_SPEED_100_FULL:
+ pb_headroom = IXGBE_DMACRXT_100M;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ pb_headroom = IXGBE_DMACRXT_1G;
+ break;
+ default:
+ pb_headroom = IXGBE_DMACRXT_10G;
+ break;
+ }
+
+ maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
+ IXGBE_MHADD_MFS_SHIFT) / 1024);
+
+ /* Set the per Rx packet buffer receive threshold */
+ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
+ reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
+
+ if (tc < hw->mac.dmac_config.num_tcs) {
+ /* Get Rx PB size */
+ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
+ rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
+ IXGBE_RXPBSIZE_SHIFT;
+
+ /* Calculate receive buffer threshold in kilobytes */
+ if (rx_pb_size > pb_headroom)
+ rx_pb_size = rx_pb_size - pb_headroom;
+ else
+ rx_pb_size = 0;
+
+ /* Minimum of MFS shall be set for DMCTH */
+ reg |= (rx_pb_size > maxframe_size_kb) ?
+ rx_pb_size : maxframe_size_kb;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
+ }
+ return IXGBE_SUCCESS;
+}
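The per-TC loop above derives each receive threshold from the traffic class's packet buffer size minus a link-speed-dependent headroom, and never lets it drop below the maximum frame size. A standalone sketch of that calculation with invented numbers (all values in KB; the headroom argument stands in for the IXGBE_DMACRXT_* values):

#include <stdint.h>
#include <stdio.h>

/* threshold = max(rx packet buffer size - headroom, max frame size), in KB */
static uint32_t dmac_rx_threshold(uint32_t rx_pb_kb, uint32_t headroom_kb,
    uint32_t maxframe_kb)
{
	uint32_t thresh = (rx_pb_kb > headroom_kb) ? rx_pb_kb - headroom_kb : 0;

	/* never program a threshold smaller than one max-sized frame */
	return (thresh > maxframe_kb) ? thresh : maxframe_kb;
}

int main(void)
{
	/* example: 384 KB packet buffer, 10 KB headroom, 9 KB jumbo frames */
	printf("%u KB\n", dmac_rx_threshold(384, 10, 9));
	return 0;
}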
+
+/**
+ * ixgbe_dmac_update_tcs_X550
+ * @hw: pointer to hardware structure
+ *
+ * Disables dmac, updates per TC settings, and then enables dmac.
+ **/
+int32_t ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
+{
+ uint32_t reg;
+
+ DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
+
+ /* Disable DMA coalescing before configuring */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg &= ~IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ ixgbe_dmac_config_tcs_X550(hw);
+
+ /* Enable DMA coalescing after configuration */
+ reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
+ reg |= IXGBE_DMACR_DMAC_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
@@ -515,6 +1031,33 @@ int32_t ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable source address pruning
+ * @pool: Rx pool to set source address pruning for
+ **/
+void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
+ unsigned int pool)
+{
+ uint64_t pfflp;
+
+ /* max rx pool is 63 */
+ if (pool > 63)
+ return;
+
+ pfflp = (uint64_t)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
+ pfflp |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
+
+ if (enable)
+ pfflp |= (1ULL << pool);
+ else
+ pfflp &= ~(1ULL << pool);
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (uint32_t)pfflp);
+ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (uint32_t)(pfflp >> 32));
+}
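ixgbe_set_source_address_pruning_X550 above treats PFFLPL and PFFLPH as the low and high halves of one 64-bit per-pool bitmap. A standalone sketch of the same reassemble/flip/split logic on plain variables (pfflpl/pfflph stand in for the two registers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfflpl = 0, pfflph = 0;	/* stand-ins for PFFLPL/PFFLPH */
	unsigned int pool = 40;			/* any pool index 0..63 */
	bool enable = true;

	/* rebuild the 64-bit bitmap, flip one pool's bit, split it again */
	uint64_t pfflp = (uint64_t)pfflpl | ((uint64_t)pfflph << 32);

	if (enable)
		pfflp |= 1ULL << pool;
	else
		pfflp &= ~(1ULL << pool);

	pfflpl = (uint32_t)pfflp;
	pfflph = (uint32_t)(pfflp >> 32);

	printf("PFFLPL %08x PFFLPH %08x\n", pfflpl, pfflph);
	return 0;
}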
+
+/**
* ixgbe_iosf_wait - Wait for IOSF command completion
* @hw: pointer to hardware structure
* @ctrl: pointer to location to receive final IOSF control value
@@ -540,7 +1083,7 @@ int32_t ixgbe_iosf_wait(struct ixgbe_hw *hw, uint32_t *ctrl)
if (ctrl)
*ctrl = command;
if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- DEBUGOUT( "Wait timed out\n");
+ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n");
return IXGBE_ERR_PHY;
}
@@ -548,18 +1091,18 @@ int32_t ixgbe_iosf_wait(struct ixgbe_hw *hw, uint32_t *ctrl)
}
/**
- * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
- * device
+ * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
+ * of the IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
int32_t ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
- uint32_t device_type, uint32_t data)
+ uint32_t device_type, uint32_t data)
{
uint32_t gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
- uint32_t command, error;
+ uint32_t command, error __unused;
int32_t ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
@@ -584,7 +1127,8 @@ int32_t ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- DEBUGOUT1("Failed to write, error %x\n", error);
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to write, error %x\n", error);
ret = IXGBE_ERR_PHY;
}
@@ -594,18 +1138,17 @@ out:
}
/**
- * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
- * device
+ * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 3 bit device type
- * @phy_data: Pointer to read data from the register
+ * @data: Pointer to read data from the register
**/
int32_t ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
- uint32_t device_type, uint32_t *data)
+ uint32_t device_type, uint32_t *data)
{
uint32_t gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
- uint32_t command, error;
+ uint32_t command, error __unused;
int32_t ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
@@ -627,7 +1170,8 @@ int32_t ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, uint32_t reg_addr,
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- DEBUGOUT1("Failed to read, error %x\n", error);
+ ERROR_REPORT2(IXGBE_ERROR_POLLING,
+ "Failed to read, error %x\n", error);
ret = IXGBE_ERR_PHY;
}
@@ -640,6 +1184,138 @@ out:
}
/**
+ * ixgbe_get_phy_token - Get the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+int32_t ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ int32_t status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, (uint32_t *)&token_cmd,
+ sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ TRUE);
+ if (status) {
+ DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
+ status);
+ return status;
+ }
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return IXGBE_SUCCESS;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
+ token_cmd.hdr.cmd_or_resp.ret_status);
+ return IXGBE_ERR_FW_RESP_INVALID;
+ }
+
+ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
+ return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared phy access
+ * @hw: Pointer to hardware structure
+ */
+
+int32_t ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ int32_t status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, (uint32_t *)&token_cmd,
+ sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ TRUE);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return IXGBE_SUCCESS;
+
+ DEBUGOUT("Put PHY Token host interface command failed");
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
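ixgbe_get_phy_token/ixgbe_put_phy_token above implement the firmware token that serializes shared PHY access on X550EM_a; callers such as the ixgbe_read_phy_reg_x550a/ixgbe_write_phy_reg_x550a routines declared earlier presumably bracket each MDIO operation with them. A generic sketch of that acquire/use/release shape (the helpers here are stubs, not driver functions):

#include <stdio.h>

/* stub stand-ins for the driver's token and MDIO helpers */
static int get_phy_token(void) { return 0; }	/* 0 == success */
static int put_phy_token(void) { return 0; }
static int mdio_read(unsigned int reg, unsigned short *val)
{
	*val = 0x1234;	/* fabricated data for the sketch */
	return 0;
}

/* take the FW token, perform the MDIO access, always hand the token back */
static int read_shared_phy_reg(unsigned int reg, unsigned short *val)
{
	int rc = get_phy_token();

	if (rc)
		return rc;
	rc = mdio_read(reg, val);
	put_phy_token();	/* release even if the read failed */
	return rc;
}

int main(void)
{
	unsigned short val;

	return read_shared_phy_reg(0x0, &val);
}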
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
+ * of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+int32_t ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint32_t data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+ int32_t status;
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = htobe16(reg_addr);
+ write_cmd.write_data = htobe32(data);
+
+ status = ixgbe_host_interface_command(hw, (uint32_t *)&write_cmd,
+ sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, FALSE);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to read data from the register
+ **/
+int32_t ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint32_t *data)
+{
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ int32_t status;
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = htobe16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, (uint32_t *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, TRUE);
+
+ /* Extract the register value from the response. */
+ *data = betoh32(hic.rsp.read_data);
+
+ return status;
+}
+
+/**
* ixgbe_get_media_type_X550em - Get media type
* @hw: pointer to hardware structure
*
@@ -655,13 +1331,30 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ media_type = ixgbe_media_type_backplane;
+ hw->phy.type = ixgbe_phy_sgmii;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
media_type = ixgbe_media_type_copper;
break;
default:
@@ -755,6 +1448,195 @@ int32_t ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
}
/**
+* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
+* internal PHY
+* @hw: pointer to hardware structure
+**/
+int32_t ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ int32_t status;
+ uint32_t link_ctrl;
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ uint32_t flx_mask_st20;
+
+ /* Indicate to FW that AN restart has been asserted */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_sgmii - Set up link for sgmii
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: TRUE when waiting for completion is needed
+ */
+int32_t ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t lval, sval, flx_val;
+ int32_t rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+ if (rc)
+ return rc;
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: TRUE when waiting for completion is needed
+ */
+int32_t ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t lval, sval, flx_val;
+ int32_t rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
* ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
*/
@@ -764,8 +1646,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
- switch (hw->mac.ops.get_media_type(hw)) {
- case ixgbe_media_type_fiber:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
/* CS4227 does not support autoneg, so disable the laser control
* functions for SFP+ fiber
*/
@@ -773,17 +1655,43 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
+
+ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
+ (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ else
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
- mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
- mac->ops.check_link = ixgbe_check_link_t_X550em;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
+ break;
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link =
+ ixgbe_check_mac_link_generic;
+ } else {
+ mac->ops.setup_link =
+ ixgbe_setup_mac_link_t_X550em;
+ }
+ } else {
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ }
+ break;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
break;
- }
+ }
}
/**
@@ -798,6 +1706,13 @@ int32_t ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
{
DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = TRUE;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -820,8 +1735,30 @@ int32_t ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_ext_1g_t:
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = TRUE;
}
@@ -937,21 +1874,34 @@ int32_t ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
/* Enable link status change alarm */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- if (status != IXGBE_SUCCESS)
- return status;
+ /* Enable the LASI interrupts on X552 devices to receive notifications
+ * of the link configuration of the external PHY and, in turn, configure
+ * the internal iXFI link, since iXFI does not support auto-negotiation.
+ * This is not required on X553 devices, where the internal link to the
+ * external PHY is KR, which does perform auto-negotiation, so the check
+ * below skips enabling LASI interrupts for X553.
+ */
+ if (hw->mac.type != ixgbe_mac_X550EM_a) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+ if (status != IXGBE_SUCCESS)
+ return status;
- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- if (status != IXGBE_SUCCESS)
- return status;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
- /* Enables high temperature failure alarm */
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ /* Enable high temperature failure and global fault alarms */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
&reg);
@@ -959,7 +1909,8 @@ int32_t ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS)
return status;
- reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
+ reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
+ IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
@@ -1016,9 +1967,9 @@ int32_t ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
int32_t status;
uint32_t reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
@@ -1034,13 +1985,102 @@ int32_t ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- return status;
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* Set lane mode to KR auto negotiation */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ }
+
+ return ixgbe_restart_an_internal_phy_x550em(hw);
+}
+
+/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+int32_t ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+ uint32_t store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ int32_t rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+ if (rc)
+ return rc;
+ memset(store, 0, sizeof(store));
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+ if (rc)
+ return rc;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+int32_t ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+ uint32_t store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ int32_t rc;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+ if (rc)
+ return rc;
+
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
+ }
+ return IXGBE_SUCCESS;
+}
+
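ixgbe_check_overtemp_fw() is installed as phy.ops.check_overtemp for the X550EM_A_1G_T parts in ixgbe_init_phy_ops_X550em() below, so a caller only has to look for IXGBE_ERR_OVERTEMP; the firmware PHY has already been shut down by the time that error is returned. A hedged usage sketch, assuming the driver's ixgbe_hw definitions; the wrapper function here is illustrative only:

/* Illustrative caller: poll the PHY for an over-temperature condition
 * and stop the adapter if one is reported. */
static void
example_handle_overtemp(struct ixgbe_hw *hw)
{
	if (hw->phy.ops.check_overtemp == NULL)
		return;		/* no overtemp reporting on this PHY */

	if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
		/* ixgbe_check_overtemp_fw() has already shut the PHY
		 * down; stop passing traffic on this port. */
		hw->mac.ops.stop_adapter(hw);
	}
}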
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read the NW_MNG_IF_SEL register and save the field values used to
+ * determine the internal PHY mode and, on X550EM_a, the MDIO PHY address.
+ **/
+int32_t ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X550EM_a (X553) and MDIO is connected to an external PHY, then set
+ * the PHY address. This register field is only used on X550EM_a.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ }
+
+ return IXGBE_SUCCESS;
}
/**
@@ -1054,31 +2094,57 @@ int32_t ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
int32_t ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- ixgbe_link_speed speed;
int32_t ret_val;
DEBUGFUNC("ixgbe_init_phy_ops_X550em");
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
- if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
- speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- }
phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
}
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+
+ break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ default:
+ break;
+ }
+
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -1098,27 +2164,40 @@ int32_t ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_ext_1g_t:
+ /* link is managed by FW */
+ phy->ops.setup_link = NULL;
+ phy->ops.reset = NULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
-
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
*/
- if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- phy->ops.setup_internal_link =
+ phy->ops.setup_internal_link =
ixgbe_setup_internal_phy_t_x550em;
- } else {
- speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
- }
+
+ /* setup SW LPLU only for first revision of X550EM_x */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ !(IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
+ break;
default:
break;
}
@@ -1126,6 +2205,38 @@ int32_t ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ uint32_t hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* ixgbe_reset_hw_X550em - Perform hardware reset
* @hw: pointer to hardware structure
*
@@ -1139,37 +2250,43 @@ int32_t ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
int32_t status;
uint32_t ctrl = 0;
uint32_t i;
- uint32_t hlreg0;
bool link_up = FALSE;
+ uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X550em");
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
- if (status != IXGBE_SUCCESS)
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
return status;
-
+ }
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- /* Config MDIO clock speed before the first MDIO PHY access */
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
+ ixgbe_set_mdio_speed(hw);
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ status == IXGBE_ERR_PHY_ADDR_INVALID) {
+ DEBUGOUT("Returning from reset HW due to PHY init failure\n");
return status;
+ }
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
status = ixgbe_init_ext_t_x550em(hw);
- if (status)
+ if (status) {
+ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
+ status);
return status;
+ }
}
/* Setup SFP module if there is one present. */
@@ -1182,8 +2299,10 @@ int32_t ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
return status;
/* Reset PHY */
- if (!hw->phy.reset_disable && hw->phy.ops.reset)
- hw->phy.ops.reset(hw);
+ if (!hw->phy.reset_disable && hw->phy.ops.reset) {
+ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
+ return IXGBE_ERR_OVERTEMP;
+ }
mac_reset_top:
/* Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -1198,9 +2317,16 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear meaning reset is complete */
for (i = 0; i < 10; i++) {
@@ -1236,9 +2362,14 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
+ ixgbe_set_mdio_speed(hw);
+
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
+
return status;
}
@@ -1288,17 +2419,24 @@ int32_t ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY.
**/
int32_t ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+ return IXGBE_SUCCESS;
+
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
/**
* ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP
* @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
*
* Configure the external PHY and the integrated KR PHY for SFP support.
**/
@@ -1323,113 +2461,200 @@ int32_t ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- /* Configure CS4227 LINE side to 10G SR. */
- reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = IXGBE_CS4227_SPEED_10G;
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, reg_val);
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, reg_val);
-
- /* Configure CS4227 for HOST connection rate then type. */
- reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, reg_val);
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, reg_val);
-
- /* Setup XFI internal link. */
- ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
- } else {
- /* Configure internal PHY for KR/KX. */
- ixgbe_setup_kr_speed_x550em(hw, speed);
-
- /* Configure CS4227 LINE side to proper mode. */
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227,
- reg_slice, reg_val);
- }
+ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
return ret_val;
}
/**
- * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
* @hw: pointer to hardware structure
* @speed: the link speed to force
*
- * Configures the integrated KR PHY to use iXFI mode. Used to connect an
- * internal and external PHY at a specific speed, without autonegotiation.
+ * Configures the integrated PHY for native SFI mode. Used to connect the
+ * internal PHY directly to an SFP cage, without autonegotiation.
**/
-int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+int32_t ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
+ struct ixgbe_mac_info *mac = &hw->mac;
int32_t status;
uint32_t reg_val;
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Disable all AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
/* Select forced link speed for internal PHY. */
switch (*speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
break;
default:
- /* Other link speeds are not supported by internal KR PHY. */
+ /* Other link speeds are not supported by internal PHY. */
return IXGBE_ERR_LINK_SETUP;
}
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused
+ *
+ * Configure the integrated PHY for SFP support.
+ **/
+int32_t ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ int32_t ret_val;
+ uint16_t reg_phy_ext;
+ bool setup_linear = FALSE;
+ uint32_t reg_slice, reg_phy_int, slice_offset;
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success, since an SFP-not-present
+ * error is not treated as a failure in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
+ /* Configure internal PHY for native SFI based on module type */
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
+ if (!setup_linear)
+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
+
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* Setup SFI internal link. */
+ ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
+ } else {
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
+ /* Find Address */
+ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+
+ /* Flush previous write with a read */
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ }
+ return ret_val;
+}
+
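The slice_offset computation above is the only part of the CS4227/CS4223 handling that differs between the dual- and quad-port retimers: on the quad-port CS4223 the MAC instance id is folded into the slice selected for the port, with slices spaced 0x1000 apart. A self-contained sketch of just that arithmetic with two worked values; the real code then adds IXGBE_CS4227_LINE_SPARE24_LSB to form reg_slice:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the slice selection above: the quad-port CS4223 folds the
 * MAC instance id into the offset, the dual-port CS4227 only uses the
 * LAN id.  Slices are spaced 0x1000 apart. */
static uint32_t
slice_offset(int quad_port, uint32_t lan_id, uint32_t instance_id)
{
	if (quad_port)
		return (lan_id + (instance_id << 1)) << 12;
	return lan_id << 12;
}

int
main(void)
{
	printf("CS4227 port 1:        0x%x\n", slice_offset(0, 1, 0)); /* 0x1000 */
	printf("CS4223 port 1, MAC 1: 0x%x\n", slice_offset(1, 1, 1)); /* 0x3000 */
	return 0;
}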
+/**
+ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
+ * @hw: pointer to hardware structure
+ *
+ * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
+ **/
+int32_t ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ int32_t status;
+ uint32_t reg_val;
/* Disable training protocol FSM. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Disable Flex from training TXFFE. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -1437,12 +2662,12 @@ int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -1450,14 +2675,14 @@ int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Enable override for coefficients. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -1466,22 +2691,68 @@ int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
+ return status;
+}
- /* Toggle port SW reset by AN reset. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+/**
+ * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+int32_t ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ int32_t status;
+ uint32_t reg_val;
+
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+ return IXGBE_ERR_LINK_SETUP;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* Additional configuration needed for x550em_x */
+ if (hw->mac.type == ixgbe_mac_X550EM_x) {
+ status = ixgbe_setup_ixfi_x550em_x(hw);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
return status;
}
@@ -1540,43 +2811,51 @@ int32_t ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
- /* If link is not up, then there is no setup necessary so return */
- status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
- if (status != IXGBE_SUCCESS)
- return status;
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If link is down, there is no setup necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
- if (!link_up)
- return IXGBE_SUCCESS;
+ if (!link_up)
+ return IXGBE_SUCCESS;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &speed);
- if (status != IXGBE_SUCCESS)
- return status;
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+ if (status != IXGBE_SUCCESS)
+ return status;
- /* If link is not still up, then no setup is necessary so return */
- status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
- if (status != IXGBE_SUCCESS)
- return status;
- if (!link_up)
- return IXGBE_SUCCESS;
+ /* If link is still down - no setup is required so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ if (!link_up)
+ return IXGBE_SUCCESS;
- /* clear everything but the speed and duplex bits */
- speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
+ /* clear everything but the speed and duplex bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
- switch (speed) {
- case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
- force_speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
- force_speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- default:
- /* Internal PHY does not support anything else */
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
+ switch (speed) {
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ /* Internal PHY does not support anything else */
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
- return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+ return ixgbe_setup_ixfi_x550em(hw, &force_speed);
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ return ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
}
/**
@@ -1591,57 +2870,57 @@ int32_t ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
uint32_t reg_val;
/* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set near-end loopback clocks. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set loopback enable. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Training bypass. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -1655,13 +2934,13 @@ int32_t ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-int32_t ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, uint16_t offset,
- uint16_t *data)
+int32_t ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data)
{
- int32_t status;
+ const uint32_t mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
+ int32_t status;
- DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
+ DEBUGFUNC("ixgbe_read_ee_hostif_X550");
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
@@ -1671,43 +2950,21 @@ int32_t ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, uint16_t offset,
buffer.address = htobe32(offset * 2);
/* one word */
buffer.length = htobe16(sizeof(uint16_t));
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
- status = ixgbe_host_interface_command(hw, (uint32_t *)&buffer,
- sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT, FALSE);
-
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status)
return status;
- *data = (uint16_t)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- FW_NVM_DATA_OFFSET);
-
- return 0;
-}
-
-/**
- * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EEPROM
- *
- * Reads a 16 bit word from the EEPROM using the hostif.
- **/
-int32_t ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset,
- uint16_t *data)
-{
- int32_t status = IXGBE_SUCCESS;
-
- DEBUGFUNC("ixgbe_read_ee_hostif_X550");
-
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
- IXGBE_SUCCESS) {
- status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
+ status = ixgbe_hic_unlocked(hw, (uint32_t *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
+ if (!status) {
+ *data = (uint16_t)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ FW_NVM_DATA_OFFSET);
}
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
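The hostif read now takes both the SW_MNG and EEP semaphores up front and issues the shadow-RAM command through ixgbe_hic_unlocked(), so callers never handle the semaphores themselves. A hedged usage sketch, assuming the driver's ixgbe_hw definitions; on the X550 family eeprom.ops.read is expected to point at this routine, and the wrapper here is illustrative only:

/* Illustrative caller: fetch one 16-bit word from the NVM shadow RAM
 * through the host interface path shown above. */
static int32_t
example_read_nvm_word(struct ixgbe_hw *hw, uint16_t offset, uint16_t *word)
{
	int32_t status;

	status = hw->eeprom.ops.read(hw, offset, word);
	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("NVM read failed, STATUS = %d\n", status);
	return status;
}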
@@ -1724,6 +2981,7 @@ int32_t ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
uint16_t offset, uint16_t words,
uint16_t *data)
{
+ const uint32_t mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
uint32_t current_word = 0;
uint16_t words_to_read;
@@ -1733,11 +2991,12 @@ int32_t ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
/* Take semaphore for the entire operation. */
- status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ status = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (status) {
DEBUGOUT("EEPROM read buffer - semaphore failed\n");
return status;
}
+
while (words) {
if (words > FW_MAX_READ_BUFFER_SIZE / 2)
words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
@@ -1752,11 +3011,11 @@ int32_t ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/* convert offset from words to bytes */
buffer.address = htobe32((offset + current_word) * 2);
buffer.length = htobe16(words_to_read * 2);
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
- status = ixgbe_host_interface_command(hw, (uint32_t *)&buffer,
- sizeof(buffer),
- IXGBE_HI_COMMAND_TIMEOUT,
- FALSE);
+ status = ixgbe_hic_unlocked(hw, (uint32_t *)&buffer, sizeof(buffer),
+ IXGBE_HI_COMMAND_TIMEOUT);
if (status) {
DEBUGOUT("Host interface command failed\n");
@@ -1781,7 +3040,7 @@ int32_t ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
}
out:
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, mask);
return status;
}
@@ -1838,7 +3097,7 @@ int32_t ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset,
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
- DEBUGOUT("write ee hostif failed to get semaphore\n");
+ DEBUGOUT("write ee hostif failed to get semaphore");
status = IXGBE_ERR_SWFW_SYNC;
}
@@ -1851,6 +3110,8 @@ int32_t ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, uint16_t offset,
* @ptr: pointer offset in eeprom
* @size: size of section pointed by ptr, if 0 first word will be used as size
* @csum: address of checksum to update
+ * @buffer: pointer to buffer containing calculated checksum
+ * @buffer_size: size of buffer
*
* Returns error status for any failure
*/
@@ -2052,7 +3313,8 @@ int32_t ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, uint16_t *check
*/
if (read_checksum != checksum) {
status = IXGBE_ERR_EEPROM_CHECKSUM;
- DEBUGOUT("Invalid EEPROM checksum\n");
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid EEPROM checksum");
}
/* If the user cares, return the calculated checksum */
@@ -2134,9 +3396,9 @@ int32_t ixgbe_update_flash_X550(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-uint32_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+uint64_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
{
- uint32_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ uint64_t physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
uint16_t ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
@@ -2145,6 +3407,21 @@ uint32_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ }
+ }
+ /* fall through */
+ case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
break;
@@ -2161,6 +3438,20 @@ uint32_t ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_fw:
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+ break;
+ case ixgbe_phy_sgmii:
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
default:
break;
}
@@ -2193,19 +3484,29 @@ int32_t ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_disable_rx_x550 - Disable RX unit
+ * @hw: pointer to hardware structure
*
* Enables the Rx DMA unit for x550
**/
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- uint32_t rxctrl;
+ uint32_t rxctrl, pfdtxgswc;
int32_t status;
struct ixgbe_hic_disable_rxen fw_cmd;
- DEBUGFUNC("ixgbe_disable_rx_dma_x550");
+ DEBUGFUNC("ixgbe_enable_rx_dma_x550");
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = TRUE;
+ } else {
+ hw->mac.set_lben = FALSE;
+ }
+
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
@@ -2227,6 +3528,113 @@ void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_enter_lplu_t_x550em - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ **/
+int32_t ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+ uint16_t an_10g_cntl_reg, autoneg_reg, speed;
+ int32_t status;
+ ixgbe_link_speed lcd_speed;
+ uint32_t save_autoneg;
+ bool link_up;
+
+ /* SW LPLU not required on later HW revisions. */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ (IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ return IXGBE_SUCCESS;
+
+ /* If blocked by MNG FW, then don't restart AN */
+ if (ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If link is down, LPLU is disabled in the NVM, or neither WoL nor
+ * manageability is enabled, then force link down by entering low power
+ * mode.
+ */
+ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+ !(hw->wol_enabled || ixgbe_mng_present(hw)))
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ /* Determine LCD */
+ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no valid LCD link speed, then force link down and exit. */
+ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &speed);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* If no link now, speed is invalid so take link down */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+ if (status != IXGBE_SUCCESS)
+ return ixgbe_set_copper_phy_power(hw, FALSE);
+
+ /* clear everything but the speed bits */
+ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+ /* If current speed is already LCD, then exit. */
+ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+ return status;
+
+ /* Clear AN completed indication */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &an_10g_cntl_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ save_autoneg = hw->phy.autoneg_advertised;
+
+ /* Setup link at least common link speed */
+ status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE);
+
+ /* restore autoneg from before setting lplu speed */
+ hw->phy.autoneg_advertised = save_autoneg;
+
+ return status;
+}
+
+/**
* ixgbe_get_lcd_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
@@ -2279,7 +3687,8 @@ int32_t ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
@@ -2314,15 +3723,19 @@ int32_t ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
asm_dir = 1;
break;
default:
- DEBUGOUT("Flow control param set incorrectly\n");
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
ret_val = IXGBE_ERR_CONFIG;
goto out;
}
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- ret_val = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (ret_val != IXGBE_SUCCESS)
goto out;
reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
@@ -2331,12 +3744,18 @@ int32_t ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
if (asm_dir)
reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
- ret_val = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = TRUE;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->fc.disable_fc_autoneg = TRUE;
+ break;
+ default:
+ break;
}
out:
@@ -2344,6 +3763,238 @@ out:
}
/**
+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ uint32_t link_s1, lp_an_page_low, an_cntl_1;
+ int32_t status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+
+ if (status != IXGBE_SUCCESS ||
+ (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = TRUE;
+ } else {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
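ixgbe_negotiate_fc() lives in the shared ixgbe code rather than this file; it resolves the local and link-partner PAUSE/ASM_DIR bits into hw->fc.current_mode following the usual IEEE 802.3 pause-resolution rules. A self-contained sketch of that resolution, under the assumption that this is what the shared helper implements; the enum and function names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* IEEE 802.3 pause resolution: "sym" is the symmetric PAUSE ability bit,
 * "asm_dir" the asymmetric-direction bit, for the local advertisement and
 * the link partner (lp_*).  "want_rx_only" models the case where software
 * requested Rx-only pause but had to advertise full flow control. */
static enum fc_mode
resolve_pause(bool sym, bool asm_dir, bool lp_sym, bool lp_asm,
    bool want_rx_only)
{
	if (sym && lp_sym)
		return want_rx_only ? FC_RX_PAUSE : FC_FULL;
	if (!sym && asm_dir && lp_sym && lp_asm)
		return FC_TX_PAUSE;
	if (sym && asm_dir && !lp_sym && lp_asm)
		return FC_RX_PAUSE;
	return FC_NONE;
}

int
main(void)
{
	/* Local side advertises both bits (ixgbe_fc_full above); the link
	 * partner only advertises asymmetric pause: resolves to Rx-only. */
	printf("%d\n", resolve_pause(true, true, false, true, false));
	return 0;
}

For the backplane case above, the local bits come from IXGBE_KRM_AN_CNTL_1 and the partner bits from IXGBE_KRM_LP_BASE_PAGE_HIGH; the SGMII variant feeds the FW_PHY_ACT_GET_LINK_INFO_FC/LP_FC bits through the same resolution.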
+/**
+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
+{
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ int32_t status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ uint32_t info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+ if (status != IXGBE_SUCCESS ||
+ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Negotiate the flow control */
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = TRUE;
+ } else {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+int32_t ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+{
+ int32_t status = IXGBE_SUCCESS;
+ uint32_t an_cntl = 0;
+
+ DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do FC autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ /* The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+
+ /* Restart auto-negotiation. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
* ixgbe_set_mux - Set mux for port 1 access with CS4227
* @hw: pointer to hardware structure
* @state: set mux if 1, clear if 0
@@ -2404,6 +4055,134 @@ void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, uint32_t mask)
}
/**
+ * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+int32_t ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, uint32_t mask)
+{
+ uint32_t hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ int32_t status = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
+
+ while (--retries) {
+ status = IXGBE_SUCCESS;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
+ status);
+ return status;
+ }
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return IXGBE_SUCCESS;
+
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
+
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
+ status);
+ return status;
+ }
+ }
+
+ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
+ */
+void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, uint32_t mask)
+{
+ uint32_t hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_release_swfw_sync_X550a");
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between the two MAC
+ * instances.
+ **/
+int32_t ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint16_t *phy_data)
+{
+ int32_t status;
+ uint32_t mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to the specified PHY register using the SWFW lock and PHY
+ * Token, which is needed since the MDIO is shared between the two MAC instances.
+ **/
+int32_t ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, uint32_t reg_addr,
+ uint32_t device_type, uint16_t phy_data)
+{
+ int32_t status;
+ uint32_t mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
+
+/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
*
@@ -2424,7 +4203,7 @@ int32_t ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS)
return status;
- if (lsc && hw->phy.ops.setup_internal_link)
+ if (lsc)
return hw->phy.ops.setup_internal_link(hw);
return IXGBE_SUCCESS;
@@ -2458,8 +4237,10 @@ int32_t ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
else
force_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If internal link mode is XFI, then setup XFI internal link. */
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If X552 and internal link mode is XFI, then setup XFI internal link.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
if (status != IXGBE_SUCCESS)
@@ -2482,7 +4263,7 @@ int32_t ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
uint32_t status;
- uint16_t autoneg_status;
+ uint16_t i, autoneg_status = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
@@ -2495,21 +4276,18 @@ int32_t ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return status;
/* MAC link is up, so check external PHY link.
- * Read this twice back to back to indicate current status.
+ * X557 PHY. The link status bit latches low, so a single read only shows
+ * that the link dropped at some point; back-to-back reads are needed to
+ * report the current state of the link.
*/
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
-
- if (status != IXGBE_SUCCESS)
- return status;
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
-
- if (status != IXGBE_SUCCESS)
- return status;
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* If external PHY link is not up, then indicate link not up */
if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
@@ -2549,14 +4327,18 @@ int32_t ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, uint32_t led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return IXGBE_ERR_PARAM;
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_on_generic(hw, led_idx);
}
/**
@@ -2573,12 +4355,16 @@ int32_t ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, uint32_t led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return IXGBE_ERR_PARAM;
+ if (hw->phy.id == 0)
+ ixgbe_identify_phy(hw);
+
/* To turn on the LED, set mode to ON. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_off_generic(hw, led_idx);
}