-rw-r--r--  sys/dev/pci/if_ix.c       | 1453
-rw-r--r--  sys/dev/pci/if_ix.h       |  207
-rw-r--r--  sys/dev/pci/ixgbe.c       | 1875
-rw-r--r--  sys/dev/pci/ixgbe.h       |   60
-rw-r--r--  sys/dev/pci/ixgbe_82598.c |  210
-rw-r--r--  sys/dev/pci/ixgbe_82599.c | 1386
-rw-r--r--  sys/dev/pci/ixgbe_phy.c   |  247
-rw-r--r--  sys/dev/pci/ixgbe_type.h  |  405
8 files changed, 3897 insertions(+), 1946 deletions(-)
diff --git a/sys/dev/pci/if_ix.c b/sys/dev/pci/if_ix.c
index 8a2d5488ba5..41a295c2dfc 100644
--- a/sys/dev/pci/if_ix.c
+++ b/sys/dev/pci/if_ix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.c,v 1.51 2011/04/15 15:12:27 chl Exp $ */
+/* $OpenBSD: if_ix.c,v 1.52 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -55,6 +55,7 @@ const struct pci_matchid ixgbe_devices[] = {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
@@ -66,9 +67,15 @@ const struct pci_matchid ixgbe_devices[] = {
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BACKPLANE },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
- { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM }
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE }
+#if 0
+ { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599VF }
+#endif
};
/*********************************************************************
@@ -91,8 +98,8 @@ int ixgbe_allocate_legacy(struct ix_softc *);
int ixgbe_allocate_queues(struct ix_softc *);
void ixgbe_free_pci_resources(struct ix_softc *);
void ixgbe_local_timer(void *);
-int ixgbe_hardware_init(struct ix_softc *);
void ixgbe_setup_interface(struct ix_softc *);
+void ixgbe_config_link(struct ix_softc *sc);
int ixgbe_allocate_transmit_buffers(struct tx_ring *);
int ixgbe_setup_transmit_structures(struct ix_softc *);
@@ -113,8 +120,8 @@ void ixgbe_enable_intr(struct ix_softc *);
void ixgbe_disable_intr(struct ix_softc *);
void ixgbe_update_stats_counters(struct ix_softc *);
int ixgbe_txeof(struct tx_ring *);
-int ixgbe_rxeof(struct rx_ring *, int);
-void ixgbe_rx_checksum(struct ix_softc *, uint32_t, struct mbuf *);
+int ixgbe_rxeof(struct ix_queue *, int);
+void ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
void ixgbe_set_promisc(struct ix_softc *);
void ixgbe_disable_promisc(struct ix_softc *);
void ixgbe_set_multi(struct ix_softc *);
@@ -124,7 +131,6 @@ void ixgbe_print_hw_stats(struct ix_softc *);
void ixgbe_update_link_status(struct ix_softc *);
int ixgbe_get_buf(struct rx_ring *, int);
int ixgbe_encap(struct tx_ring *, struct mbuf *);
-void ixgbe_enable_hw_vlans(struct ix_softc * sc);
int ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
struct ixgbe_dma_alloc *, int);
void ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
@@ -134,8 +140,18 @@ void ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
void ixgbe_configure_ivars(struct ix_softc *);
uint8_t *ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
+void ixgbe_setup_vlan_hw_support(struct ix_softc *);
+
+/* Support for pluggable optic modules */
+int ixgbe_sfp_probe(struct ix_softc *);
+void ixgbe_setup_optics(struct ix_softc *);
+
/* Legacy (single vector) interrupt handler */
int ixgbe_legacy_irq(void *);
+void ixgbe_enable_queue(struct ix_softc *, uint32_t);
+void ixgbe_disable_queue(struct ix_softc *, uint32_t);
+void ixgbe_rearm_queue(struct ix_softc *, uint32_t);
+void ixgbe_handle_que(void *, int);
/*********************************************************************
* OpenBSD Device Interface Entry Points
@@ -155,7 +171,7 @@ int ixgbe_smart_speed = ixgbe_smart_speed_on;
* Device identification routine
*
* ixgbe_probe determines if the driver should be loaded on
- * sc based on PCI vendor/device id of the sc.
+ * adapter based on PCI vendor/device id of the adapter.
*
* return 0 on success, positive on failure
*********************************************************************/
@@ -204,7 +220,6 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
ixgbe_identify_hardware(sc);
/* Indicate to RX setup to use Jumbo Clusters */
- sc->bigbufs = FALSE;
sc->num_tx_desc = DEFAULT_TXD;
sc->num_rx_desc = DEFAULT_RXD;
sc->rx_process_limit = 100; // XXX
@@ -217,11 +232,31 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
if (ixgbe_allocate_queues(sc))
goto err_out;
+ /* Allocate multicast array memory. */
+ sc->mta = malloc(sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+ MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+ if (sc->mta == 0) {
+ printf(": Can not allocate multicast setup array\n");
+ goto err_late;
+ }
+
/* Initialize the shared code */
- if (hw->mac.type == ixgbe_mac_82598EB)
- error = ixgbe_init_ops_82598(&sc->hw);
- else
- error = ixgbe_init_ops_82599(&sc->hw);
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ error = ixgbe_init_ops_82598(hw);
+ break;
+ case ixgbe_mac_82599EB:
+ error = ixgbe_init_ops_82599(hw);
+ break;
+#if 0
+ case ixgbe_mac_82599_vf:
+ error = ixgbe_init_ops_vf(hw);
+ break;
+#endif
+ default:
+ error = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+ break;
+ }
if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
/*
* No optics in this port, set up
@@ -232,25 +267,18 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
error = 0;
} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
printf(": Unsupported SFP+ module detected!\n");
- error = EIO;
goto err_late;
} else if (error) {
printf(": Unable to initialize the shared code\n");
- error = EIO;
goto err_late;
}
/* Make sure we have a good EEPROM before we read from it */
if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
printf(": The EEPROM Checksum Is Not Valid\n");
- error = EIO;
goto err_late;
}
- /* Pick up the smart speed setting */
- if (sc->hw.mac.type == ixgbe_mac_82599EB)
- sc->hw.phy.smart_speed = ixgbe_smart_speed;
-
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.pause_time = IXGBE_FC_PAUSE;
@@ -288,6 +316,9 @@ ixgbe_attach(struct device *parent, struct device *self, void *aux)
/* Initialize statistics */
ixgbe_update_stats_counters(sc);
+ /* Print PCIE bus type/speed/width info */
+ hw->mac.ops.get_bus_info(hw);
+
/* let hardware know driver is loaded */
ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -303,6 +334,7 @@ err_late:
ixgbe_free_receive_structures(sc);
err_out:
ixgbe_free_pci_resources(sc);
+ free(sc->mta, M_DEVBUF);
}
/*********************************************************************
@@ -339,6 +371,7 @@ ixgbe_detach(struct device *self, int flags)
ixgbe_free_transmit_structures(sc);
ixgbe_free_receive_structures(sc);
+ free(sc->mta, M_DEVBUF);
return (0);
}
@@ -404,7 +437,7 @@ ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
*/
if (post)
IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
- txr->next_avail_tx_desc);
+ txr->next_avail_desc);
}
@@ -423,8 +456,8 @@ ixgbe_start(struct ifnet *ifp)
* and appropriate logic here to deal with
* it. -jfv
*/
- if (sc->num_tx_queues > 1)
- queue = (curcpu % sc->num_tx_queues);
+ if (sc->num_queues > 1)
+ queue = (curcpu % sc->num_queues);
#endif
txr = &sc->tx_rings[queue];
@@ -542,7 +575,7 @@ ixgbe_watchdog(struct ifnet * ifp)
* Finally, anytime all descriptors are clean the timer is
* set to 0.
*/
- for (i = 0; i < sc->num_tx_queues; i++, txr++) {
+ for (i = 0; i < sc->num_queues; i++, txr++) {
if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
continue;
else {
@@ -558,7 +591,7 @@ ixgbe_watchdog(struct ifnet * ifp)
* reset the hardware.
*/
if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
- for (i = 0; i < sc->num_tx_queues; i++, txr++)
+ for (i = 0; i < sc->num_queues; i++, txr++)
txr->watchdog_timer = IXGBE_TX_TIMEOUT;
ifp->if_timer = IXGBE_TX_TIMEOUT;
return;
@@ -566,12 +599,12 @@ ixgbe_watchdog(struct ifnet * ifp)
printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
- for (i = 0; i < sc->num_tx_queues; i++, txr++) {
+ for (i = 0; i < sc->num_queues; i++, txr++) {
printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
IXGBE_READ_REG(hw, IXGBE_TDH(i)),
IXGBE_READ_REG(hw, IXGBE_TDT(i)));
printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
- i, txr->tx_avail, txr->next_tx_to_clean);
+ i, txr->tx_avail, txr->next_to_clean);
}
ifp->if_flags &= ~IFF_RUNNING;
sc->watchdog_events++;
@@ -598,7 +631,7 @@ ixgbe_init(void *arg)
struct ix_softc *sc = (struct ix_softc *)arg;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct rx_ring *rxr = sc->rx_rings;
- uint32_t k, txdctl, rxdctl, mhadd, gpie;
+ uint32_t k, txdctl, rxdctl, rxctrl, mhadd, gpie;
int err;
int i, s;
@@ -608,18 +641,15 @@ ixgbe_init(void *arg)
ixgbe_stop(sc);
+ /* reprogram the RAR[0] in case user changed it. */
+ ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
+
/* Get the latest mac address, User can use a LAA */
bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
IXGBE_ETH_LENGTH_OF_ADDRESS);
ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, 1);
sc->hw.addr_ctrl.rar_used_count = 1;
- /* Do a warm reset */
- sc->hw.mac.ops.reset_hw(&sc->hw);
-
- if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
- ixgbe_enable_hw_vlans(sc);
-
/* Prepare transmit descriptors and buffers */
if (ixgbe_setup_transmit_structures(sc)) {
printf("%s: Could not setup transmit structures\n",
@@ -629,21 +659,26 @@ ixgbe_init(void *arg)
return;
}
+ ixgbe_hw0(&sc->hw, init_hw);
ixgbe_initialize_transmit_units(sc);
/* Setup Multicast table */
ixgbe_set_multi(sc);
- /*
- * If we are resetting MTU smaller than 2K
- * drop to small RX buffers
- */
- if (sc->max_frame_size <= MCLBYTES)
- sc->bigbufs = FALSE;
+ /* Determine the correct buffer size for jumbo/headersplit */
+ if (sc->max_frame_size <= 2048)
+ sc->rx_mbuf_sz = MCLBYTES;
+ else if (sc->max_frame_size <= 4096)
+ sc->rx_mbuf_sz = 4096;
+ else if (sc->max_frame_size <= 9216)
+ sc->rx_mbuf_sz = 9216;
+ else
+ sc->rx_mbuf_sz = 16 * 1024;
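The ladder above buckets max_frame_size into the cluster sizes the kernel's
mbuf pools actually back. A minimal sketch of the same mapping as a
standalone helper (the function name is hypothetical; the 2K/4K/9K/16K pool
sizes are assumed from the MCLGETI clusters used elsewhere in this diff):

    /* Sketch: pick the smallest backing cluster that fits one frame. */
    static int
    ix_rx_mbuf_size(int max_frame)
    {
            if (max_frame <= MCLBYTES)      /* 2048 */
                    return (MCLBYTES);
            if (max_frame <= 4096)
                    return (4096);
            if (max_frame <= 9216)
                    return (9216);
            return (16 * 1024);
    }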
/* Prepare receive descriptors and buffers */
if (ixgbe_setup_receive_structures(sc)) {
- printf("%s: Could not setup receive structures\n", ifp->if_xname);
+ printf("%s: Could not setup receive structures\n",
+ ifp->if_xname);
ixgbe_stop(sc);
splx(s);
return;
@@ -654,20 +689,14 @@ ixgbe_init(void *arg)
gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
- if (sc->hw.mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN;
+ /* Enable Fan Failure Interrupt */
+ gpie |= IXGBE_SDP1_GPIEN;
+
+ /* Add for Thermal detection */
+ if (sc->hw.mac.type == ixgbe_mac_82599EB)
gpie |= IXGBE_SDP2_GPIEN;
- /*
- * Set LL interval to max to reduce the number of low latency
- * interrupts hitting the card when the ring is getting full.
- */
- gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
- }
- /* Enable Fan Failure Interrupt */
- if (sc->hw.phy.media_type == ixgbe_media_type_copper)
- gpie |= IXGBE_SDP1_GPIEN;
- if (sc->msix) {
+ if (sc->msix > 1) {
/* Enable Enhanced MSIX mode */
gpie |= IXGBE_GPIE_MSIX_MODE;
gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
@@ -685,7 +714,7 @@ ixgbe_init(void *arg)
/* Now enable all the queues */
- for (i = 0; i < sc->num_tx_queues; i++) {
+ for (i = 0; i < sc->num_queues; i++) {
txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
txdctl |= IXGBE_TXDCTL_ENABLE;
/* Set WTHRESH to 8, burst writeback */
@@ -693,11 +722,16 @@ ixgbe_init(void *arg)
IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
}
- for (i = 0; i < sc->num_rx_queues; i++) {
+ for (i = 0; i < sc->num_queues; i++) {
rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
if (sc->hw.mac.type == ixgbe_mac_82598EB) {
- /* PTHRESH set to 32 */
- rxdctl |= 0x0020;
+ /*
+ * PTHRESH = 32 (0x20)
+ * HTHRESH = 4
+ * WTHRESH = 8
+ */
+ rxdctl &= ~0x3FFFFF;
+ rxdctl |= 0x080420;
}
rxdctl |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
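For reference, the magic constant above packs three 7-bit threshold fields.
A sketch of the layout (field positions assumed from the 82598 RXDCTL
register definition; the macros are hypothetical, not part of this commit):

    #define IX_RXDCTL_PTHRESH(x)    (((x) & 0x7f) << 0)   /* prefetch */
    #define IX_RXDCTL_HTHRESH(x)    (((x) & 0x7f) << 8)   /* host */
    #define IX_RXDCTL_WTHRESH(x)    (((x) & 0x7f) << 16)  /* writeback */
    /*
     * 0x080420 == IX_RXDCTL_WTHRESH(8) | IX_RXDCTL_HTHRESH(4) |
     *             IX_RXDCTL_PTHRESH(0x20)
     */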
@@ -708,34 +742,68 @@ ixgbe_init(void *arg)
else
msec_delay(1);
}
- IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_rx_desc_filled);
+ /* XXX wmb() : memory barrier */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
}
+ /* Set up VLAN support and filter */
+ ixgbe_setup_vlan_hw_support(sc);
+
+ /* Enable Receive engine */
+ rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
+ if (sc->hw.mac.type == ixgbe_mac_82598EB)
+ rxctrl |= IXGBE_RXCTRL_DMBYPS;
+ rxctrl |= IXGBE_RXCTRL_RXEN;
+ ixgbe_hw(&sc->hw, enable_rx_dma, rxctrl);
+
timeout_add_sec(&sc->timer, 1);
#ifdef MSI
/* Set up MSI/X routing */
- if (ixgbe_enable_msix)
+ if (ixgbe_enable_msix) {
ixgbe_configure_ivars(sc);
- else /* Simple settings for Legacy/MSI */
+ /* Set up auto-mask */
+ if (sc->hw.mac.type == ixgbe_mac_82598EB)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ else {
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ }
+ } else /* Simple settings for Legacy/MSI */
#else
{
ixgbe_set_ivar(sc, 0, 0, 0);
ixgbe_set_ivar(sc, 0, 0, 1);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
}
#endif
+#ifdef IXGBE_FDIR
+ /* Init Flow director */
+ if (sc->hw.mac.type != ixgbe_mac_82598EB)
+ ixgbe_init_fdir_signature_82599(&sc->hw, fdir_pballoc);
+#endif
+
/*
* Check on any SFP devices that
* need to be kick-started
*/
- err = sc->hw.phy.ops.identify(&sc->hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- printf("Unsupported SFP+ module type was detected.\n");
- splx(s);
- return;
- }
+ if (sc->hw.phy.type == ixgbe_phy_none) {
+ err = sc->hw.phy.ops.identify(&sc->hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ printf("Unsupported SFP+ module type was detected.\n");
+ splx(s);
+ return;
+ }
+ }
+
+ /* Set moderation on the Link interrupt */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(sc->linkvec), IXGBE_LINK_ITR);
+
+ /* Config/Enable Link */
+ ixgbe_config_link(sc);
+ /* And now turn on interrupts */
ixgbe_enable_intr(sc);
/* Now inform the stack we're ready */
@@ -745,6 +813,92 @@ ixgbe_init(void *arg)
splx(s);
}
+/*
+ * MSIX Interrupt Handlers
+ */
+void
+ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
+{
+ uint64_t queue = 1ULL << vector;
+ uint32_t mask;
+
+ if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
+ }
+}
+
+void
+ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
+{
+ uint64_t queue = 1ULL << vector;
+ uint32_t mask;
+
+ if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
+ }
+}
+
+void
+ixgbe_rearm_queue(struct ix_softc *sc, uint32_t vector)
+{
+ uint64_t queue = 1ULL << vector;
+ uint32_t mask;
+
+ if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
+ } else {
+ mask = (queue & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
+ mask = (queue >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
+ }
+}
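The three handlers above differ only in the register they hit: EIMS (set
mask), EIMC (clear mask) or EICS (set cause, i.e. rearm). On 82598 the
per-queue bits are filtered through IXGBE_EIMS_RTX_QUEUE in a single 32-bit
register; on 82599 the 64-bit queue mask spans an _EX register pair. A
hedged sketch of a shared helper (a hypothetical refactoring, not something
this commit does):

    static void
    ix_write_queue_mask(struct ix_softc *sc, uint32_t vector,
        uint32_t reg598, uint32_t reg599_lo, uint32_t reg599_hi)
    {
            uint64_t queue = 1ULL << vector;

            if (sc->hw.mac.type == ixgbe_mac_82598EB)
                    IXGBE_WRITE_REG(&sc->hw, reg598,
                        IXGBE_EIMS_RTX_QUEUE & (uint32_t)queue);
            else if (queue & 0xffffffffULL)
                    IXGBE_WRITE_REG(&sc->hw, reg599_lo, (uint32_t)queue);
            else
                    IXGBE_WRITE_REG(&sc->hw, reg599_hi,
                        (uint32_t)(queue >> 32));
    }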
+
+void
+ixgbe_handle_que(void *context, int pending)
+{
+ struct ix_queue *que = context;
+ struct ix_softc *sc = que->sc;
+ struct tx_ring *txr = que->txr;
+ struct ifnet *ifp = &que->sc->arpcom.ac_if;
+
+ if (ifp->if_flags & IFF_RUNNING) {
+ ixgbe_rxeof(que, -1 /* XXX sc->rx_process_limit */);
+ ixgbe_txeof(txr);
+
+ if (ixgbe_rxfill(que->rxr)) {
+ /* Advance the Rx Queue "Tail Pointer" */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
+ que->rxr->last_desc_filled);
+ }
+
+ if (!IFQ_IS_EMPTY(&ifp->if_snd))
+ ixgbe_start_locked(txr, ifp);
+ }
+
+ /* Reenable this interrupt */
+ ixgbe_enable_queue(que->sc, que->msix);
+}
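The rxfill/RDT sequence above is the refill contract used throughout this
diff: ixgbe_rxeof() consumes descriptors, ixgbe_rxfill() reloads them, and
advancing the tail pointer hands the slots back to hardware. A sketch of
the ownership rule (an assumption, restating the pattern above):

    /*
     * Hardware fills descriptors from RDH up to (but not including)
     * RDT; software refills consumed slots, then moves RDT forward.
     */
    if (ixgbe_rxfill(que->rxr))
            IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
                que->rxr->last_desc_filled);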
+
/*********************************************************************
*
* Legacy Interrupt Service routine
@@ -755,19 +909,22 @@ int
ixgbe_legacy_irq(void *arg)
{
struct ix_softc *sc = (struct ix_softc *)arg;
+ struct ix_queue *que = sc->queues;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct tx_ring *txr = sc->tx_rings;
- struct rx_ring *rxr = sc->rx_rings;
struct ixgbe_hw *hw = &sc->hw;
uint32_t reg_eicr;
int refill = 0;
reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
- if (reg_eicr == 0)
+ if (reg_eicr == 0) {
+ ixgbe_enable_intr(sc);
return (0);
+ }
+ ++que->irqs;
if (ifp->if_flags & IFF_RUNNING) {
- ixgbe_rxeof(rxr, -1);
+ ixgbe_rxeof(que, -1);
ixgbe_txeof(txr);
refill = 1;
}
@@ -775,7 +932,7 @@ ixgbe_legacy_irq(void *arg)
/* Check for fan failure */
if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(reg_eicr & IXGBE_EICR_GPI_SDP1)) {
- printf("%s: \nCRITICAL: FAN FAILURE!! "
+ printf("\n%s: CRITICAL: FAN FAILURE!! "
"REPLACE IMMEDIATELY!!\n", ifp->if_xname);
IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS,
IXGBE_EICR_GPI_SDP1);
@@ -788,15 +945,16 @@ ixgbe_legacy_irq(void *arg)
timeout_add_sec(&sc->timer, 1);
}
- if (refill && ixgbe_rxfill(rxr)) {
+ if (refill && ixgbe_rxfill(que->rxr)) {
/* Advance the Rx Queue "Tail Pointer" */
- IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
- rxr->last_rx_desc_filled);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
+ que->rxr->last_desc_filled);
}
if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
ixgbe_start_locked(txr, ifp);
+ ixgbe_enable_intr(sc);
return (1);
}
@@ -863,9 +1021,9 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
struct ix_softc *sc = txr->sc;
uint32_t olinfo_status = 0, cmd_type_len = 0;
int i, j, error;
- int first;
+ int first, last = 0;
bus_dmamap_t map;
- struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
+ struct ixgbe_tx_buf *txbuf;
union ixgbe_adv_tx_desc *txd = NULL;
uint32_t paylen = 0;
@@ -878,6 +1036,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
#endif
+#if 0
/*
* Force a cleanup if number of TX descriptors
* available is below the threshold. If it fails
@@ -887,19 +1046,19 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
ixgbe_txeof(txr);
/* Make sure things have improved */
if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
- txr->no_tx_desc_avail++;
+ txr->no_desc_avail++;
return (ENOBUFS);
}
}
+#endif
/*
* Important to capture the first descriptor
* used because it will contain the index of
* the one we tell the hardware to report back
*/
- first = txr->next_avail_tx_desc;
+ first = txr->next_avail_desc;
txbuf = &txr->tx_buffers[first];
- txbuf_mapped = txbuf;
map = txbuf->map;
/*
@@ -907,7 +1066,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
*/
error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
m_head, BUS_DMA_NOWAIT);
-
+ /* XXX EFBIG */
if (error == ENOMEM) {
sc->no_tx_dma_setup++;
return (error);
@@ -918,7 +1077,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
/* Make certain there are enough descriptors */
if (map->dm_nsegs > txr->tx_avail - 2) {
- txr->no_tx_desc_avail++;
+ txr->no_desc_avail++;
error = ENOBUFS;
goto xmit_fail;
}
@@ -945,7 +1104,7 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
olinfo_status |= m_head->m_pkthdr.len <<
IXGBE_ADVTXD_PAYLEN_SHIFT;
- i = txr->next_avail_tx_desc;
+ i = txr->next_avail_desc;
for (j = 0; j < map->dm_nsegs; j++) {
txbuf = &txr->tx_buffers[i];
txd = &txr->tx_base[i];
@@ -954,26 +1113,30 @@ ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
txd->read.cmd_type_len = htole32(txr->txd_cmd |
cmd_type_len | map->dm_segs[j].ds_len);
txd->read.olinfo_status = htole32(olinfo_status);
+ last = i; /* descriptor that will get completion IRQ */
if (++i == sc->num_tx_desc)
i = 0;
txbuf->m_head = NULL;
+ txbuf->eop_index = -1;
}
txd->read.cmd_type_len |=
htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
txr->tx_avail -= map->dm_nsegs;
- txr->next_avail_tx_desc = i;
+ txr->next_avail_desc = i;
txbuf->m_head = m_head;
- txbuf_mapped->map = txbuf->map;
+ /* swap maps because last tx descriptor is tracking all the data */
+ txr->tx_buffers[first].map = txbuf->map;
txbuf->map = map;
bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
BUS_DMASYNC_PREWRITE);
/* Set the index of the descriptor that will be marked done */
txbuf = &txr->tx_buffers[first];
+ txbuf->eop_index = last;
++txr->tx_packets;
return (0);
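The map swap above is subtle: the DMA map that was actually loaded travels
to the last buffer slot together with the mbuf chain, while the first slot
keeps a spare map and records where the completion descriptor lives. A
sketch of the resulting invariants (hypothetical asserts, shown out of
line for illustration only):

    KASSERT(txr->tx_buffers[first].eop_index == last);
    KASSERT(txr->tx_buffers[last].m_head == m_head);
    KASSERT(txr->tx_buffers[last].map == map);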
@@ -1010,9 +1173,7 @@ ixgbe_disable_promisc(struct ix_softc * sc)
uint32_t reg_rctl;
reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
-
- reg_rctl &= (~IXGBE_FCTRL_UPE);
- reg_rctl &= (~IXGBE_FCTRL_MPE);
+ reg_rctl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
return;
@@ -1025,13 +1186,11 @@ ixgbe_disable_promisc(struct ix_softc * sc)
* This routine is called whenever multicast address list is updated.
*
**********************************************************************/
-#define IXGBE_RAR_ENTRIES 16
-
void
ixgbe_set_multi(struct ix_softc *sc)
{
uint32_t fctrl;
- uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ uint8_t *mta;
uint8_t *update_ptr;
struct ether_multi *enm;
struct ether_multistep step;
@@ -1040,6 +1199,10 @@ ixgbe_set_multi(struct ix_softc *sc)
IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
+ mta = sc->mta;
+ bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+ MAX_NUM_MULTICAST_ADDRESSES);
+
fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (ifp->if_flags & IFF_PROMISC)
@@ -1111,15 +1274,20 @@ ixgbe_local_timer(void *arg)
s = splnet();
+ /* Check for pluggable optics */
+ if (sc->sfp_probe)
+ if (!ixgbe_sfp_probe(sc))
+ goto out; /* Nothing to do */
+
ixgbe_update_link_status(sc);
ixgbe_update_stats_counters(sc);
+out:
#ifdef IX_DEBUG
if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
(IFF_RUNNING|IFF_DEBUG))
ixgbe_print_hw_stats(sc);
#endif
-
timeout_add_sec(&sc->timer, 1);
splx(s);
@@ -1162,7 +1330,7 @@ ixgbe_update_link_status(struct ix_softc *sc)
} else {
ifp->if_baudrate = 0;
ifp->if_timer = 0;
- for (i = 0; i < sc->num_tx_queues; i++)
+ for (i = 0; i < sc->num_queues; i++)
txr[i].watchdog_timer = FALSE;
}
@@ -1194,11 +1362,15 @@ ixgbe_stop(void *arg)
ixgbe_hw0(&sc->hw, reset_hw);
sc->hw.adapter_stopped = FALSE;
ixgbe_hw0(&sc->hw, stop_adapter);
+ /* Turn off the laser */
+ if (sc->hw.phy.multispeed_fiber)
+ ixgbe_hw0(&sc->hw, disable_tx_laser);
timeout_del(&sc->timer);
/* reprogram the RAR[0] in case user changed it. */
ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
+ /* Should we really clear all structures on stop? */
ixgbe_free_transmit_structures(sc);
ixgbe_free_receive_structures(sc);
}
@@ -1246,8 +1418,9 @@ ixgbe_identify_hardware(struct ix_softc *sc)
sc->hw.mac.type = ixgbe_mac_82598EB;
sc->optics = IFM_10G_LR;
break;
- case PCI_PRODUCT_INTEL_82598AT_DUAL:
case PCI_PRODUCT_INTEL_82598AT:
+ case PCI_PRODUCT_INTEL_82598AT2:
+ case PCI_PRODUCT_INTEL_82598AT_DUAL:
sc->hw.mac.type = ixgbe_mac_82598EB;
sc->optics = IFM_10G_T;
break;
@@ -1257,19 +1430,34 @@ ixgbe_identify_hardware(struct ix_softc *sc)
break;
case PCI_PRODUCT_INTEL_82599_SFP:
case PCI_PRODUCT_INTEL_82599_SFP_EM:
+ case PCI_PRODUCT_INTEL_82599_SFP_FCOE:
sc->hw.mac.type = ixgbe_mac_82599EB;
sc->optics = IFM_10G_SR;
+ sc->hw.phy.smart_speed = ixgbe_smart_speed;
break;
case PCI_PRODUCT_INTEL_82599_KX4:
case PCI_PRODUCT_INTEL_82599_KX4_MEZZ:
case PCI_PRODUCT_INTEL_82599_CX4:
sc->hw.mac.type = ixgbe_mac_82599EB;
sc->optics = IFM_10G_CX4;
+ sc->hw.phy.smart_speed = ixgbe_smart_speed;
+ break;
+ case PCI_PRODUCT_INTEL_82599_T3_LOM:
+ sc->hw.mac.type = ixgbe_mac_82599EB;
+ sc->optics = IFM_10G_T;
+ sc->hw.phy.smart_speed = ixgbe_smart_speed;
break;
case PCI_PRODUCT_INTEL_82599_XAUI:
case PCI_PRODUCT_INTEL_82599_COMBO_BACKPLANE:
+ case PCI_PRODUCT_INTEL_82599_BPLANE_FCOE:
sc->hw.mac.type = ixgbe_mac_82599EB;
sc->optics = IFM_AUTO;
+ sc->hw.phy.smart_speed = ixgbe_smart_speed;
+ break;
+ case PCI_PRODUCT_INTEL_82599VF:
+ sc->hw.mac.type = ixgbe_mac_82599_vf;
+ sc->optics = IFM_AUTO;
+ sc->hw.phy.smart_speed = ixgbe_smart_speed;
break;
default:
sc->optics = IFM_AUTO;
@@ -1279,6 +1467,50 @@ ixgbe_identify_hardware(struct ix_softc *sc)
/*********************************************************************
*
+ * Determine optic type
+ *
+ **********************************************************************/
+void
+ixgbe_setup_optics(struct ix_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ int layer;
+
+ layer = ixgbe_hw(hw, get_supported_physical_layer);
+ switch (layer) {
+ case IXGBE_PHYSICAL_LAYER_10GBASE_T:
+ sc->optics = IFM_10G_T;
+ break;
+ case IXGBE_PHYSICAL_LAYER_1000BASE_T:
+ sc->optics = IFM_1000_T;
+ break;
+ case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
+ case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
+ sc->optics = IFM_10G_LR;
+ break;
+ case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
+ sc->optics = IFM_10G_SR;
+ break;
+ case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
+ case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
+ sc->optics = IFM_10G_CX4;
+ break;
+ case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
+ sc->optics = IFM_10G_SFP_CU;
+ break;
+ case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
+ case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
+ case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
+ case IXGBE_PHYSICAL_LAYER_UNKNOWN:
+ default:
+ sc->optics = IFM_ETHER | IFM_AUTO;
+ break;
+ }
+ return;
+}
+
+/*********************************************************************
+ *
* Setup the Legacy or MSI Interrupt handler
*
**********************************************************************/
@@ -1292,20 +1524,25 @@ ixgbe_allocate_legacy(struct ix_softc *sc)
pci_chipset_tag_t pc = pa->pa_pc;
pci_intr_handle_t ih;
- /* Legacy RID at 0 */
- if (sc->msix == 0)
- sc->rid[0] = 0;
-
/* We allocate a single interrupt resource */
- if (pci_intr_map(pa, &ih)) {
+ if (/* pci_intr_map_msi(pa, &ih) != 0 && */
+ pci_intr_map(pa, &ih) != 0) {
printf(": couldn't map interrupt\n");
return (ENXIO);
}
+#if 0
+ /* XXX */
+ /* Tasklets for Link, SFP and Multispeed Fiber */
+ TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
+ TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
+ TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
+#endif
+
intrstr = pci_intr_string(pc, ih);
- sc->tag[0] = pci_intr_establish(pc, ih, IPL_NET,
+ sc->tag = pci_intr_establish(pc, ih, IPL_NET,
ixgbe_legacy_irq, sc, ifp->if_xname);
- if (sc->tag[0] == NULL) {
+ if (sc->tag == NULL) {
printf(": couldn't establish interrupt");
if (intrstr != NULL)
printf(" at %s", intrstr);
@@ -1314,6 +1551,9 @@ ixgbe_allocate_legacy(struct ix_softc *sc)
}
printf(": %s", intrstr);
+ /* For simplicity in the handlers */
+ sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
+
return (0);
}
@@ -1322,7 +1562,7 @@ ixgbe_allocate_pci_resources(struct ix_softc *sc)
{
struct ixgbe_osdep *os = &sc->osdep;
struct pci_attach_args *pa = os->os_pa;
- int val, i;
+ int val;
val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM &&
@@ -1338,24 +1578,14 @@ ixgbe_allocate_pci_resources(struct ix_softc *sc)
}
sc->hw.hw_addr = (uint8_t *)os->os_membase;
- /*
- * Init the resource arrays
- */
- for (i = 0; i < IXGBE_MSGS; i++) {
- sc->rid[i] = i + 1; /* MSI/X RID starts at 1 */
- sc->tag[i] = NULL;
- sc->res[i] = NULL;
- }
-
/* Legacy defaults */
- sc->num_tx_queues = 1;
- sc->num_rx_queues = 1;
+ sc->num_queues = 1;
+ sc->hw.back = os;
#ifdef notyet
- /* Now setup MSI or MSI/X */
+ /* Now setup MSI or MSI/X, return us the number of supported vectors. */
sc->msix = ixgbe_setup_msix(sc);
#endif
- sc->hw.back = os;
return (0);
}
@@ -1365,10 +1595,20 @@ ixgbe_free_pci_resources(struct ix_softc * sc)
{
struct ixgbe_osdep *os = &sc->osdep;
struct pci_attach_args *pa = os->os_pa;
+ struct ix_queue *que = sc->queues;
+ int i;
- if (sc->tag[0])
- pci_intr_disestablish(pa->pa_pc, sc->tag[0]);
- sc->tag[0] = NULL;
+
+ /* Release all msix queue resources: */
+ for (i = 0; i < sc->num_queues; i++, que++) {
+ if (que->tag)
+ pci_intr_disestablish(pa->pa_pc, que->tag);
+ que->tag = NULL;
+ }
+
+ if (sc->tag)
+ pci_intr_disestablish(pa->pa_pc, sc->tag);
+ sc->tag = NULL;
if (os->os_membase != 0)
bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
os->os_membase = 0;
@@ -1378,52 +1618,6 @@ ixgbe_free_pci_resources(struct ix_softc * sc)
/*********************************************************************
*
- * Initialize the hardware to a configuration as specified by the
- * sc structure. The controller is reset, the EEPROM is
- * verified, the MAC address is set, then the shared initialization
- * routines are called.
- *
- **********************************************************************/
-int
-ixgbe_hardware_init(struct ix_softc *sc)
-{
- struct ifnet *ifp = &sc->arpcom.ac_if;
- uint16_t csum;
-
- csum = 0;
- /* Issue a global reset */
- sc->hw.adapter_stopped = FALSE;
- ixgbe_hw0(&sc->hw, stop_adapter);
-
- /* Make sure we have a good EEPROM before we read from it */
- if (ixgbe_ee(&sc->hw, validate_checksum, &csum) < 0) {
- printf("%s: The EEPROM Checksum Is Not Valid\n", ifp->if_xname);
- return (EIO);
- }
-
- /* Pick up the smart speed setting */
- if (sc->hw.mac.type == ixgbe_mac_82599EB)
- sc->hw.phy.smart_speed = ixgbe_smart_speed;
-
- /* Get Hardware Flow Control setting */
- sc->hw.fc.requested_mode = ixgbe_fc_full;
- sc->hw.fc.pause_time = IXGBE_FC_PAUSE;
- sc->hw.fc.low_water = IXGBE_FC_LO;
- sc->hw.fc.high_water = IXGBE_FC_HI;
- sc->hw.fc.send_xon = TRUE;
-
- if (ixgbe_hw0(&sc->hw, init_hw) != 0) {
- printf("%s: Hardware Initialization Failed", ifp->if_xname);
- return (EIO);
- }
- bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
- IXGBE_ETH_LENGTH_OF_ADDRESS);
-
- return (0);
-}
-
-/*********************************************************************
- *
* Setup networking device structure and register an interface.
*
**********************************************************************/
@@ -1488,6 +1682,50 @@ ixgbe_setup_interface(struct ix_softc *sc)
return;
}
+void
+ixgbe_config_link(struct ix_softc *sc)
+{
+ uint32_t autoneg, err = 0;
+ int sfp, negotiate;
+
+ switch (sc->hw.phy.type) {
+ case ixgbe_phy_sfp_avago:
+ case ixgbe_phy_sfp_ftl:
+ case ixgbe_phy_sfp_intel:
+ case ixgbe_phy_sfp_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
+ sfp = 1;
+ break;
+ default:
+ sfp = 0;
+ break;
+ }
+
+ if (sfp) {
+ if (sc->hw.phy.multispeed_fiber) {
+ sc->hw.mac.ops.setup_sfp(&sc->hw);
+ ixgbe_hw0(&sc->hw, enable_tx_laser);
+ /* XXX taskqueue_enqueue(sc->tq, &sc->msf_task); */
+ } /* else */
+ /* XXX taskqueue_enqueue(sc->tq, &sc->mod_task); */
+ } else {
+ if (sc->hw.mac.ops.check_link)
+ err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
+ &sc->link_up, FALSE);
+ if (err)
+ return;
+ if (sc->hw.mac.ops.setup_link)
+ err = sc->hw.mac.ops.setup_link(&sc->hw, autoneg,
+ negotiate, sc->link_up);
+ }
+ return;
+}
+
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
int
ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
struct ixgbe_dma_alloc *dma, int mapflags)
@@ -1573,26 +1811,33 @@ int
ixgbe_allocate_queues(struct ix_softc *sc)
{
struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ix_queue *que;
struct tx_ring *txr;
struct rx_ring *rxr;
- int rsize, tsize, error = IXGBE_SUCCESS;
+ int rsize, tsize;
int txconf = 0, rxconf = 0, i;
- /* First allocate the TX ring struct memory */
+ /* First allocate the top level queue structs */
+ if (!(sc->queues =
+ (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+ sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
+ goto fail;
+ }
+
+ /* Then allocate the TX ring struct memory */
if (!(sc->tx_rings =
(struct tx_ring *) malloc(sizeof(struct tx_ring) *
- sc->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
- error = ENOMEM;
goto fail;
}
/* Next allocate the RX */
if (!(sc->rx_rings =
(struct rx_ring *) malloc(sizeof(struct rx_ring) *
- sc->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
- error = ENOMEM;
goto rx_fail;
}
@@ -1605,7 +1850,7 @@ ixgbe_allocate_queues(struct ix_softc *sc)
* possibility that things fail midcourse and we need to
* undo memory gracefully
*/
- for (i = 0; i < sc->num_tx_queues; i++, txconf++) {
+ for (i = 0; i < sc->num_queues; i++, txconf++) {
/* Set up some basics */
txr = &sc->tx_rings[i];
txr->sc = sc;
@@ -1618,21 +1863,10 @@ ixgbe_allocate_queues(struct ix_softc *sc)
&txr->txdma, BUS_DMA_NOWAIT)) {
printf("%s: Unable to allocate TX Descriptor memory\n",
ifp->if_xname);
- error = ENOMEM;
goto err_tx_desc;
}
txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
bzero((void *)txr->tx_base, tsize);
-
- if (ixgbe_dma_malloc(sc, sizeof(uint32_t),
- &txr->txwbdma, BUS_DMA_NOWAIT)) {
- printf("%s: Unable to allocate TX Write Back memory\n",
- ifp->if_xname);
- error = ENOMEM;
- goto err_tx_desc;
- }
- txr->tx_hwb = (uint32_t *)txr->txwbdma.dma_vaddr;
- *txr->tx_hwb = 0;
}
/*
@@ -1640,7 +1874,7 @@ ixgbe_allocate_queues(struct ix_softc *sc)
*/
rsize = roundup2(sc->num_rx_desc *
sizeof(union ixgbe_adv_rx_desc), 4096);
- for (i = 0; i < sc->num_rx_queues; i++, rxconf++) {
+ for (i = 0; i < sc->num_queues; i++, rxconf++) {
rxr = &sc->rx_rings[i];
/* Set up some basics */
rxr->sc = sc;
@@ -1653,13 +1887,22 @@ ixgbe_allocate_queues(struct ix_softc *sc)
&rxr->rxdma, BUS_DMA_NOWAIT)) {
printf("%s: Unable to allocate RxDescriptor memory\n",
ifp->if_xname);
- error = ENOMEM;
goto err_rx_desc;
}
rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
bzero((void *)rxr->rx_base, rsize);
}
+ /*
+ * Finally set up the queue holding structs
+ */
+ for (i = 0; i < sc->num_queues; i++) {
+ que = &sc->queues[i];
+ que->sc = sc;
+ que->txr = &sc->tx_rings[i];
+ que->rxr = &sc->rx_rings[i];
+ }
+
return (0);
err_rx_desc:
@@ -1668,7 +1911,6 @@ err_rx_desc:
err_tx_desc:
for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) {
ixgbe_dma_free(sc, &txr->txdma);
- ixgbe_dma_free(sc, &txr->txwbdma);
}
free(sc->rx_rings, M_DEVBUF);
sc->rx_rings = NULL;
@@ -1676,7 +1918,7 @@ rx_fail:
free(sc->tx_rings, M_DEVBUF);
sc->tx_rings = NULL;
fail:
- return (error);
+ return (ENOMEM);
}
/*********************************************************************
@@ -1754,8 +1996,8 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr)
(sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
/* Reset indices */
- txr->next_avail_tx_desc = 0;
- txr->next_tx_to_clean = 0;
+ txr->next_avail_desc = 0;
+ txr->next_to_clean = 0;
/* Set number of descriptors available */
txr->tx_avail = sc->num_tx_desc;
@@ -1778,7 +2020,7 @@ ixgbe_setup_transmit_structures(struct ix_softc *sc)
struct tx_ring *txr = sc->tx_rings;
int i, error;
- for (i = 0; i < sc->num_tx_queues; i++, txr++) {
+ for (i = 0; i < sc->num_queues; i++, txr++) {
if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
goto fail;
}
@@ -1801,12 +2043,12 @@ ixgbe_initialize_transmit_units(struct ix_softc *sc)
struct tx_ring *txr;
struct ixgbe_hw *hw = &sc->hw;
int i;
- uint64_t tdba, txhwb;
+ uint64_t tdba;
uint32_t txctrl;
/* Setup the Base and Length of the Tx Descriptor Ring */
- for (i = 0; i < sc->num_tx_queues; i++) {
+ for (i = 0; i < sc->num_queues; i++) {
txr = &sc->tx_rings[i];
/* Setup descriptor base address */
@@ -1817,13 +2059,14 @@ ixgbe_initialize_transmit_units(struct ix_softc *sc)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
- /* Setup for Head WriteBack */
- txhwb = txr->txwbdma.dma_map->dm_segs[0].ds_addr;
- txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
- (txhwb & 0x00000000ffffffffULL));
- IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
- (txhwb >> 32));
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+
+ /* Setup Transmit Descriptor Cmd Settings */
+ txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+ txr->queue_status = IXGBE_QUEUE_IDLE;
+ txr->watchdog_timer = 0;
/* Disable Head Writeback */
switch (hw->mac.type) {
@@ -1845,15 +2088,6 @@ ixgbe_initialize_transmit_units(struct ix_softc *sc)
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
break;
}
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
- IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
-
- /* Setup Transmit Descriptor Cmd Settings */
- txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
-
- txr->watchdog_timer = 0;
}
ifp->if_timer = 0;
@@ -1885,7 +2119,7 @@ ixgbe_free_transmit_structures(struct ix_softc *sc)
struct tx_ring *txr = sc->tx_rings;
int i;
- for (i = 0; i < sc->num_tx_queues; i++, txr++) {
+ for (i = 0; i < sc->num_queues; i++, txr++) {
ixgbe_free_transmit_buffers(txr);
}
}
@@ -1927,10 +2161,8 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr)
}
}
- if (txr->tx_buffers != NULL) {
+ if (txr->tx_buffers != NULL)
free(txr->tx_buffers, M_DEVBUF);
- txr->tx_buffers = NULL;
- }
txr->tx_buffers = NULL;
txr->txtag = NULL;
}
@@ -1957,12 +2189,13 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
int ehdrlen, ip_hlen = 0;
uint16_t etype;
int offload = TRUE;
- int ctxd = txr->next_avail_tx_desc;
+ int ctxd = txr->next_avail_desc;
#if NVLAN > 0
struct ether_vlan_header *eh;
#else
struct ether_header *eh;
#endif
+ uint16_t vtag = 0;
if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
offload = FALSE;
@@ -1976,8 +2209,8 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
*/
#if NVLAN > 0
if (mp->m_flags & M_VLANTAG) {
- vlan_macip_lens |=
- htole16(mp->m_pkthdr.ether_vtag) << IXGBE_ADVTXD_VLAN_SHIFT;
+ vtag = htole16(mp->m_pkthdr.ether_vtag);
+ vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
} else
#endif
if (offload == FALSE)
@@ -2005,47 +2238,48 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
/* Set the ether header length */
vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
- type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- if (offload == TRUE) {
- switch (etype) {
- case ETHERTYPE_IP:
- ip = (struct ip *)(mp->m_data + ehdrlen);
- ip_hlen = ip->ip_hl << 2;
- if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE; /* failure */
- ipproto = ip->ip_p;
- if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- break;
+ switch (etype) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mp->m_data + ehdrlen);
+ ip_hlen = ip->ip_hl << 2;
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return FALSE; /* failure */
+ ipproto = ip->ip_p;
+ if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ break;
#ifdef notyet
- case ETHERTYPE_IPV6:
- ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
- ip_hlen = sizeof(struct ip6_hdr);
- if (mp->m_len < ehdrlen + ip_hlen)
- return FALSE; /* failure */
- ipproto = ip6->ip6_nxt;
- if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
- break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+ ip_hlen = sizeof(struct ip6_hdr);
+ if (mp->m_len < ehdrlen + ip_hlen)
+ return FALSE; /* failure */
+ ipproto = ip6->ip6_nxt;
+ if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+ break;
#endif
- default:
- offload = FALSE;
- break;
- }
+ default:
+ offload = FALSE;
+ break;
+ }
- vlan_macip_lens |= ip_hlen;
+ vlan_macip_lens |= ip_hlen;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- switch (ipproto) {
- case IPPROTO_TCP:
- if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case IPPROTO_UDP:
- if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
- break;
- }
+ switch (ipproto) {
+ case IPPROTO_TCP:
+ if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case IPPROTO_UDP:
+ if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+ break;
+ default:
+ offload = FALSE;
+ break;
}
/* Now copy bits into descriptor */
@@ -2055,11 +2289,12 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
TXD->mss_l4len_idx = htole32(0);
tx_buffer->m_head = NULL;
+ tx_buffer->eop_index = -1;
/* We've consumed the first desc, adjust counters */
if (++ctxd == sc->num_tx_desc)
ctxd = 0;
- txr->next_avail_tx_desc = ctxd;
+ txr->next_avail_desc = ctxd;
--txr->tx_avail;
return (offload);
@@ -2113,7 +2348,7 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
return FALSE;
- ctxd = txr->next_avail_tx_desc;
+ ctxd = txr->next_avail_desc;
tx_buffer = &txr->tx_buffers[ctxd];
TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
@@ -2162,7 +2397,7 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
ctxd = 0;
txr->tx_avail--;
- txr->next_avail_tx_desc = ctxd;
+ txr->next_avail_desc = ctxd;
return TRUE;
}
@@ -2187,41 +2422,48 @@ ixgbe_txeof(struct tx_ring *txr)
{
struct ix_softc *sc = txr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
- uint first, last, done, num_avail;
+ uint32_t first, last, done, processed;
struct ixgbe_tx_buf *tx_buffer;
- struct ixgbe_legacy_tx_desc *tx_desc;
+ struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
- if (txr->tx_avail == sc->num_tx_desc)
+ if (txr->tx_avail == sc->num_tx_desc) {
+ txr->queue_status = IXGBE_QUEUE_IDLE;
return FALSE;
+ }
- num_avail = txr->tx_avail;
- first = txr->next_tx_to_clean;
-
+ processed = 0;
+ first = txr->next_to_clean;
tx_buffer = &txr->tx_buffers[first];
-
/* For cleanup we just use legacy struct */
tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+ last = tx_buffer->eop_index;
+ if (last == -1)
+ return FALSE;
+ eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
- /* Get the HWB */
- bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
- 0, txr->txwbdma.dma_map->dm_mapsize,
- BUS_DMASYNC_POSTREAD);
- done = *txr->tx_hwb;
+ /*
+ * Get the index of the first descriptor
+ * BEYOND the EOP and call that 'done'.
+ * I do this so the comparison in the
+ * inner while loop below can be simple
+ */
+ if (++last == sc->num_tx_desc) last = 0;
+ done = last;
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
0, txr->txdma.dma_map->dm_mapsize,
BUS_DMASYNC_POSTREAD);
- while (TRUE) {
- /* We clean the range til last head write back */
+ while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
+ /* We clean the range of the packet */
while (first != done) {
tx_desc->upper.data = 0;
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
- num_avail++;
+ ++txr->tx_avail;
+ ++processed;
if (tx_buffer->m_head) {
- ifp->if_opackets++;
bus_dmamap_sync(txr->txdma.dma_tag,
tx_buffer->map,
0, tx_buffer->map->dm_mapsize,
@@ -2231,6 +2473,7 @@ ixgbe_txeof(struct tx_ring *txr)
m_freem(tx_buffer->m_head);
tx_buffer->m_head = NULL;
}
+ tx_buffer->eop_index = -1;
if (++first == sc->num_tx_desc)
first = 0;
@@ -2239,13 +2482,17 @@ ixgbe_txeof(struct tx_ring *txr)
tx_desc = (struct ixgbe_legacy_tx_desc *)
&txr->tx_base[first];
}
+ ++txr->packets;
+ ++ifp->if_opackets;
/* See if there is more work now */
- last = done;
- bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
- 0, txr->txwbdma.dma_map->dm_mapsize,
- BUS_DMASYNC_POSTREAD);
- done = *txr->tx_hwb;
- if (last == done)
+ last = tx_buffer->eop_index;
+ if (last != -1) {
+ eop_desc =
+ (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+ /* Get next done point */
+ if (++last == sc->num_tx_desc) last = 0;
+ done = last;
+ } else
break;
}
@@ -2253,7 +2500,7 @@ ixgbe_txeof(struct tx_ring *txr)
0, txr->txdma.dma_map->dm_mapsize,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- txr->next_tx_to_clean = first;
+ txr->next_to_clean = first;
/*
* If we have enough room, clear IFF_OACTIVE to tell the stack that
@@ -2261,25 +2508,22 @@ ixgbe_txeof(struct tx_ring *txr)
* clear the timeout. Otherwise, if some descriptors have been freed,
* restart the timeout.
*/
- if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
+ if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
ifp->if_flags &= ~IFF_OACTIVE;
/* If all are clean turn off the timer */
- if (num_avail == sc->num_tx_desc) {
+ if (txr->tx_avail == sc->num_tx_desc) {
ifp->if_timer = 0;
txr->watchdog_timer = 0;
- txr->tx_avail = num_avail;
return FALSE;
}
/* Some were cleaned, so reset timer */
- else if (num_avail != txr->tx_avail) {
+ else if (processed) {
ifp->if_timer = IXGBE_TX_TIMEOUT;
txr->watchdog_timer = IXGBE_TX_TIMEOUT;
}
}
- txr->tx_avail = num_avail;
-
return TRUE;
}
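A worked example of the 'done' computation above may help: with
num_tx_desc = 8, suppose a three-segment packet was queued starting at
first = 2, so eop_index = 4 (values assumed purely for illustration):

    /*
     *   last = 4, ++last        -> done = 5
     *   once descriptor 4 shows IXGBE_TXD_STAT_DD, the inner loop
     *   cleans slots 2, 3 and 4, stopping when first == done (5);
     *   tx_avail grows by 3 and next_to_clean ends up at 5.
     */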
@@ -2292,47 +2536,73 @@ int
ixgbe_get_buf(struct rx_ring *rxr, int i)
{
struct ix_softc *sc = rxr->sc;
- struct mbuf *m;
- int error;
- int size = MCLBYTES;
struct ixgbe_rx_buf *rxbuf;
+ struct mbuf *mh, *mp;
+ int error;
union ixgbe_adv_rx_desc *rxdesc;
size_t dsize = sizeof(union ixgbe_adv_rx_desc);
rxbuf = &rxr->rx_buffers[i];
rxdesc = &rxr->rx_base[i];
-
- if (rxbuf->m_head != NULL) {
+ if (rxbuf->m_head != NULL || rxbuf->m_pack) {
printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
sc->dev.dv_xname, i);
return (ENOBUFS);
}
- m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, size);
- if (!m) {
- sc->mbuf_cluster_failed++;
+ /* The payload cluster is needed in any case, so preallocate it first;
+ * it is the allocation most likely to fail. */
+ mp = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, sc->rx_mbuf_sz);
+ if (!mp) {
+ sc->mbuf_packet_failed++;
return (ENOBUFS);
}
- m->m_len = m->m_pkthdr.len = size;
- if (sc->max_frame_size <= (size - ETHER_ALIGN))
- m_adj(m, ETHER_ALIGN);
- error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
- m, BUS_DMA_NOWAIT);
+ if (rxr->hdr_split == FALSE)
+ goto no_split;
+
+ mh = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (mh == NULL)
+ return (ENOBUFS);
+
+ mh->m_pkthdr.len = mh->m_len = MHLEN;
+ /* always offset header buffers */
+ m_adj(mh, ETHER_ALIGN);
+
+ error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->hmap,
+ mh, BUS_DMA_NOWAIT);
+ if (error) {
+ m_freem(mh);
+ return (error);
+ }
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap,
+ 0, rxbuf->hmap->dm_mapsize, BUS_DMASYNC_PREREAD);
+ rxbuf->m_head = mh;
+
+ rxdesc->read.hdr_addr = htole64(rxbuf->hmap->dm_segs[0].ds_addr);
+
+no_split:
+ mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
+ /* only adjust if this is not a split header */
+ if (rxr->hdr_split == FALSE &&
+ sc->max_frame_size <= (sc->rx_mbuf_sz - ETHER_ALIGN))
+ m_adj(mp, ETHER_ALIGN);
+
+ error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->pmap,
+ mp, BUS_DMA_NOWAIT);
if (error) {
- m_freem(m);
+ m_freem(mp);
return (error);
}
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
- 0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
- rxbuf->m_head = m;
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap,
+ 0, rxbuf->pmap->dm_mapsize, BUS_DMASYNC_PREREAD);
+ rxbuf->m_pack = mp;
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
- bzero(rxdesc, dsize);
- rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
+ rxdesc->read.pkt_addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
dsize * i, dsize, BUS_DMASYNC_PREWRITE);
@@ -2356,7 +2626,7 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
struct ix_softc *sc = rxr->sc;
struct ifnet *ifp = &sc->arpcom.ac_if;
struct ixgbe_rx_buf *rxbuf;
- int i, bsize, error, size = MCLBYTES;
+ int i, bsize, error;
bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
@@ -2366,18 +2636,23 @@ ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
error = ENOMEM;
goto fail;
}
- rxr->rxtag = rxr->rxdma.dma_tag;
rxbuf = rxr->rx_buffers;
for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
- error = bus_dmamap_create(rxr->rxdma.dma_tag, size, 1,
- size, 0, BUS_DMA_NOWAIT, &rxbuf->map);
+ error = bus_dmamap_create(rxr->rxdma.dma_tag, MSIZE, 1,
+ MSIZE, 0, BUS_DMA_NOWAIT, &rxbuf->hmap);
+ if (error) {
+ printf("%s: Unable to create Head DMA map\n",
+ ifp->if_xname);
+ goto fail;
+ }
+ error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
+ 16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->pmap);
if (error) {
- printf("%s: Unable to create Rx DMA map\n",
+ printf("%s: Unable to create Pack DMA map\n",
ifp->if_xname);
goto fail;
}
- rxbuf->m_head = NULL;
}
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
rxr->rxdma.dma_map->dm_mapsize,
@@ -2410,7 +2685,7 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr)
/* Setup our descriptor indices */
rxr->next_to_check = 0;
- rxr->last_rx_desc_filled = sc->num_rx_desc - 1;
+ rxr->last_desc_filled = sc->num_rx_desc - 1;
rxr->rx_ndescs = 0;
ixgbe_rxfill(rxr);
@@ -2430,7 +2705,7 @@ ixgbe_rxfill(struct rx_ring *rxr)
int post = 0;
int i;
- i = rxr->last_rx_desc_filled;
+ i = rxr->last_desc_filled;
while (rxr->rx_ndescs < sc->num_rx_desc) {
if (++i == sc->num_rx_desc)
i = 0;
@@ -2438,7 +2713,7 @@ ixgbe_rxfill(struct rx_ring *rxr)
if (ixgbe_get_buf(rxr, i) != 0)
break;
- rxr->last_rx_desc_filled = i;
+ rxr->last_desc_filled = i;
post = 1;
}
@@ -2456,7 +2731,7 @@ ixgbe_setup_receive_structures(struct ix_softc *sc)
struct rx_ring *rxr = sc->rx_rings;
int i;
- for (i = 0; i < sc->num_rx_queues; i++, rxr++)
+ for (i = 0; i < sc->num_queues; i++, rxr++)
if (ixgbe_setup_receive_ring(rxr))
goto fail;
@@ -2472,15 +2747,16 @@ fail:
* Enable receive unit.
*
**********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
void
ixgbe_initialize_receive_units(struct ix_softc *sc)
{
struct rx_ring *rxr = sc->rx_rings;
struct ifnet *ifp = &sc->arpcom.ac_if;
- uint32_t rxctrl, fctrl, srrctl, rxcsum;
- uint32_t reta, mrqc, hlreg, linkvec;
+ uint32_t bufsz, rxctrl, fctrl, srrctl, rxcsum;
+ uint32_t reta, mrqc = 0, hlreg;
uint32_t random[10];
- uint32_t llimod = 0;
int i;
/*
@@ -2494,8 +2770,11 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
/* Enable broadcasts */
fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
fctrl |= IXGBE_FCTRL_BAM;
+ fctrl |= IXGBE_FCTRL_DPF;
+ fctrl |= IXGBE_FCTRL_PMCF;
IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
+ /* Set for Jumbo Frames? */
hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
if (ifp->if_mtu > ETHERMTU)
hlreg |= IXGBE_HLREG0_JUMBOEN;
@@ -2503,28 +2782,11 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
hlreg &= ~IXGBE_HLREG0_JUMBOEN;
IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
- srrctl = IXGBE_READ_REG(&sc->hw, IXGBE_SRRCTL(0));
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (sc->bigbufs)
- srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(0), srrctl);
-
- /* Set Queue moderation rate */
- if (sc->hw.mac.type == ixgbe_mac_82599EB)
- llimod = IXGBE_EITR_LLI_MOD;
- for (i = 0; i < IXGBE_MSGS; i++)
- IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(i), DEFAULT_ITR | llimod);
-
- /* Set Link moderation lower */
- linkvec = sc->num_tx_queues + sc->num_rx_queues;
- IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(linkvec), LINK_ITR);
+ bufsz = sc->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
+ for (i = 0; i < sc->num_queues; i++, rxr++) {
uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
+
/* Setup the Base and Length of the Rx Descriptor Ring */
IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
(rdba & 0x00000000ffffffffULL));
@@ -2532,46 +2794,58 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ /* Set up the SRRCTL register */
+ srrctl = IXGBE_READ_REG(&sc->hw, IXGBE_SRRCTL(i));
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ srrctl |= bufsz;
+ if (rxr->hdr_split) {
+ /* Use a standard mbuf for the header */
+ srrctl |= ((IXGBE_RX_HDR <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+ & IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ } else
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(i), srrctl);
+
/* Setup the HW Rx Head and Tail Descriptor Pointers */
IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
- IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i),
- rxr->last_rx_desc_filled);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), 0);
+ }
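bufsz above is the cluster size scaled into the units SRRCTL expects: the
BSIZEPKT field is in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in
ixgbe_type.h), while the header size uses the BSIZEHDRSIZE_SHIFT of 2
defined just above. A sketch of the arithmetic (IXGBE_RX_HDR is assumed to
be 128, the values are for illustration):

    uint32_t bufsz = (16 * 1024) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* 16 */
    uint32_t hdrsz = (IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
        IXGBE_SRRCTL_BSIZEHDR_MASK;         /* 128-byte header buffer */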
+
+ if (sc->hw.mac.type != ixgbe_mac_82598EB) {
+ uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_PSRTYPE(0), psrtype);
}
rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
- if (sc->num_rx_queues > 1) {
+ /* Setup RSS */
+ if (sc->num_queues > 1) {
+ int j;
+ reta = 0;
/* set up random bits */
arc4random_buf(&random, sizeof(random));
- switch (sc->num_rx_queues) {
- case 8:
- case 4:
- reta = 0x00010203;
- break;
- case 2:
- reta = 0x00010001;
- break;
- default:
- reta = 0x00000000;
- }
/* Set up the redirection table */
- for (i = 0; i < 32; i++) {
- IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i), reta);
- if (sc->num_rx_queues > 4) {
- ++i;
- IXGBE_WRITE_REG(&sc->hw,
- IXGBE_RETA(i), 0x04050607);
- }
+ for (i = 0, j = 0; i < 128; i++, j++) {
+ if (j == sc->num_queues)
+ j = 0;
+ reta = (reta << 8) | (j * 0x11);
+ if ((i & 3) == 3)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i >> 2), reta);
}
/* Now fill our hash function seeds */
for (i = 0; i < 10; i++)
- IXGBE_WRITE_REG_ARRAY(&sc->hw,
- IXGBE_RSSRK(0), i, random[i]);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RSSRK(i), random[i]);
+ /* Perform hash on these packet types */
mrqc = IXGBE_MRQC_RSSEN
- /* Perform hash on these packet types */
| IXGBE_MRQC_RSS_FIELD_IPV4
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP
| IXGBE_MRQC_RSS_FIELD_IPV4_UDP
@@ -2595,13 +2869,6 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
- /* Enable Receive engine */
- rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
- if (sc->hw.mac.type == ixgbe_mac_82598EB)
- rxctrl |= IXGBE_RXCTRL_DMBYPS;
- rxctrl |= IXGBE_RXCTRL_RXEN;
- sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
-
return;
}
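The RETA loop above writes 128 byte-wide redirection entries, four per
32-bit register, cycling j over the active queues; multiplying by 0x11
mirrors the queue index into both nibbles of each entry. A worked example
(sketch) for num_queues = 2:

    /*
     *   i = 0..3:  reta = 0x00, 0x0011, 0x001100, 0x00110011
     *              -> IXGBE_RETA(0) = 0x00110011
     *   the same pattern repeats for RETA(1) .. RETA(31), alternating
     *   the 128 hash buckets between the two queues.
     */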
@@ -2616,7 +2883,7 @@ ixgbe_free_receive_structures(struct ix_softc *sc)
struct rx_ring *rxr = sc->rx_rings;
int i;
- for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
+ for (i = 0; i < sc->num_queues; i++, rxr++) {
ixgbe_free_receive_buffers(rxr);
}
}
@@ -2629,40 +2896,40 @@ ixgbe_free_receive_structures(struct ix_softc *sc)
void
ixgbe_free_receive_buffers(struct rx_ring *rxr)
{
- struct ix_softc *sc = NULL;
- struct ixgbe_rx_buf *rxbuf = NULL;
+ struct ix_softc *sc;
+ struct ixgbe_rx_buf *rxbuf;
int i;
- INIT_DEBUGOUT("free_receive_buffers: begin");
sc = rxr->sc;
if (rxr->rx_buffers != NULL) {
- rxbuf = rxr->rx_buffers;
- for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
+ for (i = 0; i < sc->num_rx_desc; i++) {
+ rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head != NULL) {
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
- 0, rxbuf->map->dm_mapsize,
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap,
+ 0, rxbuf->hmap->dm_mapsize,
BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
+ bus_dmamap_unload(rxr->rxdma.dma_tag,
+ rxbuf->hmap);
m_freem(rxbuf->m_head);
rxbuf->m_head = NULL;
}
- bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
- rxbuf->map = NULL;
+ if (rxbuf->m_pack != NULL) {
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap,
+ 0, rxbuf->pmap->dm_mapsize,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxdma.dma_tag,
+ rxbuf->pmap);
+ m_freem(rxbuf->m_pack);
+ rxbuf->m_pack = NULL;
+ }
+ bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->hmap);
+ bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->pmap);
+ rxbuf->hmap = NULL;
+ rxbuf->pmap = NULL;
}
- }
- if (rxr->rx_buffers != NULL) {
free(rxr->rx_buffers, M_DEVBUF);
rxr->rx_buffers = NULL;
}
-
- if (rxr->rxtag != NULL)
- rxr->rxtag = NULL;
-
- if (rxr->fmp != NULL) {
- m_freem(rxr->fmp);
- rxr->fmp = NULL;
- rxr->lmp = NULL;
- }
}
/*********************************************************************
@@ -2676,35 +2943,29 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr)
*
*********************************************************************/
int
-ixgbe_rxeof(struct rx_ring *rxr, int count)
+ixgbe_rxeof(struct ix_queue *que, int count)
{
- struct ix_softc *sc = rxr->sc;
+ struct ix_softc *sc = que->sc;
+ struct rx_ring *rxr = que->rxr;
struct ifnet *ifp = &sc->arpcom.ac_if;
- struct mbuf *m;
- uint8_t accept_frame = 0;
+ struct mbuf *mh, *mp, *sendmp;
uint8_t eop = 0;
- uint16_t len, desc_len, prev_len_adj;
- uint32_t staterr;
- struct ixgbe_rx_buf *rxbuf;
+ uint16_t hlen, plen, hdr, vtag;
+ uint32_t staterr, ptype;
+ struct ixgbe_rx_buf *rxbuf, *nxbuf;
union ixgbe_adv_rx_desc *rxdesc;
size_t dsize = sizeof(union ixgbe_adv_rx_desc);
- int i;
+ int i, nextp;
if (!ISSET(ifp->if_flags, IFF_RUNNING))
return FALSE;
i = rxr->next_to_check;
-
while (count != 0 && rxr->rx_ndescs > 0) {
- m = NULL;
-
- rxdesc = &rxr->rx_base[i];
- rxbuf = &rxr->rx_buffers[i];
-
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
- dsize * i, dsize,
- BUS_DMASYNC_POSTREAD);
+ dsize * i, dsize, BUS_DMASYNC_POSTREAD);
+ rxdesc = &rxr->rx_base[i];
staterr = letoh32(rxdesc->wb.upper.status_error);
if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
@@ -2713,111 +2974,175 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
break;
}
+		/* Zero out the receive descriptor's status */
+ rxdesc->wb.upper.status_error = 0;
+ rxbuf = &rxr->rx_buffers[i];
+
/* pull the mbuf off the ring */
- bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
- rxbuf->map->dm_mapsize,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
- m = rxbuf->m_head;
- rxbuf->m_head = NULL;
-
- if (m == NULL) {
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap, 0,
+ rxbuf->hmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->hmap);
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap, 0,
+ rxbuf->pmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->pmap);
+
+ mh = rxbuf->m_head;
+ mp = rxbuf->m_pack;
+ plen = letoh16(rxdesc->wb.upper.length);
+ ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_PKTTYPE_MASK;
+ hdr = letoh16(rxdesc->wb.lower.lo_dword.hs_rss.hdr_info);
+ vtag = letoh16(rxdesc->wb.upper.vlan);
+ eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
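+		/*
+		 * On any descriptor error drop the whole frame: free a
+		 * partially assembled chain along with the current header
+		 * and payload mbufs, then move on to the next descriptor.
+		 */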
+ if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
+ ifp->if_ierrors++;
+ sc->dropped_pkts++;
+
+ if (rxbuf->fmp) {
+ m_freem(rxbuf->fmp);
+ rxbuf->fmp = NULL;
+ }
+
+ m_freem(mh);
+ m_freem(mp);
+ rxbuf->m_head = NULL;
+ rxbuf->m_pack = NULL;
+ goto next_desc;
+ }
+
+ if (mp == NULL) {
panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
"(nrx %d, filled %d)", sc->dev.dv_xname,
i, rxr->rx_ndescs,
- rxr->last_rx_desc_filled);
+ rxr->last_desc_filled);
}
- m_cluncount(m, 1);
- rxr->rx_ndescs--;
-
- accept_frame = 1;
- prev_len_adj = 0;
- desc_len = letoh16(rxdesc->wb.upper.length);
-
- if (staterr & IXGBE_RXD_STAT_EOP) {
- count--;
- eop = 1;
- } else {
- eop = 0;
+ /* XXX ixgbe_realign() STRICT_ALIGN */
+		/* Currently no HW RSC support for the 82599 */
+ if (!eop) {
+ /*
+ * Figure out the next descriptor of this frame.
+ */
+ nextp = i + 1;
+ if (nextp == sc->num_rx_desc)
+ nextp = 0;
+ nxbuf = &rxr->rx_buffers[nextp];
+ /* prefetch(nxbuf); */
}
- len = desc_len;
-
- if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
- accept_frame = 0;
-
- if (accept_frame) {
- m->m_len = len;
-
- /* XXX ixgbe_realign() STRICT_ALIGN */
-
- if (rxr->fmp == NULL) {
- m->m_pkthdr.len = m->m_len;
- rxr->fmp = m; /* Store the first mbuf */
- rxr->lmp = m;
+ /*
+		 * The header mbuf is ONLY used when header
+		 * split is enabled; otherwise we get normal
+		 * behavior, i.e., both header and payload
+ * are DMA'd into the payload buffer.
+ *
+ * Rather than using the fmp/lmp global pointers
+ * we now keep the head of a packet chain in the
+ * buffer struct and pass this along from one
+ * descriptor to the next, until we get EOP.
+ */
+ if (rxr->hdr_split && (rxbuf->fmp == NULL)) {
+ /* This must be an initial descriptor */
+ hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR)
+ hlen = IXGBE_RX_HDR;
+ mh->m_len = hlen;
+ mh->m_pkthdr.len = mh->m_len;
+ rxbuf->m_head = NULL;
+ /*
+			 * Check the payload length; this could be zero if
+			 * it's a small packet.
+ */
+ if (plen > 0) {
+ mp->m_len = plen;
+ mp->m_flags &= ~M_PKTHDR;
+ mh->m_next = mp;
+ mh->m_pkthdr.len += mp->m_len;
+ rxbuf->m_pack = NULL;
+ rxr->rx_split_packets++;
} else {
- /* Chain mbuf's together */
- m->m_flags &= ~M_PKTHDR;
-#if 0
- /*
- * Adjust length of previous mbuf in chain if
- * we received less than 4 bytes in the last
- * descriptor.
- */
- if (prev_len_adj > 0) {
- rxr->lmp->m_len -= prev_len_adj;
- rxr->fmp->m_pkthdr.len -= prev_len_adj;
+ m_freem(mp);
+ rxbuf->m_pack = NULL;
+ }
+ /* Now create the forward chain. */
+ if (eop == 0) {
+ /* stash the chain head */
+ nxbuf->fmp = mh;
+ /* Make forward chain */
+ if (plen)
+ mp->m_next = nxbuf->m_pack;
+ else
+ mh->m_next = nxbuf->m_pack;
+ } else {
+ /* Singlet, prepare to send */
+ sendmp = mh;
+#if NVLAN > 0
+ if ((sc->num_vlans) &&
+ (staterr & IXGBE_RXD_STAT_VP)) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
}
#endif
- rxr->lmp->m_next = m;
- rxr->lmp = m;
- rxr->fmp->m_pkthdr.len += m->m_len;
}
-
- if (eop) {
- ifp->if_ipackets++;
-
- m = rxr->fmp;
- m->m_pkthdr.rcvif = ifp;
-
- rxr->packet_count++;
- rxr->byte_count += rxr->fmp->m_pkthdr.len;
-
- ixgbe_rx_checksum(sc, staterr, m);
-
+ } else {
+ /*
+ * Either no header split, or a
+ * secondary piece of a fragmented
+ * split packet.
+ */
+ mp->m_len = plen;
+ /*
+			 * See if there is a stored head
+			 * that tells us what this frame is
+ */
+ sendmp = rxbuf->fmp;
+ rxbuf->m_pack = rxbuf->fmp = NULL;
+
+ if (sendmp != NULL) /* secondary frag */
+ sendmp->m_pkthdr.len += mp->m_len;
+ else {
+ /* first desc of a non-ps chain */
+ sendmp = mp;
+ sendmp->m_pkthdr.len = mp->m_len;
#if NVLAN > 0
- if (staterr & IXGBE_RXD_STAT_VP) {
- m->m_pkthdr.ether_vtag =
- letoh16(rxdesc->wb.upper.vlan);
- m->m_flags |= M_VLANTAG;
+ if ((sc->num_vlans) &&
+ (staterr & IXGBE_RXD_STAT_VP)) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
}
#endif
-#if NBPFILTER > 0
- if (ifp->if_bpf)
- bpf_mtap_ether(ifp->if_bpf, m,
- BPF_DIRECTION_IN);
-#endif
-
- ether_input_mbuf(ifp, m);
-
- rxr->fmp = NULL;
- rxr->lmp = NULL;
}
- } else {
- sc->dropped_pkts++;
-
- if (rxr->fmp != NULL) {
- m_freem(rxr->fmp);
- rxr->fmp = NULL;
- rxr->lmp = NULL;
+ /* Pass the head pointer on */
+ if (eop == 0) {
+ nxbuf->fmp = sendmp;
+ sendmp = NULL;
+ mp->m_next = nxbuf->m_pack;
}
-
- m_freem(m);
}
+ rxr->rx_ndescs--;
+ /* Sending this frame? */
+ if (eop) {
+ m_cluncount(sendmp, 1);
- /* Zero out the receive descriptors status */
- rxdesc->wb.upper.status_error = 0;
+ sendmp->m_pkthdr.rcvif = ifp;
+ ifp->if_ipackets++;
+ rxr->rx_packets++;
+ /* capture data for AIM */
+ rxr->bytes += sendmp->m_pkthdr.len;
+ rxr->rx_bytes += sendmp->m_pkthdr.len;
+ ixgbe_rx_checksum(staterr, sendmp, ptype);
+
+#if NBPFILTER > 0
+ if (ifp->if_bpf)
+ bpf_mtap_ether(ifp->if_bpf, sendmp,
+ BPF_DIRECTION_IN);
+#endif
+
+ ether_input_mbuf(ifp, sendmp);
+ }
+next_desc:
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
dsize * i, dsize,
BUS_DMASYNC_PREREAD);
@@ -2842,8 +3167,7 @@ ixgbe_rxeof(struct rx_ring *rxr, int count)
*
*********************************************************************/
void
-ixgbe_rx_checksum(struct ix_softc *sc,
- uint32_t staterr, struct mbuf * mp)
+ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
{
uint16_t status = (uint16_t) staterr;
uint8_t errors = (uint8_t) (staterr >> 24);
@@ -2867,47 +3191,87 @@ ixgbe_rx_checksum(struct ix_softc *sc,
}
void
-ixgbe_enable_hw_vlans(struct ix_softc *sc)
+ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
{
uint32_t ctrl;
+ int i;
+
+ /*
+	 * We get here through ixgbe_init, meaning
+	 * a soft reset; this has already cleared
+	 * the VFTA and other state, so if no
+	 * VLANs have been registered do nothing.
+ */
+ if (sc->num_vlans == 0)
+ return;
+
+ /*
+	 * A soft reset zeroes out the VFTA, so
+ * we need to repopulate it now.
+ */
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++)
+ if (sc->shadow_vfta[i] != 0)
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
+ sc->shadow_vfta[i]);
- ixgbe_disable_intr(sc);
ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
+#if 0
+ /* Enable the Filter Table if enabled */
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ }
+#endif
if (sc->hw.mac.type == ixgbe_mac_82598EB)
ctrl |= IXGBE_VLNCTRL_VME;
- ctrl &= ~IXGBE_VLNCTRL_CFIEN;
- ctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
- ixgbe_enable_intr(sc);
+
+ /* On 82599 the VLAN enable is per/queue in RXDCTL */
+ if (sc->hw.mac.type != ixgbe_mac_82598EB)
+ for (i = 0; i < sc->num_queues; i++) {
+ ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
+ }
+
}
void
ixgbe_enable_intr(struct ix_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- uint32_t mask = IXGBE_EIMS_ENABLE_MASK;
+ struct ix_queue *que = sc->queues;
+ uint32_t mask = IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE;
+ int i;
/* Enable Fan Failure detection */
- if (hw->phy.media_type == ixgbe_media_type_copper)
+ if (hw->device_id == IXGBE_DEV_ID_82598AT)
mask |= IXGBE_EIMS_GPI_SDP1;
-
- /* 82599 specific interrupts */
- if (sc->hw.mac.type == ixgbe_mac_82599EB) {
+ else {
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
}
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+
/* With RSS we use auto clear */
- if (sc->msix_mem) {
+ if (sc->msix) {
+ mask = IXGBE_EIMS_ENABLE_MASK;
/* Dont autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
- IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC,
- sc->eims_mask | mask);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, mask);
}
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ /*
+	 * Now enable all queues; this is done separately to
+	 * allow for handling the extended (beyond 32) MSIX
+	 * vectors that can be used by the 82599.
+ */
+ for (i = 0; i < sc->num_queues; i++, que++)
+ ixgbe_enable_queue(sc, que->msix);
+
IXGBE_WRITE_FLUSH(hw);
return;
@@ -2916,7 +3280,7 @@ ixgbe_enable_intr(struct ix_softc *sc)
void
ixgbe_disable_intr(struct ix_softc *sc)
{
- if (sc->msix_mem)
+ if (sc->msix)
IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
if (sc->hw.mac.type == ixgbe_mac_82598EB) {
IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
@@ -2933,25 +3297,41 @@ uint16_t
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
{
struct pci_attach_args *pa;
- uint16_t value;
+ uint32_t value;
+ int high = 0;
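+
+	/*
+	 * pci_conf_read() works on aligned 32-bit dwords, so a 16-bit
+	 * register whose offset has bit 1 set sits in the upper half
+	 * of the enclosing dword.
+	 */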
+ if (reg & 0x2) {
+ high = 1;
+ reg &= ~0x2;
+ }
pa = ((struct ixgbe_osdep *)hw->back)->os_pa;
+ value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
- /* Should we do read/mask/write...? 16 vs 32 bit!!! */
- value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg) & 0xffff;
+ if (high)
+ value >>= 16;
- return (value);
+ return (value & 0xffff);
}
void
ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
{
struct pci_attach_args *pa;
+ uint32_t rv;
+ int high = 0;
+ /* Need to do read/mask/write... because 16 vs 32 bit!!! */
+ if (reg & 0x2) {
+ high = 1;
+ reg &= ~0x2;
+ }
pa = ((struct ixgbe_osdep *)hw->back)->os_pa;
-
- /* Should we do read/mask/write...? 16 vs 32 bit!!! */
- pci_conf_write(pa->pa_pc, pa->pa_tag, reg, value);
+ rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
+ if (!high)
+ rv = (rv & 0xffff0000) | value;
+ else
+ rv = (rv & 0xffff) | ((uint32_t)value << 16);
+ pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
}
/*
@@ -3006,20 +3386,61 @@ ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
void
ixgbe_configure_ivars(struct ix_softc *sc)
{
- struct tx_ring *txr = sc->tx_rings;
- struct rx_ring *rxr = sc->rx_rings;
+ struct ix_queue *que = sc->queues;
+ uint32_t newitr;
int i;
- for (i = 0; i < sc->num_rx_queues; i++, rxr++)
- ixgbe_set_ivar(sc, i, rxr->msix, 0);
+#if 0
+ if (ixgbe_max_interrupt_rate > 0)
+ newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
+ else
+#endif
+ newitr = 0;
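+	/* an EITR value of 0 applies no interrupt moderation */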
- for (i = 0; i < sc->num_tx_queues; i++, txr++)
- ixgbe_set_ivar(sc, i, txr->msix, 1);
+ for (i = 0; i < sc->num_queues; i++, que++) {
+ /* First the RX queue entry */
+ ixgbe_set_ivar(sc, i, que->msix, 0);
+ /* ... and the TX */
+ ixgbe_set_ivar(sc, i, que->msix, 1);
+ /* Set an Initial EITR value */
+ IXGBE_WRITE_REG(&sc->hw,
+ IXGBE_EITR(que->msix), newitr);
+ }
/* For the Link interrupt */
ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
}
+/*
+ * ixgbe_sfp_probe - called in the local timer to
+ * determine if a port had optics inserted.
+ */
+int
+ixgbe_sfp_probe(struct ix_softc *sc)
+{
+ int result = FALSE;
+
+ if ((sc->hw.phy.type == ixgbe_phy_nl) &&
+ (sc->hw.phy.sfp_type == ixgbe_sfp_type_not_present)) {
+ int32_t ret = sc->hw.phy.ops.identify_sfp(&sc->hw);
+ if (ret)
+ goto out;
+ ret = sc->hw.phy.ops.reset(&sc->hw);
+ if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ printf("%s: Unsupported SFP+ module detected!",
+ sc->dev.dv_xname);
+ goto out;
+ }
+ /* We now have supported optics */
+ sc->sfp_probe = FALSE;
+ /* Set the optics type so system reports correctly */
+ ixgbe_setup_optics(sc);
+ result = TRUE;
+ }
+out:
+ return (result);
+}
+
/**********************************************************************
*
* Update the board statistics counters.
diff --git a/sys/dev/pci/if_ix.h b/sys/dev/pci/if_ix.h
index bca4050468f..0bc365a7311 100644
--- a/sys/dev/pci/if_ix.h
+++ b/sys/dev/pci/if_ix.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ix.h,v 1.11 2010/08/27 08:24:53 deraadt Exp $ */
+/* $OpenBSD: if_ix.h,v 1.12 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -39,10 +39,6 @@
#include <dev/pci/ixgbe.h>
-#if 0
-#include "tcp_lro.h"
-#endif
-
/* Tunables */
/*
@@ -76,12 +72,6 @@
#define DBA_ALIGN 128
/*
- * This parameter controls the maximum no of times the driver will loop in
- * the isr. Minimum Value = 1
- */
-#define MAX_INTR 10
-
-/*
* This parameter controls the duration of transmit watchdog timer.
*/
#define IXGBE_TX_TIMEOUT 5 /* set to 5 seconds */
@@ -96,7 +86,7 @@
#define IXGBE_MAX_FRAME_SIZE 0x3F00
/* Flow control constants */
-#define IXGBE_FC_PAUSE 0x680
+#define IXGBE_FC_PAUSE 0xFFFF
#define IXGBE_FC_HI 0x20000
#define IXGBE_FC_LO 0x10000
@@ -119,47 +109,38 @@
#define IXGBE_82598_SCATTER 100
#define IXGBE_82599_SCATTER 32
#define IXGBE_MSIX_BAR 3
-#if 0
#define IXGBE_TSO_SIZE 65535
-#else
-#define IXGBE_TSO_SIZE IXGBE_MAX_FRAME_SIZE
-#endif
#define IXGBE_TX_BUFFER_SIZE ((uint32_t) 1514)
-#define IXGBE_RX_HDR_SIZE ((uint32_t) 256)
-#define CSUM_OFFLOAD 7 /* Bits in csum flags */
-
-/* The number of MSIX messages the 82598 supports */
-#define IXGBE_MSGS 18
-
-/* For 6.X code compatibility */
-#if __FreeBSD_version < 700000
-#define ETHER_BPF_MTAP BPF_MTAP
-#define CSUM_TSO 0
-#define IFCAP_TSO4 0
-#define FILTER_STRAY
-#define FILTER_HANDLED
-#endif
+#define IXGBE_RX_HDR 128
+#define IXGBE_VFTA_SIZE 128
+#define IXGBE_BR_SIZE 4096
+#define IXGBE_QUEUE_IDLE 0
+#define IXGBE_QUEUE_WORKING 1
+#define IXGBE_QUEUE_HUNG 2
/*
* Interrupt Moderation parameters
- * for now we hardcode, later
- * it would be nice to do dynamic
*/
-#define MAX_IRQ_SEC 8000
-#define DEFAULT_ITR 1000000000/(MAX_IRQ_SEC * 256)
-#define LINK_ITR 1000000000/(1950 * 256)
+#define IXGBE_LOW_LATENCY 128
+#define IXGBE_AVE_LATENCY 400
+#define IXGBE_BULK_LATENCY 1200
+#define IXGBE_LINK_ITR 2000
/* Used for auto RX queue configuration */
extern int mp_ncpus;
struct ixgbe_tx_buf {
+ uint32_t eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
};
struct ixgbe_rx_buf {
struct mbuf *m_head;
- bus_dmamap_t map;
+ struct mbuf *m_pack;
+ struct mbuf *fmp;
+ bus_dmamap_t hmap;
+ bus_dmamap_t pmap;
};
/*
@@ -175,29 +156,42 @@ struct ixgbe_dma_alloc {
};
/*
+ * Driver queue struct: this is the interrupt container
+ * for the associated tx and rx ring.
+ */
+struct ix_queue {
+ struct ix_softc *sc;
+ uint32_t msix; /* This queue's MSIX vector */
+ uint32_t eims; /* This queue's EIMS bit */
+ uint32_t eitr_setting;
+ /* struct resource *res; */
+ void *tag;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ uint64_t irqs;
+};
+
+/*
* The transmit ring, one per tx queue
*/
struct tx_ring {
struct ix_softc *sc;
struct mutex tx_mtx;
uint32_t me;
- uint32_t msix;
- uint32_t eims;
+ int queue_status;
uint32_t watchdog_timer;
union ixgbe_adv_tx_desc *tx_base;
- uint32_t *tx_hwb;
struct ixgbe_dma_alloc txdma;
- struct ixgbe_dma_alloc txwbdma;
- uint32_t next_avail_tx_desc;
- uint32_t next_tx_to_clean;
+ uint32_t next_avail_desc;
+ uint32_t next_to_clean;
struct ixgbe_tx_buf *tx_buffers;
volatile uint16_t tx_avail;
uint32_t txd_cmd;
bus_dma_tag_t txtag;
+ uint32_t bytes; /* Used for AIM calc */
+ uint32_t packets;
/* Soft Stats */
- uint32_t no_tx_desc_avail;
- uint32_t no_tx_desc_late;
- uint64_t tx_irq;
+ uint64_t no_desc_avail;
uint64_t tx_packets;
};
@@ -209,71 +203,91 @@ struct rx_ring {
struct ix_softc *sc;
struct mutex rx_mtx;
uint32_t me;
- uint32_t msix;
- uint32_t eims;
- uint32_t payload;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
#if 0
struct lro_ctrl lro;
#endif
- unsigned int last_rx_desc_filled;
+ int lro_enabled;
+ int hdr_split;
+ int hw_rsc;
+ int discard;
+ unsigned int next_to_refresh;
unsigned int next_to_check;
+ unsigned int last_desc_filled;
int rx_ndescs;
struct ixgbe_rx_buf *rx_buffers;
- bus_dma_tag_t rxtag;
- struct mbuf *fmp;
- struct mbuf *lmp;
+
+ uint32_t bytes; /* Used for AIM calc */
+ uint32_t packets;
+
/* Soft stats */
uint64_t rx_irq;
- uint64_t packet_count;
- uint64_t byte_count;
+ uint64_t rx_split_packets;
+ uint64_t rx_packets;
+ uint64_t rx_bytes;
+ uint64_t rx_discarded;
+ uint64_t rsc_num;
};
/* Our adapter structure */
struct ix_softc {
- struct device dev;
- struct arpcom arpcom;
+ struct device dev;
+ struct arpcom arpcom;
struct ixgbe_hw hw;
- struct ixgbe_osdep osdep;
+ struct ixgbe_osdep osdep;
+
+ /* struct resource *pci_mem; */
+ /* struct resource *msix_mem; */
+
+ void *tag;
+ /* struct resource *res; */
+
+ struct ifmedia media;
+ struct timeout timer;
+ int msix;
+ int if_flags;
- struct resource *pci_mem;
- struct resource *msix_mem;
+ struct mutex core_mtx;
+
+ uint16_t num_vlans;
+ uint16_t num_queues;
/*
- * Interrupt resources:
- * Oplin has 20 MSIX messages
- * so allocate that for now.
+	 * Shadow VFTA table; this is needed because
+ * the real vlan filter table gets cleared during
+ * a soft reset and the driver needs to be able
+ * to repopulate it.
*/
- void *tag[IXGBE_MSGS];
- struct resource *res[IXGBE_MSGS];
- int rid[IXGBE_MSGS];
- uint32_t eims_mask;
-
- struct ifmedia media;
- struct timeout timer;
- int msix;
- int if_flags;
-
- struct mutex core_mtx;
-
- /* Legacy Fast Intr handling */
- int sfp_probe;
- workq_fn link_task;
-
- /* Info about the board itself */
- uint32_t part_num;
- int link_active;
- uint16_t max_frame_size;
- uint32_t link_speed;
- uint32_t tx_int_delay;
- uint32_t tx_abs_int_delay;
- uint32_t rx_int_delay;
- uint32_t rx_abs_int_delay;
-
- /* Indicates the cluster size to use */
- int bigbufs;
+ uint32_t shadow_vfta[IXGBE_VFTA_SIZE];
+
+ /* Info about the interface */
+ uint optics;
+ int advertise; /* link speeds */
+ int link_active;
+ uint16_t max_frame_size;
+ uint16_t num_segs;
+ uint32_t link_speed;
+ int link_up;
+ uint32_t linkvec;
+
+ /* Mbuf cluster size */
+ uint32_t rx_mbuf_sz;
+
+ /* Support for pluggable optics */
+ int sfp_probe;
+ workq_fn link_task; /* Link tasklet */
+ workq_fn mod_task; /* SFP tasklet */
+ workq_fn msf_task; /* Multispeed Fiber */
+
+ /*
+ * Queues:
+	 * This is the irq holder; it has
+	 * an RX/TX pair of rings associated
+	 * with it.
+ */
+ struct ix_queue *queues;
/*
* Transmit rings:
@@ -281,27 +295,28 @@ struct ix_softc {
*/
struct tx_ring *tx_rings;
int num_tx_desc;
- int num_tx_queues;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
+ uint64_t que_mask;
int num_rx_desc;
- int num_rx_queues;
uint32_t rx_process_limit;
- uint optics;
+
+ /* Multicast array memory */
+ uint8_t *mta;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
- unsigned long mbuf_alloc_failed;
- unsigned long mbuf_cluster_failed;
+ unsigned long mbuf_defrag_failed;
+ unsigned long mbuf_header_failed;
+ unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
- unsigned long linkvec;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
diff --git a/sys/dev/pci/ixgbe.c b/sys/dev/pci/ixgbe.c
index 0acb6d8f7a8..e3227631e69 100644
--- a/sys/dev/pci/ixgbe.c
+++ b/sys/dev/pci/ixgbe.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe.c,v 1.5 2010/02/19 18:55:12 jsg Exp $ */
+/* $OpenBSD: ixgbe.c,v 1.6 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -47,11 +47,42 @@ uint16_t ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, uint16_t count);
void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, uint32_t *eec);
void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-uint16_t ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
+int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+int32_t ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg, uint32_t lp_reg,
+ uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm);
+
+
int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan);
+/* MBX */
+int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
+int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
+uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
+int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask);
+int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
+int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
+int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id);
+int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_write_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t mbx_id);
+int32_t ixgbe_read_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t mbx_id);
+int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index);
+int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
+int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number);
+int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number);
+int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number);
+int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t vf_number);
+int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t vf_number);
+
+
/**
* ixgbe_init_ops_generic - Inits function ptrs
* @hw: pointer to the hardware structure
@@ -136,7 +167,6 @@ int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw)
int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
uint32_t ctrl_ext;
- int32_t ret_val = IXGBE_SUCCESS;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -161,7 +191,46 @@ int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Clear adapter stopped flag */
hw->adapter_stopped = FALSE;
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ uint32_t i;
+ uint32_t regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
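+	/*
+	 * (Relaxed ordering could let a descriptor write-back overtake
+	 * the packet data DMA, so it is forced off for both tx and rx.)
+	 */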
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return IXGBE_SUCCESS;
}
/**
@@ -271,15 +340,190 @@ int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_BPTC);
for (i = 0; i < 16; i++) {
IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- IXGBE_READ_REG(hw, IXGBE_QBRC(i));
IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
}
return IXGBE_SUCCESS;
}
/**
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the EEPROM.
+ **/
+int32_t ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, uint8_t *pba_num,
+ uint32_t pba_num_size)
+{
+ int32_t ret_val;
+ uint16_t data;
+ uint16_t pba_ptr;
+ uint16_t offset;
+ uint16_t length;
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+	 * if data is not the ptr guard the PBA must be in legacy format,
+	 * which means pba_ptr is actually our second data word for the
+	 * PBA number and we can decode it into an ASCII string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
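+		/*
+		 * Index 7 holds a raw 0 nibble; the conversion loop
+		 * below turns it and the other nibbles into ASCII hex
+		 * digits while leaving the '-' separator untouched.
+		 */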
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((uint32_t)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (uint8_t)(data >> 8);
+ pba_num[(offset * 2) + 1] = (uint8_t)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_pba_length_generic - Reads part number length from EEPROM
+ * @hw: pointer to hardware structure
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number length from the EEPROM.
+ * Returns expected buffer size in pba_num_size
+ **/
+int32_t ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, uint32_t *pba_num_size)
+{
+ int32_t ret_val;
+ uint16_t data;
+ uint16_t pba_ptr;
+ uint16_t length;
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ *pba_num_size = 11;
+ return IXGBE_SUCCESS;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /*
+ * Convert from length in 16bit values to 8bit chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((uint32_t)length * 2) - 1;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_read_pba_num_generic - Reads part number from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number from the EEPROM
@@ -295,6 +539,9 @@ int32_t ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, uint32_t *pba_num)
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
return ret_val;
+ } else if (data == IXGBE_PBANUM_PTR_GUARD) {
+ DEBUGOUT("NVM Not supported\n");
+ return IXGBE_NOT_IMPLEMENTED;
}
*pba_num = (uint32_t)(data << 16);
@@ -456,8 +703,7 @@ int32_t ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS)
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
+ ixgbe_disable_pcie_master(hw);
return IXGBE_SUCCESS;
}
@@ -710,6 +956,47 @@ out:
}
/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data)
+{
+ uint32_t eewr;
+ int32_t status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START;
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+out:
+ return status;
+}
+
+/**
* ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
* @ee_reg: EEPROM flag for polling
@@ -777,15 +1064,15 @@ int32_t ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
- }
- /* Setup EEPROM for Read/Write */
- if (status == IXGBE_SUCCESS) {
- /* Clear CS and SK */
- eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- usec_delay(1);
+ /* Setup EEPROM for Read/Write */
+ if (status == IXGBE_SUCCESS) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(1);
+ }
}
return status;
}
@@ -1245,36 +1532,38 @@ int32_t ixgbe_set_rar_generic(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr
uint32_t rar_low, rar_high;
uint32_t rar_entries = hw->mac.num_rar_entries;
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
/* setup VMDq pool selection before this RAR gets enabled */
hw->mac.ops.set_vmdq(hw, index, vmdq);
/* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * HW expects these in little endian so we reverse the byte
- * order from network order (big endian) to little endian
- */
- rar_low = ((uint32_t)addr[0] |
- ((uint32_t)addr[1] << 8) |
- ((uint32_t)addr[2] << 16) |
- ((uint32_t)addr[3] << 24));
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((uint32_t)addr[0] |
+ ((uint32_t)addr[1] << 8) |
+ ((uint32_t)addr[2] << 16) |
+ ((uint32_t)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8));
- if (enable_addr != 0)
- rar_high |= IXGBE_RAH_AV;
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
- DEBUGOUT1("RAR index %d is out of range.\n", index);
- }
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return IXGBE_SUCCESS;
}
@@ -1292,21 +1581,22 @@ int32_t ixgbe_clear_rar_generic(struct ixgbe_hw *hw, uint32_t index)
uint32_t rar_entries = hw->mac.num_rar_entries;
/* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
+ if (index >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
@@ -1351,6 +1641,9 @@ int32_t ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
hw->addr_ctrl.overflow_promisc = 0;
@@ -1527,7 +1820,6 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
uint32_t vector;
uint32_t vector_bit;
uint32_t vector_reg;
- uint32_t mta_reg;
hw->addr_ctrl.mta_in_use++;
@@ -1545,9 +1837,7 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
- IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
/**
@@ -1575,18 +1865,21 @@ int32_t ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, uint8_t *mc_addr_
hw->addr_ctrl.num_mc_addrs = mc_addr_count;
hw->addr_ctrl.mta_in_use = 0;
- /* Clear the MTA */
+ /* Clear mta_shadow */
DEBUGOUT(" Clearing MTA\n");
- for (i = 0; i < hw->mac.mcft_size; i++)
- IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /* Add the new addresses */
+ /* Update mta_shadow */
for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
}
/* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1641,10 +1934,11 @@ int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw, int32_t packetbuf_num)
uint32_t mflcn_reg, fccfg_reg;
uint32_t reg;
uint32_t rx_pba_size;
+ uint32_t fcrtl, fcrth;
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -1666,7 +1960,8 @@ int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw, int32_t packetbuf_num)
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control is disabled by software override or autoneg.
+ /*
+ * Flow control is disabled by software override or autoneg.
* The code below will actually disable it in the HW.
*/
break;
@@ -1705,39 +2000,21 @@ int32_t ixgbe_fc_enable_generic(struct ixgbe_hw *hw, int32_t packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
- /* Thresholds are different for link flow control when in DCB mode */
- if (reg & IXGBE_MTQC_RT_ENA) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
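+	/*
+	 * The water marks are measured in KB down from the top of the
+	 * packet buffer; the shift left by 10 converts them to the
+	 * byte counts the FCRTH/FCRTL registers expect.
+	 */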
- /* Always disable XON for LFC when in DCB mode */
- reg = (rx_pba_size >> 5) & 0xFFE0;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+ fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+ fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
- reg = (rx_pba_size >> 2) & 0xFFE0;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause)
- reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
- } else {
- /* Set up and enable Rx high/low water mark thresholds,
- * enable XON. */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water |
- IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
- }
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
@@ -1763,10 +2040,11 @@ int32_t ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
int32_t ret_val = IXGBE_SUCCESS;
ixgbe_link_speed speed;
- uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
- uint32_t links2, anlp1_reg, autoc_reg, links;
int link_up;
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
/*
* AN should have completed when the cable was plugged in.
* Look for reasons to bail out. Bail out if:
@@ -1777,153 +2055,201 @@ int32_t ixgbe_fc_autoneg(struct ixgbe_hw *hw)
* So use link_up_wait_to_complete=FALSE.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
-
- if (hw->fc.disable_fc_autoneg || (!link_up)) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
+ if (!link_up) {
+ ret_val = IXGBE_ERR_FLOW_CONTROL;
goto out;
}
- /*
- * On backplane, bail out if
- * - backplane autoneg was not completed, or if
- * - we are 82599 and link partner is not AN enabled
- */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
- }
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
}
+out:
+ if (ret_val == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = TRUE;
+ } else {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control on 1 gig fiber.
+ **/
+int32_t ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ uint32_t pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ int32_t ret_val;
+
/*
* On multispeed fiber at 1g, bail out if
* - link is up but AN did not complete, or if
* - link is up and AN completed but timed out
*/
- if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- hw->fc.fc_was_autonegged = FALSE;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
}
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+int32_t ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ uint32_t links2, anlp1_reg, autoc_reg, links;
+ int32_t ret_val;
+
/*
- * Bail out on
- * - copper or CX4 adapters
- * - fiber adapters running at 10gig
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
*/
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.media_type == ixgbe_media_type_cx4) ||
- ((hw->phy.media_type == ixgbe_media_type_fiber) &&
- (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
hw->fc.fc_was_autonegged = FALSE;
hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = FALSE;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+ }
/*
- * Read the AN advertisement and LP ability registers and resolve
+ * Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
*/
- if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
- (hw->phy.media_type != ixgbe_media_type_backplane)) {
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
- }
- }
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+int32_t ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ uint16_t technology_ability_reg = 0;
+ uint16_t lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (uint32_t)technology_ability_reg,
+ (uint32_t)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+int32_t ixgbe_negotiate_fc(struct ixgbe_hw *hw, uint32_t adv_reg, uint32_t lp_reg,
+ uint32_t adv_sym, uint32_t adv_asm, uint32_t lp_sym, uint32_t lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
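+	/*
+	 * Clause 37 pause resolution: symmetric on both sides yields
+	 * full flow control (or rx-only if that is all that was
+	 * requested); local asym-only against a sym+asym partner
+	 * yields tx pause; the mirrored case yields rx pause;
+	 * anything else, none.
+	 */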
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
- * Read the 10g AN autoc and LP ability registers and resolve
- * local flow control settings accordingly
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
- if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control=RX PAUSE frames only\n");
- }
- } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
- hw->fc.current_mode = ixgbe_fc_none;
- DEBUGOUT("Flow Control = NONE.\n");
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control=RX PAUSE frames only\n");
}
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
}
- /* Record that current_mode is the result of a successful autoneg */
- hw->fc.fc_was_autonegged = TRUE;
-
-out:
- return ret_val;
+ return IXGBE_SUCCESS;
}
/**
@@ -1935,7 +2261,8 @@ out:
int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
{
int32_t ret_val = IXGBE_SUCCESS;
- uint32_t reg;
+	uint32_t reg = 0, reg_bp = 0;
+ uint16_t reg_cu = 0;
/* Validate the packetbuf configuration */
if (packetbuf_num < 0 || packetbuf_num > 7) {
@@ -1973,11 +2300,26 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
hw->fc.requested_mode = ixgbe_fc_full;
/*
- * Set up the 1G flow control advertisement registers so the HW will be
- * able to do fc autoneg once the cable is plugged in. If we end up
- * using 10g instead, this is harmless.
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
*/
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+ break;
+
+ default:
+ ;
+ }
/*
* The possible values of fc.requested_mode are:
@@ -1993,6 +2335,11 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
case ixgbe_fc_none:
/* Flow control completely disabled by software override. */
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_rx_pause:
/*
@@ -2004,6 +2351,11 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
* disable the adapter's ability to send PAUSE frames.
*/
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_tx_pause:
/*
@@ -2012,10 +2364,22 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
*/
reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+ }
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
default:
DEBUGOUT("Flow control param set incorrectly\n");
@@ -2024,6 +2388,10 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
break;
}
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
@@ -2035,64 +2403,20 @@ int32_t ixgbe_setup_fc(struct ixgbe_hw *hw, int32_t packetbuf_num)
DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
/*
- * Set up the 10G flow control advertisement registers so the HW
- * can do fc autoneg once the cable is plugged in. If we end up
- * using 1g instead, this is harmless.
- */
- reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-
- /*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
*/
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_AUTOC_ASM_PAUSE);
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- break;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
}
- /*
- * AUTOC restart handles negotiation of 1G and 10G. There is
- * no need to set the PCS1GCTL register.
- */
- reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
- DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+ DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
return ret_val;
}
@@ -2113,6 +2437,10 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
uint32_t number_of_queues;
int32_t status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ /* Just jump out if bus mastering is already disabled */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
+
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
@@ -2128,13 +2456,42 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
- status = IXGBE_SUCCESS;
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto check_device_status;
+ usec_delay(100);
+ }
+
+ DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+check_device_status:
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
+ IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
break;
- }
usec_delay(100);
}
+ if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
+ DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
+ else
+ goto out;
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+out:
return status;
}
@@ -2242,7 +2599,6 @@ int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index)
hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
if (!link_up) {
-
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
@@ -2382,37 +2738,38 @@ int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmd
uint32_t mpsar_lo, mpsar_hi;
uint32_t rar_entries = hw->mac.num_rar_entries;
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
- if (!mpsar_lo && !mpsar_hi)
- goto done;
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
done:
return IXGBE_SUCCESS;
}
@@ -2428,18 +2785,20 @@ int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
uint32_t mpsar;
uint32_t rar_entries = hw->mac.num_rar_entries;
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return IXGBE_SUCCESS;
}
@@ -2474,6 +2833,10 @@ int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
uint32_t first_empty_slot = 0;
int32_t regindex;
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
/*
* Search for the vlan id in the VLVF entries. Save off the first empty
* slot found along the way
@@ -2496,7 +2859,7 @@ int32_t ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, uint32_t vlan)
regindex = first_empty_slot;
else {
DEBUGOUT("No space in VLVF.\n");
- regindex = -1;
+ regindex = IXGBE_ERR_NO_SPACE;
}
}
@@ -2517,8 +2880,11 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
{
int32_t regindex;
uint32_t bitindex;
+ uint32_t vfta;
uint32_t bits;
uint32_t vt;
+ uint32_t targetbit;
+ int vfta_changed = FALSE;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -2526,6 +2892,7 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
/*
* this is a 2 part operation - first the VFTA, then the
* VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
*/
/* Part 1
@@ -2536,13 +2903,20 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
*/
regindex = (vlan >> 5) & 0x7F;
bitindex = vlan & 0x1F;
- bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
- if (vlan_on)
- bits |= (1 << bitindex);
- else
- bits &= ~(1 << bitindex);
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+ targetbit = (1 << bitindex);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ if (vlan_on) {
+ if (!(vfta & targetbit)) {
+ vfta |= targetbit;
+ vfta_changed = TRUE;
+ }
+ } else {
+ if ((vfta & targetbit)) {
+ vfta &= ~targetbit;
+ vfta_changed = TRUE;
+ }
+ }
/* Part 2
* If VT Mode is set
@@ -2554,61 +2928,84 @@ int32_t ixgbe_set_vfta_generic(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind
*/
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
- if (vlan == 0) {
- regindex = 0;
- } else {
- regindex = ixgbe_find_vlvf_slot(hw, vlan);
- if (regindex < 0)
- goto out;
- }
+ int32_t vlvf_index;
+
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+ if (vlvf_index < 0)
+ return vlvf_index;
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex*2));
+ IXGBE_VLVFB(vlvf_index*2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex*2),
+ IXGBE_VLVFB(vlvf_index*2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
- bits |= (1 << vind);
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits |= (1 << (vind-32));
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex*2)+1),
+ IXGBE_VLVFB((vlvf_index*2)+1),
bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex*2));
+ IXGBE_VLVFB(vlvf_index*2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex*2),
+ IXGBE_VLVFB(vlvf_index*2),
bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
+ IXGBE_VLVFB((vlvf_index*2)+1));
} else {
bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex*2)+1));
- bits &= ~(1 << vind);
+ IXGBE_VLVFB((vlvf_index*2)+1));
+ bits &= ~(1 << (vind-32));
IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex*2)+1),
+ IXGBE_VLVFB((vlvf_index*2)+1),
bits);
bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex*2));
+ IXGBE_VLVFB(vlvf_index*2));
}
}
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
+		/*
+		 * If there are still pool bits set in the VLVFB registers
+		 * for this VLAN ID, the entry is still in use.  Setting
+		 * the VFTA bit is always safe, but it may only be cleared
+		 * once every pool/VF using this VLAN ID has released it,
+		 * i.e. once "bits" reaches zero; until then a request to
+		 * clear it is ignored.
+		 */
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
+ if (!vlan_on) {
+				/*
+				 * Someone wants to clear the VFTA entry
+				 * but some pools/VFs are still using it.
+				 * Ignore the request.
+				 */
+ vfta_changed = FALSE;
+ }
+ }
else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
-out:
+
+ if (vfta_changed)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
return IXGBE_SUCCESS;
}
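The VFTA half of the operation above packs all 4096 VLAN IDs into 128 32-bit registers. A worked sketch of the index math (the helper name is illustrative):

	/* VLAN 100: regindex = (100 >> 5) & 0x7F = 3, bitindex = 100 & 0x1F = 4,
	 * so VLAN 100 is bit 4 of VFTA[3]. */
	static uint32_t
	example_vfta_bit(uint32_t vlan, uint32_t *regindex)
	{
		*regindex = (vlan >> 5) & 0x7F;	/* one of 128 registers */
		return 1 << (vlan & 0x1F);	/* bit within that register */
	}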
@@ -2646,10 +3043,19 @@ int32_t ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
int *link_up, int link_up_wait_to_complete)
{
- uint32_t links_reg;
+ uint32_t links_reg, links_orig;
uint32_t i;
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ DEBUGOUT2("LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if (links_reg & IXGBE_LINKS_UP) {
@@ -2674,8 +3080,11 @@ int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *spee
else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
IXGBE_LINKS_SPEED_1G_82599)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
*speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* if link is down, zero out the current_mode */
if (*link_up == FALSE) {
@@ -2685,3 +3094,783 @@ int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *spee
return IXGBE_SUCCESS;
}
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function checks the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+int32_t ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return IXGBE_SUCCESS;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps)
+{
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
+ * @hw: pointer to hardware structure
+ *
+ * Re-enables relaxed ordering on the Tx and Rx DCA control registers
+ * for all queues.
+ **/
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
+{
+ uint32_t regval;
+ uint32_t i;
+
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
+
+ /* Enable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+}
+
+/*
+ * MBX: Mailbox handling
+ */
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read the message from the buffer
+ **/
+int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_mbx");
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied the message into the buffer
+ **/
+int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_SUCCESS;
+
+ DEBUGFUNC("ixgbe_write_mbx");
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_msg");
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_ack");
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_rst(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst");
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_msg");
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ DEBUGFUNC("ixgbe_poll_for_ack");
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ usec_delay(mbx->usec_delay);
+ }
+
+out:
+ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_read_posted_mbx");
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied the message into the buffer
+ * and received an ack to that message within the delay * timeout period
+ **/
+int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_write_posted_mbx");
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
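Taken together, the posted variants give callers a blocking request/reply primitive. A minimal VF-side sketch, assuming the mailbox ops and timeout have already been initialized; the message contents are purely illustrative:

	uint32_t msg[2] = { 0x01, 0x00 };	/* illustrative opcode + argument */

	if (ixgbe_write_posted_mbx(hw, msg, 2, 0) == IXGBE_SUCCESS &&
	    ixgbe_read_posted_mbx(hw, msg, 2, 0) == IXGBE_SUCCESS) {
		/* msg[] now holds the PF's reply */
	}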
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ * ixgbe_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the
+ * read-to-clear status bits.
+ **/
+uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read-to-clear bits within
+ * the V2P mailbox.
+ **/
+int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask)
+{
+ uint32_t v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = IXGBE_SUCCESS;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
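The sticky copy in hw->mbx.v2p_mailbox exists because the VFMAILBOX status bits are read-to-clear: a single hardware read would otherwise destroy bits a later caller still needs. A short sketch of the effect:

	/* Both PFSTS and PFACK may be latched by one hardware read;
	 * the first check consumes only PFSTS from the saved copy. */
	ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS);
	/* Still returns IXGBE_SUCCESS if PFACK was set, even though
	 * the register itself was cleared by the earlier read. */
	ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK);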
+
+/**
+ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the RSTD or RSTI bit or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+ if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied the message into the buffer
+ **/
+int32_t ixgbe_write_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t mbx_id)
+{
+ int32_t ret_val;
+ uint16_t i;
+
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_write_mbx_vf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_check_for_ack_vf(hw, 0);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read the message from the buffer
+ **/
+int32_t ixgbe_read_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t mbx_id)
+{
+ int32_t ret_val = IXGBE_SUCCESS;
+ uint16_t i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_PARAMETER(mbx_id);
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_vf;
+ mbx->ops.write = ixgbe_write_mbx_vf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
+{
+ uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+ int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
+ uint32_t vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
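The per-VF request bits are spread across several MBVFICR registers, 16 VFs apiece. A worked example of the index math, assuming IXGBE_MBVFICR_INDEX(vf) is vf >> 4 as in the shared code (the helper name is illustrative):

	/* VF 37: index = 37 >> 4 = 2, vf_bit = 37 % 16 = 5. */
	static uint32_t
	example_mbvficr_mask(uint16_t vf_number, int32_t *index)
	{
		*index = IXGBE_MBVFICR_INDEX(vf_number);
		return IXGBE_MBVFICR_VFREQ_VF1 << (vf_number % 16);
	}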
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+ int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
+ uint32_t vf_bit = vf_number % 16;
+
+ DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = IXGBE_SUCCESS;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has triggered a VFLR or else ERR_MBX
+ **/
+int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+{
+ uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
+ uint32_t vf_shift = vf_number % 32;
+ uint32_t vflre = 0;
+ int32_t ret_val = IXGBE_ERR_MBX;
+
+ DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ default:
+ goto out;
+ break;
+ }
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = IXGBE_SUCCESS;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+{
+ int32_t ret_val = IXGBE_ERR_MBX;
+ uint32_t p2v_mailbox;
+
+ DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = IXGBE_SUCCESS;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied the message into the buffer
+ **/
+int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t vf_number)
+{
+ int32_t ret_val;
+ uint16_t i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request, so no polling for a message is needed.
+ **/
+int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+ uint16_t vf_number)
+{
+ int32_t ret_val;
+ uint16_t i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->ops.read = ixgbe_read_mbx_pf;
+ mbx->ops.write = ixgbe_write_mbx_pf;
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
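With the params set, the PF side drives everything through the ops table. A minimal sketch of servicing one VF (the 4-dword size and the handling step are illustrative):

	struct ixgbe_mbx_info *mbx = &hw->mbx;
	uint32_t msg[4];

	if (mbx->ops.check_for_msg(hw, 0) == IXGBE_SUCCESS &&
	    mbx->ops.read(hw, msg, 4, 0) == IXGBE_SUCCESS) {
		/* ...decode the request and build a reply in msg[]... */
		mbx->ops.write(hw, msg, 4, 0);
	}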
+
diff --git a/sys/dev/pci/ixgbe.h b/sys/dev/pci/ixgbe.h
index c09791aa0c9..604fa15b3d9 100644
--- a/sys/dev/pci/ixgbe.h
+++ b/sys/dev/pci/ixgbe.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe.h,v 1.6 2010/02/23 18:43:15 jsg Exp $ */
+/* $OpenBSD: ixgbe.h,v 1.7 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -110,7 +110,6 @@
#define PCI_COMMAND_REGISTER PCIR_COMMAND
/* Compat glue */
-#define MJUMPAGESIZE MCLBYTES
#define PCIR_BAR(_x) (0x10 + (_x) * 4)
#define roundup2(size, unit) (((size) + (unit) - 1) & ~((unit) - 1))
#define usec_delay(x) delay(x)
@@ -119,12 +118,6 @@
/* This is needed by the shared code */
struct ixgbe_hw;
-extern uint16_t ixgbe_read_pci_cfg(struct ixgbe_hw *, uint32_t);
-#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg
-
-extern void ixgbe_write_pci_cfg(struct ixgbe_hw *, uint32_t, uint16_t);
-#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg
-
struct ixgbe_osdep {
bus_dma_tag_t os_dmat;
bus_space_tag_t os_memt;
@@ -137,6 +130,12 @@ struct ixgbe_osdep {
struct pci_attach_args *os_pa;
};
+extern uint16_t ixgbe_read_pci_cfg(struct ixgbe_hw *, uint32_t);
+#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg
+
+extern void ixgbe_write_pci_cfg(struct ixgbe_hw *, uint32_t, uint16_t);
+#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg
+
#define IXGBE_WRITE_FLUSH(a) \
IXGBE_READ_REG(a, IXGBE_STATUS)
#define IXGBE_READ_REG(a, reg) \
@@ -147,7 +146,7 @@ struct ixgbe_osdep {
((struct ixgbe_osdep *)(a)->back)->os_memh, reg, value)
#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
bus_space_read_4(((struct ixgbe_osdep *)(a)->back)->os_memt, \
- ((struct ixgbe_osdep *)(a)->back)->os_memh, (reg + ((offset) << 2))))
+ ((struct ixgbe_osdep *)(a)->back)->os_memh, (reg + ((offset) << 2)))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
bus_space_write_4(((struct ixgbe_osdep *)(a)->back)->os_memt, \
((struct ixgbe_osdep *)(a)->back)->os_memh, (reg + ((offset) << 2)), value)
@@ -163,8 +162,12 @@ uint32_t ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_generic(struct ixgbe_hw *hw);
int32_t ixgbe_init_hw_generic(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int32_t ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
int32_t ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
int32_t ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, uint32_t *pba_num);
+int32_t ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, uint8_t *pba_num,
+ uint32_t pba_num_size);
+int32_t ixgbe_read_pba_length_generic(struct ixgbe_hw *hw, uint32_t *pba_num_size);
int32_t ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *mac_addr);
int32_t ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -176,6 +179,7 @@ int32_t ixgbe_led_off_generic(struct ixgbe_hw *hw, uint32_t index);
int32_t ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
int32_t ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data);
int32_t ixgbe_read_eerd_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t *data);
+int32_t ixgbe_write_eewr_generic(struct ixgbe_hw *hw, uint16_t offset, uint16_t data);
int32_t ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, uint16_t offset,
uint16_t *data);
uint16_t ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
@@ -209,6 +213,9 @@ int32_t ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
int32_t ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, uint32_t index);
int32_t ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, uint32_t index);
+int32_t ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *san_mac_addr);
+int32_t ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *san_mac_addr);
+
int32_t ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq);
@@ -221,9 +228,29 @@ int32_t ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
int *link_up, int link_up_wait_to_complete);
+int32_t ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, uint16_t *wwnn_prefix,
+ uint16_t *wwpn_prefix);
+
+int32_t ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, uint16_t *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, int enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, int enable, int vf);
+int32_t ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, uint16_t *device_caps);
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+
+/* API */
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq);
void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr);
+int32_t ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int32_t ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, uint32_t pballoc);
+int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc);
+int32_t ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *masks,
+ uint16_t soft_id,
+ uint8_t queue);
+uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *input, uint32_t key);
+
int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw);
int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw);
@@ -247,6 +274,7 @@ int32_t ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
int *autoneg);
+/* PHY specific */
int32_t ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
int *link_up);
@@ -261,6 +289,7 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
uint16_t *list_offset,
uint16_t *data_offset);
+int32_t ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
int32_t ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
uint8_t dev_addr, uint8_t *data);
int32_t ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
@@ -269,4 +298,17 @@ int32_t ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
uint8_t *eeprom_data);
int32_t ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
uint8_t eeprom_data);
+
+/* MBX */
+int32_t ixgbe_read_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+int32_t ixgbe_write_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+int32_t ixgbe_check_for_msg(struct ixgbe_hw *, uint16_t);
+int32_t ixgbe_check_for_ack(struct ixgbe_hw *, uint16_t);
+int32_t ixgbe_check_for_rst(struct ixgbe_hw *, uint16_t);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
#endif /* _IXGBE_H_ */
diff --git a/sys/dev/pci/ixgbe_82598.c b/sys/dev/pci/ixgbe_82598.c
index 1aee2fafc62..2202d54d42a 100644
--- a/sys/dev/pci/ixgbe_82598.c
+++ b/sys/dev/pci/ixgbe_82598.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe_82598.c,v 1.5 2010/02/19 18:55:12 jsg Exp $ */
+/* $OpenBSD: ixgbe_82598.c,v 1.6 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -46,6 +46,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num);
int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
int autoneg_wait_to_complete);
+int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw);
int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
int *link_up, int link_up_wait_to_complete);
@@ -59,6 +60,7 @@ int32_t ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
int autoneg_wait_to_complete);
int32_t ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq);
int32_t ixgbe_set_vfta_82598(struct ixgbe_hw *hw, uint32_t vlan,
@@ -72,7 +74,6 @@ uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
int32_t ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
-int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -160,6 +161,7 @@ int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* MAC */
mac->ops.start_hw = &ixgbe_start_hw_82598;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
@@ -180,6 +182,7 @@ int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 16;
+ mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
@@ -190,6 +193,7 @@ int32_t ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+ mac->ops.flap_tx_laser = NULL;
mac->ops.get_link_capabilities =
&ixgbe_get_link_capabilities_82598;
@@ -279,17 +283,17 @@ int32_t ixgbe_start_hw_82598(struct ixgbe_hw *hw)
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@@ -370,11 +374,14 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
enum ixgbe_media_type media_type;
/* Detect if there is a copper PHY attached. */
- if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
+ default:
+ break;
}
/* Media type for I82598 is based on device ID */
@@ -421,24 +428,33 @@ int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num)
uint32_t fctrl_reg;
uint32_t rmcs_reg;
uint32_t reg;
+ uint32_t rx_pba_size;
uint32_t link_speed = 0;
int link_up;
/*
- * On 82598 backplane having FC on causes resets while doing
- * KX, so turn off here.
+	 * On 82598, having Rx FC on causes resets while doing 1G,
+	 * so turn it off once we know the link_speed.  For more
+	 * details see the 82598 Specification Update.
*/
hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
- if (link_up &&
- link_speed == IXGBE_LINK_SPEED_1GB_FULL &&
- hw->mac.ops.get_media_type(hw) == ixgbe_media_type_backplane) {
- hw->fc.disable_fc_autoneg = TRUE;
- hw->fc.requested_mode = ixgbe_fc_none;
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
}
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -460,7 +476,8 @@ int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num)
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
- /* Flow control is disabled by software override or autoneg.
+ /*
+ * Flow control is disabled by software override or autoneg.
* The code below will actually disable it in the HW.
*/
break;
@@ -501,16 +518,19 @@ int32_t ixgbe_fc_enable_82598(struct ixgbe_hw *hw, int32_t packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- hw->fc.low_water);
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+ reg = (rx_pba_size - hw->fc.high_water) << 6;
+ reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
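The water marks are now derived from the Rx packet buffer size instead of being written verbatim. A worked sketch of the XON threshold, assuming IXGBE_RXPBSIZE_SHIFT is 10 (so a register value of 0x00080000 becomes 512, matching the rx_pb_size = 512 set for the 82598 earlier in this diff) and an illustrative low_water of 64:

	uint32_t rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));

	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;	/* 0x00080000 -> 512 */
	/* (512 - 64) << 6 = 28672, with the XON enable bit set */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTL(0),
	    ((rx_pba_size - 64) << 6) | IXGBE_FCRTL_XONE);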
@@ -574,6 +594,41 @@ int32_t ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ uint32_t timeout;
+ uint16_t an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return IXGBE_SUCCESS;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+ (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+ break;
+
+ msec_delay(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ DEBUGOUT("Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return IXGBE_SUCCESS;
+}
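The 5-second bound in the comment follows from the loop constants: assuming the shared-code value of 50 for IXGBE_VALIDATE_LINK_READY_TIMEOUT, the worst case is 50 iterations of msec_delay(100), i.e. 50 x 100 ms = 5 s before IXGBE_ERR_LINK_SETUP is returned.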
+
+/**
* ixgbe_check_mac_link_82598 - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -618,8 +673,7 @@ int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
&adapt_comp_reg);
}
} else {
- if ((link_reg & 1) &&
- ((adapt_comp_reg & 1) == 0))
+ if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
*link_up = TRUE;
else
*link_up = FALSE;
@@ -654,7 +708,7 @@ int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
- (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+ (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = FALSE;
/* if link is down, zero out the current_mode */
@@ -662,7 +716,6 @@ int32_t ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
hw->fc.current_mode = ixgbe_fc_none;
hw->fc.fc_was_autonegged = FALSE;
}
-
out:
return IXGBE_SUCCESS;
}
@@ -718,6 +771,7 @@ int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
return status;
}
+
/**
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
@@ -816,12 +870,9 @@ no_phy_reset:
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- status = ixgbe_disable_pcie_master(hw);
- if (status != IXGBE_SUCCESS) {
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
- }
+ ixgbe_disable_pcie_master(hw);
+mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@@ -842,6 +893,19 @@ no_phy_reset:
DEBUGOUT("Reset polling failed to complete.\n");
}
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete. We use 1usec since that is
+ * what is needed for ixgbe_disable_pcie_master(). The second reset
+ * then clears out any effects of those events.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ usec_delay(1);
+ goto mac_reset_top;
+ }
+
msec_delay(50);
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -857,8 +921,9 @@ no_phy_reset:
if (hw->mac.orig_link_settings_stored == FALSE) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_link_settings_stored = TRUE;
- } else if (autoc != hw->mac.orig_autoc)
+ } else if (autoc != hw->mac.orig_autoc) {
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ }
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
@@ -869,11 +934,10 @@ no_phy_reset:
*/
hw->mac.ops.init_rx_addrs(hw);
-
-
reset_hw_out:
if (phy_status != IXGBE_SUCCESS)
status = phy_status;
+
return status;
}
@@ -886,6 +950,13 @@ reset_hw_out:
int32_t ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
{
uint32_t rar_high;
+ uint32_t rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -907,14 +978,16 @@ int32_t ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, uint32_t rar, uint32_t vmdq)
UNREFERENCED_PARAMETER(vmdq);
- if (rar < rar_entries) {
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
- if (rar_high & IXGBE_RAH_VIND_MASK) {
- rar_high &= ~IXGBE_RAH_VIND_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
- }
- } else {
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
DEBUGOUT1("RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
}
return IXGBE_SUCCESS;
@@ -1112,8 +1185,10 @@ uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
/* Copper PHY must be checked before AUTOC LMS to determine correct
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
@@ -1123,6 +1198,8 @@ uint32_t ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1223,36 +1300,31 @@ void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_validate_link_ready - Function looks for phy link
+ * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
* @hw: pointer to hardware structure
*
- * Function indicates success when phy link is available. If phy is not ready
- * within 5 seconds of MAC indicating link, the function returns error.
**/
-int32_t ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
- uint32_t timeout;
- uint16_t an_reg;
-
- if (hw->device_id != IXGBE_DEV_ID_82598AT2)
- return IXGBE_SUCCESS;
+ uint32_t regval;
+ uint32_t i;
- for (timeout = 0;
- timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+ DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
- if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
- (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
- break;
-
- msec_delay(100);
+ /* Enable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
- if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
- DEBUGOUT("Link was indicated but link is down\n");
- return IXGBE_ERR_LINK_SETUP;
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
- return IXGBE_SUCCESS;
}
diff --git a/sys/dev/pci/ixgbe_82599.c b/sys/dev/pci/ixgbe_82599.c
index 56dc114be3a..f6c90d87a04 100644
--- a/sys/dev/pci/ixgbe_82599.c
+++ b/sys/dev/pci/ixgbe_82599.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe_82599.c,v 1.2 2010/02/25 10:56:07 jsg Exp $ */
+/* $OpenBSD: ixgbe_82599.c,v 1.3 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -42,6 +42,9 @@ int32_t ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
int *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, int autoneg,
int autoneg_wait_to_complete);
@@ -68,50 +71,16 @@ int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
int32_t ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
int32_t ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, uint32_t regval);
-int32_t ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, uint16_t *device_caps);
int32_t ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+int ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-int32_t ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-int32_t ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, uint32_t pballoc);
-int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc);
+uint32_t ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common);
int32_t ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
uint8_t queue);
-int32_t ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- uint16_t soft_id,
- uint8_t queue);
-uint16_t ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, uint32_t key);
-int32_t ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, uint16_t vlan_id);
-int32_t ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, uint32_t src_addr);
-int32_t ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, uint32_t dst_addr);
-int32_t ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, uint32_t src_addr_1,
- uint32_t src_addr_2, uint32_t src_addr_3,
- uint32_t src_addr_4);
-int32_t ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, uint32_t dst_addr_1,
- uint32_t dst_addr_2, uint32_t dst_addr_3,
- uint32_t dst_addr_4);
-int32_t ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, uint16_t src_port);
-int32_t ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, uint16_t dst_port);
-int32_t ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, uint16_t flex_byte);
-int32_t ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, uint8_t vm_pool);
-int32_t ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, uint8_t l4type);
-int32_t ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, uint16_t *vlan_id);
-int32_t ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, uint32_t *src_addr);
-int32_t ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, uint32_t *dst_addr);
-int32_t ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, uint32_t *src_addr_1,
- uint32_t *src_addr_2, uint32_t *src_addr_3,
- uint32_t *src_addr_4);
-int32_t ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, uint32_t *dst_addr_1,
- uint32_t *dst_addr_2, uint32_t *dst_addr_3,
- uint32_t *dst_addr_4);
-int32_t ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, uint16_t *src_port);
-int32_t ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, uint16_t *dst_port);
-int32_t ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
- uint16_t *flex_byte);
-int32_t ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, uint8_t *vm_pool);
-int32_t ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, uint8_t *l4type);
-
+uint32_t ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -119,16 +88,32 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ mac->ops.disable_tx_laser =
+ &ixgbe_disable_tx_laser_multispeed_fiber;
+ mac->ops.enable_tx_laser =
+ &ixgbe_enable_tx_laser_multispeed_fiber;
+ mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+
+ } else {
+ mac->ops.disable_tx_laser = NULL;
+ mac->ops.enable_tx_laser = NULL;
+ mac->ops.flap_tx_laser = NULL;
+ }
+
if (hw->phy.multispeed_fiber) {
/* Set up dual speed SFP+ support */
mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
} else {
- if ((ixgbe_get_media_type_82599(hw) == ixgbe_media_type_backplane) &&
+ if ((ixgbe_hw0(hw, get_media_type) == ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
- hw->phy.smart_speed == ixgbe_smart_speed_on))
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
- else
+ } else {
mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+ }
}
}
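
The smart-speed test above now dispatches through the ops table instead of calling the 82599 function directly. Presumed shape of the dispatch wrappers (they are defined in ixgbe.h and not shown in this diff; this is a sketch under that assumption):

	#define ixgbe_hw(_hw, _op, args...)	\
		((_hw)->mac.ops._op((_hw), ## args))
	#define ixgbe_hw0(_hw, _op)		\
		((_hw)->mac.ops._op(_hw))

So `ixgbe_hw0(hw, get_media_type)` expands to `hw->mac.ops.get_media_type(hw)`, reading the media type through whatever backend init_ops installed rather than hard-wiring the 82599 variant.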
@@ -188,6 +173,8 @@ init_phy_ops_out:
int32_t ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
int32_t ret_val = IXGBE_SUCCESS;
+ uint32_t reg_anlp1 = 0;
+ uint32_t i = 0;
uint16_t list_offset, data_offset, data_value;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@@ -215,14 +202,34 @@ int32_t ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP by setting Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
/* Release the semaphore */
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msec_delay(4);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ DEBUGOUT("sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
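
The AN-state wait added above is a bounded register poll: up to ten reads, 4ms apart (roughly 40ms total), then a hard error. The same pattern, factored as a hypothetical helper (a sketch, not code from this commit):

	/* Poll a register until (value & mask) != 0 or the try budget runs out. */
	static int
	ixgbe_poll_reg(struct ixgbe_hw *hw, uint32_t reg, uint32_t mask,
	    int tries, int ms_per_try)
	{
		while (tries-- > 0) {
			msec_delay(ms_per_try);
			if (IXGBE_READ_REG(hw, reg) & mask)
				return (1);
		}
		return (0);
	}

With such a helper the block above reduces to a single call with IXGBE_ANLP1 and IXGBE_ANLP1_AN_STATE_MASK, returning IXGBE_ERR_SFP_SETUP_NOT_COMPLETE on timeout.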
@@ -254,6 +261,7 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
/* MAC */
mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+ mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
mac->ops.get_media_type = &ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_82599;
@@ -261,7 +269,14 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
- mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;
+
+ mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+#if 0
+ mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+ mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+ mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+ mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+#endif
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
@@ -272,6 +287,10 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+#if 0
+ mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+ mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+#endif
/* Link */
mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
@@ -281,10 +300,12 @@ int32_t ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 128;
+ mac->rx_pb_size = 512;
mac->max_tx_queues = 128;
mac->max_rx_queues = 128;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
return ret_val;
}
@@ -306,6 +327,14 @@ int32_t ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+ /* Check if 1G SFP module. */
+ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ *negotiation = TRUE;
+ goto out;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not
@@ -394,28 +423,35 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_media_type_82599");
/* Detect if there is a copper PHY attached. */
- if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
+ default:
+ break;
}
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
- case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
media_type = ixgbe_media_type_cx4;
break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -452,8 +488,8 @@ int32_t ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
- IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN
- || (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+ IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
links_reg = 0; /* Just in case Autoneg time = 0 */
for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
@@ -476,6 +512,67 @@ int32_t ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting the physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ uint32_t esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Disable tx laser; allow 100us to go dark per spec */
+ esdp_reg |= IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ usec_delay(100);
+}
+
+/**
+ * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively turning on the Tx
+ * laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ uint32_t esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ /* Enable tx laser; allow 100ms to light up */
+ esdp_reg &= ~IXGBE_ESDP_SDP3;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(100);
+}
+
+/**
+ * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * When the driver changes the link speeds that it can support,
+ * it sets autotry_restart to TRUE to indicate that we need to
+ * initiate a new autotry session with the link partner. To do
+ * so, we set the speed then disable and re-enable the tx laser, to
+ * alert the link partner that it also needs to restart autotry on its
+ * end. This is consistent with true clause 37 autoneg, which also
+ * involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+ DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+ if (hw->mac.autotry_restart) {
+ ixgbe_disable_tx_laser_multispeed_fiber(hw);
+ ixgbe_enable_tx_laser_multispeed_fiber(hw);
+ hw->mac.autotry_restart = FALSE;
+ }
+}
+
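Because init_mac_link_ops leaves all three laser ops NULL for non-fiber media, a caller outside this file has to test the pointer before dispatching. An illustrative caller-side check (not part of this diff):

	/* e.g. in the driver's bring-up path */
	if (hw->mac.ops.enable_tx_laser != NULL)
		hw->mac.ops.enable_tx_laser(hw);
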
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -500,23 +597,13 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
/* Mask off requested but non-supported speeds */
- status = ixgbe_get_link_capabilities_82599(hw, &link_speed, &negotiation);
+ status = ixgbe_hw(hw, get_link_capabilities, &link_speed, &negotiation);
if (status != IXGBE_SUCCESS)
return status;
speed &= link_speed;
/*
- * When the driver changes the link speeds that it can support,
- * it sets autotry_restart to TRUE to indicate that we need to
- * initiate a new autotry session with the link partner. To do
- * so, we set the speed then disable and re-enable the tx laser, to
- * alert the link partner that it also needs to restart autotry on its
- * end. This is consistent with TRUE clause 37 autoneg, which also
- * involves a loss of signal.
- */
-
- /*
* Try each speed one by one, highest priority first. We do this in
* software because 10gb fiber doesn't support speed autonegotiation.
*/
@@ -525,7 +612,7 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
/* If we already have link at this speed, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed, &link_up, FALSE);
+ status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
if (status != IXGBE_SUCCESS)
return status;
@@ -535,30 +622,20 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (1G->10G) */
msec_delay(40);
- status = ixgbe_setup_mac_link_82599(
- hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
- autoneg_wait_to_complete);
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- usec_delay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msec_delay(2);
-
- hw->mac.autotry_restart = FALSE;
- }
+ ixgbe_hw(hw, flap_tx_laser);
/*
* Wait for the controller to acquire link. Per IEEE 802.3ap,
@@ -570,7 +647,7 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
msec_delay(100);
/* If we have link, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed,
+ status = ixgbe_hw(hw, check_link, &link_speed,
&link_up, FALSE);
if (status != IXGBE_SUCCESS)
return status;
@@ -586,7 +663,7 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
/* If we already have link at this speed, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed, &link_up, FALSE);
+ status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
if (status != IXGBE_SUCCESS)
return status;
@@ -597,36 +674,26 @@ int32_t ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
/* Allow module to change analog characteristics (10G->1G) */
msec_delay(40);
- status = ixgbe_setup_mac_link_82599(
- hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
- autoneg_wait_to_complete);
+ status = ixgbe_setup_mac_link_82599(hw,
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != IXGBE_SUCCESS)
return status;
/* Flap the tx laser if it has not already been done */
- if (hw->mac.autotry_restart) {
- /* Disable tx laser; allow 100us to go dark per spec */
- esdp_reg |= IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- usec_delay(100);
-
- /* Enable tx laser; allow 2ms to light up per spec */
- esdp_reg &= ~IXGBE_ESDP_SDP3;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- msec_delay(2);
-
- hw->mac.autotry_restart = FALSE;
- }
+ ixgbe_hw(hw, flap_tx_laser);
/* Wait for the link partner to also set speed */
msec_delay(100);
/* If we have link, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed, &link_up, FALSE);
+ status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
if (status != IXGBE_SUCCESS)
return status;
@@ -714,7 +781,7 @@ int32_t ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
msec_delay(100);
/* If we have link, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed, &link_up,
+ status = ixgbe_hw(hw, check_link, &link_speed, &link_up,
FALSE);
if (status != IXGBE_SUCCESS)
goto out;
@@ -749,7 +816,7 @@ int32_t ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
msec_delay(100);
/* If we have link, just jump out */
- status = ixgbe_check_mac_link_generic(hw, &link_speed, &link_up, FALSE);
+ status = ixgbe_hw(hw, check_link, &link_speed, &link_up, FALSE);
if (status != IXGBE_SUCCESS)
goto out;
@@ -763,6 +830,9 @@ int32_t ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
autoneg_wait_to_complete);
out:
+ if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+ DEBUGOUT("Smartspeed has downgraded the link speed "
+ "from the maximum advertised\n");
return status;
}
@@ -794,7 +864,7 @@ int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
- status = ixgbe_get_link_capabilities_82599(hw, &link_capabilities, &autoneg);
+ status = ixgbe_hw(hw, get_link_capabilities, &link_capabilities, &autoneg);
if (status != IXGBE_SUCCESS)
goto out;
@@ -812,8 +882,8 @@ int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
orig_autoc = autoc;
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+ link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
@@ -834,7 +904,7 @@ int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -847,7 +917,6 @@ int32_t ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
-
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
@@ -918,7 +987,7 @@ int32_t ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
int32_t status = IXGBE_SUCCESS;
- uint32_t ctrl, ctrl_ext;
+ uint32_t ctrl;
uint32_t i;
uint32_t autoc;
uint32_t autoc2;
@@ -953,12 +1022,9 @@ int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- status = ixgbe_disable_pcie_master(hw);
- if (status != IXGBE_SUCCESS) {
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
- }
+ ixgbe_disable_pcie_master(hw);
+mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@@ -978,10 +1044,19 @@ int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete. We use 1usec since that is
+ * what is needed for ixgbe_disable_pcie_master(). The second reset
+ * then clears out any effects of those events.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ usec_delay(1);
+ goto mac_reset_top;
+ }
msec_delay(50);
@@ -1010,7 +1085,7 @@ int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
}
}
- /* Store the permanent mac address */
+ /* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
/*
@@ -1021,6 +1096,23 @@ int32_t ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
+#if 0
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+#endif
reset_hw_out:
return status;
}
@@ -1162,10 +1254,8 @@ int32_t ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, uint32_t pballoc)
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1212,7 +1302,6 @@ int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc)
* must be reduced. The new value is the current size minus
* flow director memory usage size.
*/
-
pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
(IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
@@ -1229,6 +1318,9 @@ int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1255,10 +1347,8 @@ int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc)
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1291,13 +1381,13 @@ int32_t ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, uint32_t pballoc)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
-uint16_t ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, uint32_t key)
+uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ uint32_t key)
{
/*
* The algorithm is as follows:
@@ -1317,524 +1407,158 @@ uint16_t ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, uint32_
* To simplify for programming, the algorithm is implemented
* in software this way:
*
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * hi_hash_dword[31:0] ^= Stream[351:320];
*
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- * if (int_key[i])
- * hash ^= int_stream[(i + 15):i];
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
+ *
*/
+ __be32 common_hash_dword = 0;
+ uint32_t hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ uint32_t hash_result = 0;
+ uint8_t i;
- union {
- uint64_t fill[6];
- uint32_t key[11];
- uint8_t key_stream[44];
- } tmp_key;
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
- uint8_t *stream = (uint8_t *)atr_input;
- uint8_t int_key[44]; /* upper-most bit unused */
- uint8_t hash_str[46]; /* upper-most 2 bits unused */
- uint16_t hash_result = 0;
- int i, j, k, h;
+ /* generate common hash dword */
+ for (i = 10; i; i -= 2)
+ common_hash_dword ^= atr_input->dword_stream[i] ^
+ atr_input->dword_stream[i - 1];
- DEBUGFUNC("ixgbe_atr_compute_hash_82599");
+ hi_hash_dword = ntohl(common_hash_dword);
- /*
- * Initialize the fill member to prevent warnings
- * on some compilers
- */
- tmp_key.fill[0] = 0;
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- /* First load the temporary key stream */
- for (i = 0; i < 6; i++) {
- uint64_t fillkey = ((uint64_t)key << 32) | key;
- tmp_key.fill[i] = fillkey;
- }
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- /*
- * Set the interim key for the hashing. Bit 352 is unused, so we must
- * shift and compensate when building the key.
- */
-
- int_key[0] = tmp_key.key_stream[0] >> 1;
- for (i = 1, j = 0; i < 44; i++) {
- unsigned int this_key = tmp_key.key_stream[j] << 7;
- j++;
- int_key[i] = (uint8_t)(this_key | (tmp_key.key_stream[j] >> 1));
- }
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
/*
- * Set the interim bit string for the hashing. Bits 368 and 367 are
- * unused, so shift and compensate when building the string.
+	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword. This
+	 * had to be delayed because bit 0 of the stream must not see the
+	 * VLAN bits, so they are only folded in after bit 0 was processed.
*/
- hash_str[0] = (stream[40] & 0x7f) >> 1;
- for (i = 1, j = 40; i < 46; i++) {
- unsigned int this_str = stream[j] << 7;
- j++;
- if (j > 41)
- j = 0;
- hash_str[i] = (uint8_t)(this_str | (stream[j] >> 1));
- }
-
- /*
- * Now compute the hash. i is the index into hash_str, j is into our
- * key stream, k is counting the number of bits, and h interates within
- * each byte.
- */
- for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
- for (h = 0; h < 8 && k < 351; h++, k++) {
- if (int_key[j] & (1 << h)) {
- /*
- * Key bit is set, XOR in the current 16-bit
- * string. Example of processing:
- * h = 0,
- * tmp = (hash_str[i - 2] & 0 << 16) |
- * (hash_str[i - 1] & 0xff << 8) |
- * (hash_str[i] & 0xff >> 0)
- * So tmp = hash_str[15 + k:k], since the
- * i + 2 clause rolls off the 16-bit value
- * h = 7,
- * tmp = (hash_str[i - 2] & 0x7f << 9) |
- * (hash_str[i - 1] & 0xff << 1) |
- * (hash_str[i] & 0x80 >> 7)
- */
- int tmp = (hash_str[i] >> h);
- tmp |= (hash_str[i - 1] << (8 - h));
- tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
- << (16 - h);
- hash_result ^= (uint16_t)tmp;
- }
- }
- }
-
- return hash_result;
-}
-
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-int32_t ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, uint16_t vlan)
-{
- DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");
-
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-int32_t ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, uint32_t src_addr)
-{
- DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");
-
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
- (src_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
- (src_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-int32_t ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, uint32_t dst_addr)
-{
- DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");
-
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
- (dst_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
- (dst_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
- * @input: input stream to modify
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-int32_t ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- uint32_t src_addr_1, uint32_t src_addr_2,
- uint32_t src_addr_3, uint32_t src_addr_4)
-{
- DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
- (src_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
- (src_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
- (src_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
- (src_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
- (src_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
- (src_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
- (src_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
- (src_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
- * @input: input stream to modify
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-int32_t ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- uint32_t dst_addr_1, uint32_t dst_addr_2,
- uint32_t dst_addr_3, uint32_t dst_addr_4)
-{
- DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
- (dst_addr_4 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
- (dst_addr_4 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
- (dst_addr_3 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
- (dst_addr_3 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
- (dst_addr_2 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
- (dst_addr_2 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
-
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
- (dst_addr_1 >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
- (dst_addr_1 >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-int32_t ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, uint16_t src_port)
-{
- DEBUGFUNC("ixgbe_atr_set_src_port_82599");
-
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-int32_t ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, uint16_t dst_port)
-{
- DEBUGFUNC("ixgbe_atr_set_dst_port_82599");
-
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-int32_t ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, uint16_t flex_byte)
-{
- DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");
-
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
- * @input: input stream to modify
- * @vm_pool: the Virtual Machine pool to load
- **/
-int32_t ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, uint8_t vm_pool)
-{
- DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");
-
- input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-int32_t ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, uint8_t l4type)
-{
- DEBUGFUNC("ixgbe_atr_set_l4type_82599");
-
- input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-int32_t ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, uint16_t *vlan)
-{
- DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
- *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
- *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-int32_t ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, uint32_t *src_addr)
-{
- DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");
-
- *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-int32_t ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, uint32_t *dst_addr)
-{
- DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599");
-
- *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-int32_t ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
- uint32_t *src_addr_1, uint32_t *src_addr_2,
- uint32_t *src_addr_3, uint32_t *src_addr_4)
-{
- DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599");
-
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
+ /* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i--) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
- return IXGBE_SUCCESS;
+ return hash_result & IXGBE_ATR_HASH_MASK;
}
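
Read together with the comment above, the rework collapses the old 352-bit serial XOR into two 32-bit folds. A self-contained restatement of the computation (illustrative sketch; 0x7fff stands in for IXGBE_ATR_HASH_MASK, and `dword_stream` is the 11-dword big-endian ATR input):

	#include <stdint.h>
	#include <arpa/inet.h>

	static uint32_t
	atr_hash_sketch(const uint32_t *dword_stream, uint32_t key)
	{
		uint32_t flow_vm_vlan = ntohl(dword_stream[0]);
		uint32_t hi = 0, lo, hash = 0;
		int i;

		/* fold dwords 1..10 into one common dword */
		for (i = 10; i; i -= 2)
			hi ^= ntohl(dword_stream[i]) ^ ntohl(dword_stream[i - 1]);

		/* low dword is the word-swapped common dword */
		lo = (hi >> 16) | (hi << 16);
		hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

		/* key bit 0, before the VLAN bits touch the lo dword */
		if (key & 0x0001)
			hash ^= lo;
		if (key & 0x00010000)
			hash ^= hi;
		lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

		/* remaining 30 key bits, two per iteration */
		for (i = 1; i < 16; i++) {
			if (key & (0x0001 << i))
				hash ^= lo >> i;
			if (key & (0x00010000 << i))
				hash ^= hi >> i;
		}
		return (hash & 0x7fff);
	}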
-/**
- * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
- * @input: input stream to search
- * @dst_addr_1: the first 4 bytes of the IP address to load
- * @dst_addr_2: the second 4 bytes of the IP address to load
- * @dst_addr_3: the third 4 bytes of the IP address to load
- * @dst_addr_4: the fourth 4 bytes of the IP address to load
- **/
-int32_t ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
- uint32_t *dst_addr_1, uint32_t *dst_addr_2,
- uint32_t *dst_addr_3, uint32_t *dst_addr_4)
-{
- DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599");
-
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
- *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
-
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
- *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
-
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
- *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
-
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
- *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
-
- return IXGBE_SUCCESS;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ uint32_t n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
/**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-int32_t ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, uint16_t *src_port)
-{
- DEBUGFUNC("ixgbe_atr_get_src_port_82599");
-
- *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
- *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @input: unique input dword
+ * @common: compressed common input dword
*
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unrolling all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
**/
-int32_t ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, uint16_t *dst_port)
+uint32_t ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
{
- DEBUGFUNC("ixgbe_atr_get_dst_port_82599");
+ uint32_t hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ uint32_t sig_hash = 0, bucket_hash = 0, common_hash = 0;
- *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
- *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input.dword);
- return IXGBE_SUCCESS;
-}
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-int32_t ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, uint16_t *flex_byte)
-{
- DEBUGFUNC("ixgbe_atr_get_flex_byte_82599");
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
- *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- return IXGBE_SUCCESS;
-}
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-/**
- * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
- * @input: input stream to modify
- * @vm_pool: the Virtual Machine pool to load
- **/
-int32_t ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, uint8_t *vm_pool)
-{
- DEBUGFUNC("ixgbe_atr_get_vm_pool_82599");
+ /*
+	 * Apply the flow ID/VM pool/VLAN ID bits to the lo hash dword. This
+	 * had to be delayed because bit 0 of the stream must not see the
+	 * VLAN bits, so they are only folded in after bit 0 was processed.
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
- *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-int32_t ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, uint8_t *l4type)
-{
- DEBUGFUNC("ixgbe_atr_get_l4type__82599");
-
- *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
-
- return IXGBE_SUCCESS;
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
}
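
The split into bucket, signature, and common accumulators works because the hash is XOR-linear in the key bits: bits set in both keys contribute the same term to both hashes, so they can be accumulated once and folded in at the end. A small self-check of that property (illustrative; the key values are arbitrary examples, and only the bucket-style right-shift term is modeled — the signature term behaves the same way in the upper 16 bits):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t
	hash_bits(uint32_t key, uint32_t lo, uint32_t hi)
	{
		uint32_t h = 0;
		int n;

		for (n = 0; n < 16; n++) {
			if (key & (1u << n))
				h ^= lo >> n;
			if (key & (1u << (n + 16)))
				h ^= hi >> n;
		}
		return (h & 0x7fff);
	}

	int
	main(void)
	{
		uint32_t bucket_key = 0x3DAD14E2, sig_key = 0x174D3614;
		uint32_t lo = 0x12345678, hi = 0x9abcdef0;
		uint32_t common = bucket_key & sig_key;

		/* hashing with the non-common bits, then folding the common
		 * accumulator back in, equals hashing with the full key */
		assert((hash_bits(bucket_key & ~common, lo, hi) ^
		    hash_bits(common, lo, hi)) == hash_bits(bucket_key, lo, hi));
		return (0);
	}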
/**
@@ -1844,159 +1568,228 @@ int32_t ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, uint8_t *l4typ
* @queue: queue index to direct traffic to
**/
int32_t ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
uint8_t queue)
{
uint64_t fdirhashcmd;
uint64_t fdircmd;
- uint32_t fdirhash;
- uint16_t bucket_hash, sig_hash;
- uint8_t l4type;
DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /* Get the l4type in order to program FDIRCMD properly */
- /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
- ixgbe_atr_get_l4type_82599(input, &l4type);
-
/*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
break;
default:
- DEBUGOUT(" Error on l4type input\n");
+ DEBUGOUT(" Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
- fdircmd |= IXGBE_FDIRCMD_IPV6;
-
- fdircmd |= ((uint64_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- fdirhashcmd = ((fdircmd << 32) | fdirhash);
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (uint64_t)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (uint32_t)fdirhashcmd);
+
return IXGBE_SUCCESS;
}
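
With the old struct replaced by two dwords, the caller now packs the flow tuple itself. A hypothetical caller sketch for a TCPv4 flow (the `formatted` and `port` member names follow the union ixgbe_atr_hash_dword layout assumed to be in ixgbe_type.h; none of this is code from the commit):

	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };

	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input.formatted.vlan_id = vtag;		/* big-endian; 0 if untagged */

	/* fold addresses and ports into the single common dword */
	common.dword = ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
	common.port.src ^= th->th_sport;
	common.port.dst ^= th->th_dport;

	ixgbe_fdir_add_signature_filter_82599(hw, input, common, queue);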
/**
+ * ixgbe_get_fdirtcpm_82599 - generate the TCP port mask register value
+ * @input_masks: port masks to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, 14 affects bit 1, 13 affects bit 2, and
+ * so on. To generate a correctly swapped value we therefore need to bit
+ * swap the mask, which is what this function accomplishes.
+ **/
+uint32_t ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+ uint32_t mask = ntohs(input_masks->dst_port_mask);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= ntohs(input_masks->src_port_mask);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
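
The swap ladder reverses the bits of each 16-bit half in place (swaps at distances 1, 2, 4, then 8; there is deliberately no 16-bit swap, so the destination mask stays in the upper half and the source mask in the lower). A worked check of that behavior (illustrative, standalone):

	#include <assert.h>
	#include <stdint.h>

	/* per-16-bit-half bit reversal, same ladder as above */
	static uint32_t
	rev16_pair(uint32_t m)
	{
		m = ((m & 0x55555555) << 1) | ((m & 0xAAAAAAAA) >> 1);
		m = ((m & 0x33333333) << 2) | ((m & 0xCCCCCCCC) >> 2);
		m = ((m & 0x0F0F0F0F) << 4) | ((m & 0xF0F0F0F0) >> 4);
		return ((m & 0x00FF00FF) << 8) | ((m & 0xFF00FF00) >> 8);
	}

	int
	main(void)
	{
		/* dst mask 0xFFFF in the high half, src mask 0xFF00 in the low:
		 * each half is reversed independently, giving 0xFFFF00FF */
		assert(rev16_pair(0xFFFFFF00) == 0xFFFF00FF);
		return (0);
	}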
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((uint32_t)(_value) >> 24) | (((uint32_t)(_value) & 0x00FF0000) >> 8) | \
+ (((uint32_t)(_value) & 0x0000FF00) << 8) | ((uint32_t)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ (((uint16_t)(_value) >> 8) | ((uint16_t)(_value) << 8))
+
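Worked through on a little-endian host (illustrative numbers, not from the commit), the double swap leaves a network-order value's byte pattern intact before it reaches IXGBE_WRITE_REG:

	uint32_t v = htonl(0xC0A80101);	/* 192.168.1.1; 0x0101A8C0 as a LE integer */

	/* ntohl(v) == 0xC0A80101, and IXGBE_STORE_AS_BE32(0xC0A80101)
	 * swaps it straight back to 0x0101A8C0 == v, so the register is
	 * written with the original big-endian byte pattern. */
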
+
+/**
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
- * @input: input bitstream
+ * @input_masks: masks for the input bitstream
+ * @soft_id: software index for the filters
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
int32_t ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- uint16_t soft_id,
- uint8_t queue)
+ union ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ uint16_t soft_id, uint8_t queue)
{
- uint32_t fdircmd = 0;
uint32_t fdirhash;
- uint32_t src_ipv4, dst_ipv4;
- uint32_t src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
- uint16_t src_port, dst_port, vlan_id, flex_bytes;
- uint16_t bucket_hash;
- uint8_t l4type;
+ uint32_t fdircmd;
+ uint32_t fdirport, fdirtcpm;
+ uint32_t fdirvlan;
+ /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+ uint32_t fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+ IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
- /* Get our input values */
- ixgbe_atr_get_l4type_82599(input, &l4type);
-
/*
- * Check l4type formatting, and bail out before we touch the hardware
+ * Check flow_type formatting, and bail out before we touch the hardware
* if there's a configuration issue
*/
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
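+		/* FALLTHROUGH */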
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+ DEBUGOUT(" Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
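+		/* FALLTHROUGH */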
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
break;
default:
- DEBUGOUT(" Error on l4type input\n");
+ DEBUGOUT(" Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
- ixgbe_atr_get_src_port_82599(input, &src_port);
- ixgbe_atr_get_dst_port_82599(input, &dst_port);
- ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
- fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- /* Now figure out if we're IPv4 or IPv6 */
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- /* IPv6 */
- ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
- &src_ipv6_3, &src_ipv6_4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
- /* The last 4 bytes is the same register as IPv4 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
- fdircmd |= IXGBE_FDIRCMD_IPV6;
- fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
- } else {
- /* IPv4 */
- ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
+ /*
+	 * Program the relevant mask registers. If src/dst_port or src/dst_addr
+	 * are zero, then a full mask is assumed for that field, i.e. the
+	 * field is masked out of the comparison entirely. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ /* Program FDIRM */
+ switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+ case 0xEFFF:
+ /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ case 0xE000:
+ /* Unmask VLAN prio - bit 1 */
+ fdirm &= ~IXGBE_FDIRM_VLANP;
+ break;
+ case 0x0FFF:
+ /* Unmask VLAN ID - bit 0 */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ break;
+ case 0x0000:
+ /* do nothing, vlans already masked */
+ break;
+ default:
+ DEBUGOUT(" Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
}
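+	/*
+	 * Worked example (illustrative): vlan_id_mask = htons(0x0FFF)
+	 * yields 0x0FFF above, so only IXGBE_FDIRM_VLANID is cleared;
+	 * the 12-bit VLAN ID is compared while the 3-bit priority
+	 * stays masked.
+	 */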
- ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
- (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ if (input_masks->flex_mask & 0xFFFF) {
+ if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+ DEBUGOUT(" Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* Unmask Flex Bytes - bit 4 */
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ }
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
- fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
- fdircmd |= IXGBE_FDIRCMD_LAST;
- fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_masks->src_ip_mask[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_masks->dst_ip_mask[0]);
+
+ /* Apply masks to input data */
+ input->formatted.vlan_id &= input_masks->vlan_id_mask;
+ input->formatted.flex_bytes &= input_masks->flex_mask;
+ input->formatted.src_port &= input_masks->src_port_mask;
+ input->formatted.dst_port &= input_masks->dst_port_mask;
+ input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+ input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+ fdirvlan =
+ IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /* we only want the bucket hash so drop the upper 16 bits */
+ fdirhash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
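
A hypothetical caller sketch for the reworked perfect-filter interface (the field names are the ones used inside the function above; the concrete values are examples, not from the commit). Zeroed masks leave a field wildcarded, per the masking comment earlier in the function:

	union ixgbe_atr_input input;
	struct ixgbe_atr_input_masks masks;

	bzero(&input, sizeof(input));
	bzero(&masks, sizeof(masks));
	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input.formatted.dst_ip[0] = htonl(0xc0a80102);	/* 192.168.1.2 */
	input.formatted.dst_port = htons(80);
	masks.dst_ip_mask[0] = htonl(0xffffffff);	/* exact dst address */
	masks.dst_port_mask = htons(0xffff);		/* exact dst port */
	/* zeroed masks leave src address/port, VLAN and flex wildcarded */
	ixgbe_fdir_add_perfect_filter_82599(hw, &input, &masks, soft_id, queue);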
@@ -2054,46 +1847,45 @@ int32_t ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, uint32_t reg, uint8_t
* ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
- * Starts the hardware using the generic start_hw function.
- * Then performs revision-specific operations:
- * Clears the rate limiter registers.
+ * Starts the hardware using the generic start_hw function
+ * and the generation-2 start_hw function.
+ * Then performs revision-specific operations, if any.
**/
int32_t ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
{
- uint32_t i;
- uint32_t regval;
int32_t ret_val = IXGBE_SUCCESS;
+ uint32_t gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
DEBUGFUNC("ixgbe_start_hw_rev_1__82599");
ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
- /* Clear the rate limiters */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
- }
- IXGBE_WRITE_FLUSH(hw);
-
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = TRUE;
+	/*
+	 * From the 82599 specification update:
+	 * set the completion timeout value to the 16ms to 55ms range
+	 * if it is not already set
+	 */
+ if (gcr & IXGBE_GCR_CAP_VER2) {
+ uint16_t reg;
+ reg = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ if ((reg & 0x0f) == 0) {
+ reg |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2,
+ reg);
+ }
+ }
+
if (ret_val == IXGBE_SUCCESS)
ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
return ret_val;
}
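A sketch of what that guard tests, assuming (per the PCIe spec) that bits 3:0 of Device Control 2 hold the Completion Timeout Value and that 0 selects the default range; the helper is hypothetical:

	/*
	 * Hypothetical helper: nonzero when the completion timeout field
	 * is still at its default and should be bumped to 16ms-55ms.
	 */
	static int
	cto_needs_update(uint16_t devctl2)
	{
		return (devctl2 & 0x0f) == 0;
	}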
@@ -2113,8 +1905,14 @@ int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
- if (status != IXGBE_SUCCESS)
- status = ixgbe_identify_sfp_module_generic(hw);
+ if (status != IXGBE_SUCCESS) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ goto out;
+ else
+ status = ixgbe_identify_sfp_module_generic(hw);
+ }
+
/* Set PHY type none if no PHY detected */
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.type = ixgbe_phy_none;
@@ -2125,6 +1923,7 @@ int32_t ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->phy.type == ixgbe_phy_sfp_unsupported)
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+out:
return status;
}
@@ -2144,14 +1943,16 @@ uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
uint32_t pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
uint16_t ext_ability = 0;
uint8_t comp_codes_10g = 0;
+ uint8_t comp_codes_1g = 0;
DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
hw->phy.ops.identify(hw);
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_aq ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
@@ -2161,6 +1962,8 @@ uint32_t ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -2214,20 +2017,28 @@ sfp_check:
goto out;
switch (hw->phy.type) {
- case ixgbe_phy_tw_tyco:
- case ixgbe_phy_tw_unknown:
+ case ixgbe_phy_sfp_passive_tyco:
+ case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
+ case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_sfp_active_unknown:
+ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+ break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+ hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
default:
break;
@@ -2285,23 +2096,6 @@ int32_t ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, uint32_t regval)
}
/**
- * ixgbe_get_device_caps_82599 - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word with the extra device capabilities
- *
- * This function will read the EEPROM location for the device capabilities,
- * and return the word through device_caps.
- **/
-int32_t ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, uint16_t *device_caps)
-{
- DEBUGFUNC("ixgbe_get_device_caps_82599");
-
- hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_verify_fw_version_82599 - verify fw version for 82599
* @hw: pointer to hardware structure
*
@@ -2350,3 +2144,49 @@ int32_t ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_out:
return status;
}
+
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns TRUE if the LESM FW module is present and enabled. Otherwise
+ * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+int ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ int lesm_enabled = FALSE;
+ uint16_t fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ int32_t status;
+
+ DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != IXGBE_SUCCESS) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the lesm state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == IXGBE_SUCCESS) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = TRUE;
+
+out:
+ return lesm_enabled;
+}
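The routine above hops through two EEPROM pointers before testing the state word, applying the same validity guard at each hop. A sketch of that guard (helper name hypothetical):

	/* a pointer word of 0x0000 or 0xFFFF marks an absent or blank block */
	static int
	eeprom_ptr_valid(uint16_t ptr)
	{
		return (ptr != 0 && ptr != 0xFFFF);
	}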
diff --git a/sys/dev/pci/ixgbe_phy.c b/sys/dev/pci/ixgbe_phy.c
index c859aef86f1..7bfcd7ff9b7 100644
--- a/sys/dev/pci/ixgbe_phy.c
+++ b/sys/dev/pci/ixgbe_phy.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe_phy.c,v 1.5 2010/09/21 00:29:29 claudio Exp $ */
+/* $OpenBSD: ixgbe_phy.c,v 1.6 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -75,7 +75,7 @@ int32_t ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
-
+ phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
}
@@ -105,9 +105,8 @@ int32_t ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&ext_ability);
if (ext_ability &
- IXGBE_MDIO_PHY_10GBASET_ABILITY ||
- ext_ability &
- IXGBE_MDIO_PHY_1000BASET_ABILITY)
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
hw->phy.type =
ixgbe_phy_cu_unknown;
else
@@ -119,6 +118,7 @@ int32_t ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
break;
}
}
+ /* clear value if nothing found */
if (status != IXGBE_SUCCESS)
hw->phy.addr = 0;
} else {
@@ -221,6 +221,11 @@ int32_t ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
goto out;
+ /* Don't reset PHY if it's shut down due to overtemp. */
+ if (!hw->phy.reset_if_overtemp &&
+ (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -229,13 +234,19 @@ int32_t ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
IXGBE_MDIO_PHY_XS_DEV_TYPE,
IXGBE_MDIO_PHY_XS_RESET);
- /* Poll for reset bit to self-clear indicating reset is complete */
- for (i = 0; i < 500; i++) {
- msec_delay(1);
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msec_delay(100);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET))
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
break;
+ }
}
if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
@@ -289,9 +300,8 @@ int32_t ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, uint32_t reg_addr,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break;
- }
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
@@ -438,10 +448,10 @@ int32_t ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, uint32_t reg_addr,
}
/**
- * ixgbe_setup_phy_link_generic - Set and restart autoneg
- * @hw: pointer to hardware structure
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
+ * @hw: pointer to hardware structure
*
- * Restart autonegotiation and PHY and waits for completion.
+ *  Restarts autonegotiation on the PHY and waits for completion.
**/
int32_t ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
@@ -520,12 +530,15 @@ int32_t ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
break;
+ }
}
- if (time_out == max_time_out)
+ if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
+ DEBUGOUT("ixgbe_setup_phy_link_generic: time out");
+ }
return status;
}
@@ -727,8 +740,9 @@ int32_t ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
- if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
+ if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
break;
+ }
}
if (time_out == max_time_out) {
@@ -739,7 +753,6 @@ int32_t ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
return status;
}
-
/**
* ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
@@ -757,7 +770,6 @@ int32_t ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
return status;
}
-
/**
* ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
* @hw: pointer to hardware structure
@@ -888,6 +900,7 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
uint8_t comp_codes_10g = 0;
uint8_t oui_bytes[3] = {0, 0, 0};
uint8_t cable_tech = 0;
+ uint8_t cable_spec = 0;
uint16_t enforce_sfp = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -900,15 +913,10 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
- if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- if (hw->phy.type != ixgbe_phy_nl) {
- hw->phy.id = 0;
- hw->phy.type = ixgbe_phy_unknown;
- }
- goto out;
- }
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
hw->mac.ops.set_lan_id(hw);
@@ -917,15 +925,31 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
} else {
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
- &cable_tech);
-
- DEBUGOUT3("SFP+ capa codes 1G %x 10G %x cable %x\n",
- comp_codes_1g, comp_codes_10g, cable_tech);
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
/* ID Module
* =========
@@ -936,6 +960,10 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 4 SFP_DA_CORE1 - 82599-specific
* 5 SFP_SR/LR_CORE0 - 82599-specific
* 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -949,29 +977,48 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+ hw->phy.ops.read_i2c_eeprom(
+ hw, IXGBE_SFF_CABLE_SPEC_COMP,
+ &cable_spec);
+ if (cable_spec &
+ IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_unknown;
+ }
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_1g_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
- else
+ ixgbe_sfp_type_1g_cu_core1;
+ } else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
}
if (hw->phy.sfp_type != stored_sfp_type)
@@ -988,28 +1035,49 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Determine PHY vendor */
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
- hw->phy.ops.read_i2c_eeprom(hw,
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
vendor_oui =
- ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
- (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
- (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_tyco;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
- hw->phy.type = ixgbe_phy_sfp_ftl;
+ if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_ftl_active;
+ else
+ hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@@ -1019,21 +1087,28 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_tw_unknown;
+ hw->phy.type =
+ ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type =
+ ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
- /* All passive DA cables are supported */
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+ /* Allow any DA cable vendor */
+ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+ IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = IXGBE_SUCCESS;
goto out;
}
- /* 1G SFP modules are not supported */
- if (comp_codes_10g == 0) {
+ /* Verify supported 1G SFP modules */
+ if (comp_codes_10g == 0 &&
+ !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1048,7 +1123,9 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
	/* unimplemented even in the Intel driver */
/* ixgbe_get_device_caps(hw, &enforce_sfp); */
enforce_sfp = IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP;
- if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+ !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
@@ -1065,6 +1142,14 @@ int32_t ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
out:
return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
}
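For reference, the OUI switch above compares against a left-packed 32-bit value; with hypothetical SFF OUI bytes 0x00, 0x90, 0x65 the shifts (24/16/8) produce 0x00906500:

	/* hypothetical example of the vendor OUI packing */
	uint8_t oui_bytes[3] = { 0x00, 0x90, 0x65 };
	uint32_t vendor_oui =
	    ((uint32_t)oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
	    ((uint32_t)oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
	    ((uint32_t)oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT);
	/* vendor_oui == 0x00906500 */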
/**
@@ -1081,6 +1166,7 @@ int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
uint16_t *data_offset)
{
uint16_t sfp_id;
+ uint16_t sfp_type = hw->phy.sfp_type;
if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1092,6 +1178,17 @@ int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
return IXGBE_ERR_SFP_NOT_SUPPORTED;
+ /*
+	 * Active limiting DA cables and 1G copper PHYs must be
+	 * initialized as SR modules
+ */
+ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type = ixgbe_sfp_type_srlr_core0;
+ else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type = ixgbe_sfp_type_srlr_core1;
+
/* Read offset to PHY init contents */
hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
@@ -1108,7 +1205,7 @@ int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
- if (sfp_id == hw->phy.sfp_type) {
+ if (sfp_id == sfp_type) {
(*list_offset)++;
hw->eeprom.ops.read(hw, *list_offset, data_offset);
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
@@ -1135,9 +1232,10 @@ int32_t ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
(*list_offset) += 2;
hw->eeprom.ops.read(hw, *list_offset, data_offset);
- } else if (sfp_id == IXGBE_PHY_INIT_END_NL)
+ } else if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+ DEBUGOUT("No matching SFP+ module found\n");
return IXGBE_ERR_SFP_NOT_SUPPORTED;
-
+ }
return IXGBE_SUCCESS;
}
@@ -1196,7 +1294,6 @@ int32_t ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, uint8_t byte_offset,
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
-
do {
if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
@@ -1401,7 +1498,7 @@ int32_t ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, uint8_t *data)
for (i = 7; i >= 0; i--) {
status = ixgbe_clock_in_i2c_bit(hw, &bit);
- *data |= bit<<i;
+ *data |= bit << i;
if (status != IXGBE_SUCCESS)
break;
@@ -1670,3 +1767,29 @@ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
/* Put the i2c bus back to default state */
ixgbe_i2c_stop(hw);
}
+
+/**
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ * @hw: pointer to hardware structure
+ *
+ * Checks if the LASI temp alarm status was triggered due to overtemp
+ **/
+int32_t ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+ int32_t status = IXGBE_SUCCESS;
+ uint16_t phy_data = 0;
+
+ if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+ goto out;
+
+ /* Check that the LASI temp alarm status was triggered */
+ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+ if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+ goto out;
+
+ status = IXGBE_ERR_OVERTEMP;
+out:
+ return status;
+}
diff --git a/sys/dev/pci/ixgbe_type.h b/sys/dev/pci/ixgbe_type.h
index 3fa9b69c48f..239a4f19db7 100644
--- a/sys/dev/pci/ixgbe_type.h
+++ b/sys/dev/pci/ixgbe_type.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: ixgbe_type.h,v 1.7 2010/11/10 15:23:25 claudio Exp $ */
+/* $OpenBSD: ixgbe_type.h,v 1.8 2011/06/10 12:46:35 claudio Exp $ */
/******************************************************************************
@@ -56,11 +56,17 @@
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
+#define IXGBE_DEV_ID_82599_VF 0x10ED
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -89,17 +95,17 @@
#define IXGBE_GRC 0x10200
/* General Receive Control */
-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
+#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
#define IXGBE_VPDDIAG0 0x10204
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN 0x00000001
-#define IXGBE_I2C_CLK_OUT 0x00000002
-#define IXGBE_I2C_DATA_IN 0x00000004
-#define IXGBE_I2C_DATA_OUT 0x00000008
+#define IXGBE_I2C_CLK_IN 0x00000001
+#define IXGBE_I2C_CLK_OUT 0x00000002
+#define IXGBE_I2C_DATA_IN 0x00000004
+#define IXGBE_I2C_DATA_OUT 0x00000008
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
@@ -108,19 +114,19 @@
#define IXGBE_EIMC 0x00888
#define IXGBE_EIAC 0x00810
#define IXGBE_EIAM 0x00890
-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
+#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4)
/* 82599 EITR is only 12 bits, with the lower 3 always zero */
/*
* 82598 EITR is 16 bits but set the limits based on the max
* supported by all ixgbe hardware
*/
-#define IXGBE_MAX_INT_RATE 488281
-#define IXGBE_MIN_INT_RATE 956
-#define IXGBE_MAX_EITR 0x00000FF8
-#define IXGBE_MIN_EITR 8
+#define IXGBE_MAX_INT_RATE 488281
+#define IXGBE_MIN_INT_RATE 956
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
(0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
@@ -199,6 +205,7 @@
#define IXGBE_RFCTL 0x05008
#define IXGBE_DRECCCTL 0x02F08
#define IXGBE_DRECCCTL_DISABLE 0
+
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
@@ -229,9 +236,11 @@
#define IXGBE_MTQC 0x08120
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
#define IXGBE_QDE 0x2F04
#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
@@ -286,13 +295,14 @@
#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
#define IXGBE_DTXCTL 0x07E00
-#define IXGBE_DMATXCTL 0x04A80
-#define IXGBE_PFDTXGSWC 0x08220
-#define IXGBE_DTXMXSZRQ 0x08100
-#define IXGBE_DTXTCPFLGL 0x04A88
-#define IXGBE_DTXTCPFLGH 0x04A8C
-#define IXGBE_LBDRPEN 0x0CA00
-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC 0x08220
+#define IXGBE_DTXMXSZRQ 0x08100
+#define IXGBE_DTXTCPFLGL 0x04A88
+#define IXGBE_DTXTCPFLGH 0x04A8C
+#define IXGBE_LBDRPEN 0x0CA00
+#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
@@ -300,6 +310,12 @@
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -335,7 +351,7 @@
/* Wake Up Control */
#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
/* Wake Up Filter Control */
#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -512,6 +528,11 @@
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
/* BCN (for DCB) Registers */
#define IXGBE_RTTBCNRM 0x04980
@@ -650,6 +671,8 @@
#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -737,6 +760,12 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -890,6 +919,8 @@
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC	0x02000000 /* must be set to 1 when RSC enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX	0x04000000 /* must be set to 1 when RSC enabled */
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -959,8 +990,8 @@
#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
-#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */
-#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
@@ -1009,6 +1040,8 @@
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */
#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
@@ -1021,7 +1054,9 @@
#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
@@ -1077,7 +1112,6 @@
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
#define IXGBE_GPIE_EIAME 0x40000000
#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
-#define IXGBE_GPIE_LLI_DELAY_SHIFT 7
#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
@@ -1371,10 +1405,12 @@
* EAPOL 802.1x (0x888e): Filter 0
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@@ -1386,6 +1422,9 @@
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1478,6 +1517,7 @@
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1529,6 +1569,7 @@
#define IXGBE_ANLP1_PAUSE 0x0C00
#define IXGBE_ANLP1_SYM_PAUSE 0x0400
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1557,6 +1598,7 @@
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1566,7 +1608,11 @@
#define IXGBE_EEPROM_WORD_SIZE_BASE_SHIFT 6
#define IXGBE_EEPROM_OPCODE_BITS 8
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
@@ -1645,18 +1691,28 @@
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
-#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1789,6 +1845,7 @@
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
@@ -2002,10 +2059,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L3P 0x00000008
-#define IXGBE_FDIRM_L4P 0x00000010
-#define IXGBE_FDIRM_FLEX 0x00000020
-#define IXGBE_FDIRM_DIPv6 0x00000040
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -2045,6 +2101,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2052,7 +2109,7 @@ enum ixgbe_fdir_pballoc_type {
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
- uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+ uint64_t buffer_addr; /* Address of the descriptor's data buffer */
union {
__le32 data;
struct {
@@ -2220,44 +2277,103 @@ typedef uint32_t ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+
+/* Flow Control Macros */
+#define PAUSE_RTT 8
+#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+ PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
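PAUSE_MTU() is a ceiling divide, counting whole 1 KB buffer units. A quick worked example under that reading (plain integer arithmetic, not part of the patch):

	/* MTU 1500: PAUSE_MTU = (1500 + 1023) / 1024 = 2
	 * FC_HIGH_WATER(1500) = (((8 + 2) * 144) + 99) / 100 + 2 = 17
	 * FC_LOW_WATER(1500)  = 2 * (2 * 2 + 8) = 24
	 */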
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET 0
-#define IXGBE_ATR_SRC_IPV6_OFFSET 2
-#define IXGBE_ATR_SRC_IPV4_OFFSET 14
-#define IXGBE_ATR_DST_IPV6_OFFSET 18
-#define IXGBE_ATR_DST_IPV4_OFFSET 30
-#define IXGBE_ATR_SRC_PORT_OFFSET 34
-#define IXGBE_ATR_DST_PORT_OFFSET 36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET 40
-#define IXGBE_ATR_L4TYPE_OFFSET 41
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
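The enumerator values above are not arbitrary: the low two bits carry the IXGBE_ATR_L4TYPE_* code and bit 2 is the IPv6 flag, as this hypothetical composition shows:

	/* hypothetical helper composing a flow type from its parts */
	static uint8_t
	atr_flow_type(uint8_t l4type, int is_ipv6)
	{
		return (l4type & IXGBE_ATR_L4TYPE_MASK) |
		    (is_ipv6 ? IXGBE_ATR_L4TYPE_IPV6_MASK : 0);
	}
	/* atr_flow_type(IXGBE_ATR_L4TYPE_TCP, 1) == IXGBE_ATR_FLOW_TYPE_TCPV6 */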
/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
- /* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
*
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
* vlan_id - 2 bytes
* src_ip - 16 bytes
* dst_ip - 16 bytes
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * vm_pool - 1 byte
- * l4type - 1 byte
+	 *	rsvd0	   - 2 bytes - reserved, must be 0.
*/
- uint8_t byte_stream[42];
+ struct {
+ uint8_t vm_pool;
+ uint8_t flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 rsvd0;
+ } formatted;
+ __be32 dword_stream[11];
+};
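A minimal sketch of filling the union for an IPv4 TCP flow; the ip/th pointers are hypothetical, and zeroing first matters because the hash code walks the dword_stream overlay, so rsvd0 and the unused address words must be 0:

	union ixgbe_atr_input atr;

	memset(&atr, 0, sizeof(atr));
	atr.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	atr.formatted.src_ip[0] = ip->ip_src.s_addr;	/* already big-endian */
	atr.formatted.dst_ip[0] = ip->ip_dst.s_addr;
	atr.formatted.src_port = th->th_sport;
	atr.formatted.dst_port = th->th_dport;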
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ uint8_t vm_pool;
+ uint8_t flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+
+struct ixgbe_atr_input_masks {
+ __be16 rsvd0;
+ __be16 vlan_id_mask;
+ __be32 dst_ip_mask[4];
+ __be32 src_ip_mask[4];
+ __be16 src_port_mask;
+ __be16 dst_port_mask;
+ __be16 flex_mask;
+};
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+ ixgbe_fcoe_bootstatus_disabled = 0,
+ ixgbe_fcoe_bootstatus_enabled = 1,
+ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
};
enum ixgbe_eeprom_type {
@@ -2271,6 +2387,7 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
+ ixgbe_mac_82599_vf,
ixgbe_num_macs
};
@@ -2283,10 +2400,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
- ixgbe_phy_tw_tyco,
- ixgbe_phy_tw_unknown,
+ ixgbe_phy_sfp_passive_tyco,
+ ixgbe_phy_sfp_passive_unknown,
+ ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
+ ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
@@ -2314,6 +2433,10 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
+ ixgbe_sfp_type_da_act_lmt_core0 = 7,
+ ixgbe_sfp_type_da_act_lmt_core1 = 8,
+ ixgbe_sfp_type_1g_cu_core0 = 9,
+ ixgbe_sfp_type_1g_cu_core1 = 10,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2356,25 +2479,25 @@ enum ixgbe_bus_type {
/* PCI bus speeds */
enum ixgbe_bus_speed {
ixgbe_bus_speed_unknown = 0,
- ixgbe_bus_speed_33,
- ixgbe_bus_speed_66,
- ixgbe_bus_speed_100,
- ixgbe_bus_speed_120,
- ixgbe_bus_speed_133,
- ixgbe_bus_speed_2500,
- ixgbe_bus_speed_5000,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_reserved
};
/* PCI bus widths */
enum ixgbe_bus_width {
ixgbe_bus_width_unknown = 0,
- ixgbe_bus_width_pcie_x1,
- ixgbe_bus_width_pcie_x2,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
ixgbe_bus_width_pcie_x4 = 4,
ixgbe_bus_width_pcie_x8 = 8,
- ixgbe_bus_width_32,
- ixgbe_bus_width_64,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
ixgbe_bus_width_reserved
};
@@ -2460,8 +2583,6 @@ struct ixgbe_hw_stats {
uint64_t mptc;
uint64_t bptc;
uint64_t xec;
- uint64_t rqsmr[16];
- uint64_t tqsmr[8];
uint64_t qprc[16];
uint64_t qptc[16];
uint64_t qbrc[16];
@@ -2487,8 +2608,8 @@ struct ixgbe_hw_stats {
struct ixgbe_hw;
/* iterator type for walking multicast address lists */
-typedef uint8_t* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, uint8_t **mc_addr_ptr,
- uint32_t *vmdq);
+typedef uint8_t* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw,
+ uint8_t **mc_addr_ptr, uint32_t *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -2505,6 +2626,7 @@ struct ixgbe_mac_operations {
int32_t (*reset_hw)(struct ixgbe_hw *);
int32_t (*start_hw)(struct ixgbe_hw *);
int32_t (*clear_hw_cntrs)(struct ixgbe_hw *);
+ void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
uint32_t (*get_supported_physical_layer)(struct ixgbe_hw *);
int32_t (*get_mac_addr)(struct ixgbe_hw *, uint8_t *);
@@ -2512,6 +2634,7 @@ struct ixgbe_mac_operations {
int32_t (*set_san_mac_addr)(struct ixgbe_hw *, uint8_t *);
int32_t (*get_device_caps)(struct ixgbe_hw *, uint16_t *);
int32_t (*get_wwn_prefix)(struct ixgbe_hw *, uint16_t *, uint16_t *);
+ int32_t (*get_fcoe_boot_status)(struct ixgbe_hw *, uint16_t *);
int32_t (*stop_adapter)(struct ixgbe_hw *);
int32_t (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
@@ -2523,6 +2646,9 @@ struct ixgbe_mac_operations {
void (*release_swfw_sync)(struct ixgbe_hw *, uint16_t);
/* Link */
+ void (*disable_tx_laser)(struct ixgbe_hw *);
+ void (*enable_tx_laser)(struct ixgbe_hw *);
+ void (*flap_tx_laser)(struct ixgbe_hw *);
int32_t (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, int, int);
int32_t (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, int *, int);
int32_t (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2550,6 +2676,8 @@ struct ixgbe_mac_operations {
int32_t (*clear_vfta)(struct ixgbe_hw *);
int32_t (*set_vfta)(struct ixgbe_hw *, uint32_t, uint32_t, int);
int32_t (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, int, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, int, int);
/* Flow Control */
int32_t (*fc_enable)(struct ixgbe_hw *, int32_t);
@@ -2572,6 +2700,7 @@ struct ixgbe_phy_operations {
int32_t (*read_i2c_eeprom)(struct ixgbe_hw *, uint8_t , uint8_t *);
int32_t (*write_i2c_eeprom)(struct ixgbe_hw *, uint8_t, uint8_t);
void (*i2c_bus_clear)(struct ixgbe_hw *);
+ int32_t (*check_overtemp)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@@ -2582,6 +2711,7 @@ struct ixgbe_eeprom_info {
uint16_t address_bits;
};
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
@@ -2592,11 +2722,14 @@ struct ixgbe_mac_info {
uint16_t wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
uint16_t wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ uint32_t mta_shadow[IXGBE_MAX_MTA];
int32_t mc_filter_type;
uint32_t mcft_size;
uint32_t vft_size;
uint32_t num_rar_entries;
uint32_t rar_highwater;
+ uint32_t rx_pb_size;
uint32_t max_tx_queues;
uint32_t max_rx_queues;
uint32_t max_msix_vectors;
@@ -2605,6 +2738,7 @@ struct ixgbe_mac_info {
uint32_t orig_autoc2;
int orig_link_settings_stored;
int autotry_restart;
+ uint8_t flags;
};
struct ixgbe_phy_info {
@@ -2621,6 +2755,101 @@ struct ixgbe_phy_info {
enum ixgbe_smart_speed smart_speed;
int smart_speed_active;
int multispeed_fiber;
+ int reset_if_overtemp;
+};
+
+/* MBX */
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is TRUE if it is IXGBE_PF_*.
+ * Message ACKs are the value or'd with 0xF0000000.
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* end MBX */
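To make the handshake concrete, a hedged sketch of a VF-side reset request using the ops table declared below; the buffer contents and mailbox id are hypothetical:

	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE] = { IXGBE_VF_RESET };

	/* post the request, then wait for the PF's reply in the same buffer */
	if (hw->mbx.ops.write_posted(hw, msgbuf, 1, 0) == IXGBE_SUCCESS &&
	    hw->mbx.ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, 0) ==
	    IXGBE_SUCCESS &&
	    msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) {
		/* ACKed: permanent MAC address follows in msgbuf[1..3] */
	}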
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ int32_t (*read)(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+ int32_t (*write)(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+ int32_t (*read_posted)(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+ int32_t (*write_posted)(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+ int32_t (*check_for_msg)(struct ixgbe_hw *, uint16_t);
+ int32_t (*check_for_ack)(struct ixgbe_hw *, uint16_t);
+ int32_t (*check_for_rst)(struct ixgbe_hw *, uint16_t);
+};
+
+struct ixgbe_mbx_stats {
+ uint32_t msgs_tx;
+ uint32_t msgs_rx;
+
+ uint32_t acks;
+ uint32_t reqs;
+ uint32_t rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ uint32_t timeout;
+ uint32_t usec_delay;
+ uint32_t v2p_mailbox;
+ uint16_t size;
};
struct ixgbe_hw {
@@ -2632,6 +2861,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
uint16_t device_id;
uint16_t vendor_id;
uint16_t subsystem_device_id;
@@ -2676,6 +2906,14 @@ struct ixgbe_hw {
#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
+#define IXGBE_ERR_NO_SPACE -25
+#define IXGBE_ERR_OVERTEMP -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_FLOW_CONTROL -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define UNREFERENCED_PARAMETER(_p)
@@ -2692,11 +2930,15 @@ struct ixgbe_hw {
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_DA_BAD_HP_CABLE 0x80
@@ -2707,6 +2949,10 @@ struct ixgbe_hw {
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -2730,6 +2976,9 @@ struct ixgbe_hw {
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
+#define IXGBE_TN_LASI_STATUS_REG 0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
/* end PHY */
#endif /* _IXGBE_TYPE_H_ */