Diffstat (limited to 'sys/dev/ixgbe')
-rw-r--r--  sys/dev/ixgbe/if_bypass.c       |  134
-rw-r--r--  sys/dev/ixgbe/if_fdir.c         |   24
-rw-r--r--  sys/dev/ixgbe/if_ix.c           | 1050
-rw-r--r--  sys/dev/ixgbe/if_ixv.c          |  275
-rw-r--r--  sys/dev/ixgbe/if_sriov.c        |  140
-rw-r--r--  sys/dev/ixgbe/ix_txrx.c         |  106
-rw-r--r--  sys/dev/ixgbe/ixgbe.h           |    8
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c     |   29
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c       |   21
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h       |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c    |  218
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h    |    5
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.c       |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.h       |    4
-rw-r--r--  sys/dev/ixgbe/ixgbe_features.h  |    1
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.c       |  909
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h       |   84
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c     |    6
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h     |    1
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c       |   43
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h       |    2
-rw-r--r--  sys/dev/ixgbe/ixgbe_rss.h       |    1
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h      |  116
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c        |   76
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.c      |    4
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.c      |  150
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.h      |    5
27 files changed, 2224 insertions(+), 1192 deletions(-)
diff --git a/sys/dev/ixgbe/if_bypass.c b/sys/dev/ixgbe/if_bypass.c
index e9ea77dfb49c..138b4e17db0d 100644
--- a/sys/dev/ixgbe/if_bypass.c
+++ b/sys/dev/ixgbe/if_bypass.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "ixgbe.h"
@@ -114,11 +114,11 @@ ixgbe_get_bypass_time(u32 *year, u32 *sec)
static int
ixgbe_bp_version(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int version = 0;
- u32 cmd;
+ int error = 0;
+ static int version = 0;
+ u32 cmd;
ixgbe_bypass_mutex_enter(sc);
cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
@@ -154,15 +154,14 @@ err:
static int
ixgbe_bp_set_state(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int state = 0;
+ int error = 0;
+ static int state = 0;
/* Get the current state */
ixgbe_bypass_mutex_enter(sc);
- error = hw->mac.ops.bypass_rw(hw,
- BYPASS_PAGE_CTL0, &state);
+ error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &state);
ixgbe_bypass_mutex_clear(sc);
if (error != 0)
return (error);
@@ -216,10 +215,10 @@ out:
static int
ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int timeout = 0;
+ int error = 0;
+ static int timeout = 0;
/* Get the current value */
ixgbe_bypass_mutex_enter(sc);
@@ -259,10 +258,10 @@ ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int main_on = 0;
+ int error = 0;
+ static int main_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_on);
@@ -301,10 +300,10 @@ ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int main_off = 0;
+ int error = 0;
+ static int main_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_off);
@@ -343,10 +342,10 @@ ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int aux_on = 0;
+ int error = 0;
+ static int aux_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_on);
@@ -385,10 +384,10 @@ ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int aux_off = 0;
+ int error = 0;
+ static int aux_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_off);
@@ -432,11 +431,11 @@ ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error, tmp;
- static int timeout = 0;
- u32 mask, arg;
+ int error, tmp;
+ static int timeout = 0;
+ u32 mask, arg;
/* Get the current hardware value */
ixgbe_bypass_mutex_enter(sc);
@@ -503,11 +502,11 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- u32 sec, year;
- int cmd, count = 0, error = 0;
- int reset_wd = 0;
+ u32 sec, year;
+ int cmd, count = 0, error = 0;
+ int reset_wd = 0;
error = sysctl_handle_int(oidp, &reset_wd, 0, req);
if ((error) || (req->newptr == NULL))
@@ -530,8 +529,7 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
error = IXGBE_BYPASS_FW_WRITE_FAILURE;
break;
}
- error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd);
- if (error != 0) {
+ if (hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd)) {
error = IXGBE_ERR_INVALID_ARGUMENT;
break;
}
@@ -550,14 +548,14 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
- struct ixgbe_hw *hw = &sc->hw;
- u32 cmd, base, head;
- u32 log_off, count = 0;
- static int status = 0;
- u8 data;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_hw *hw = &sc->hw;
+ u32 cmd, base, head;
+ u32 log_off, count = 0;
+ static int status = 0;
+ u8 data;
struct ixgbe_bypass_eeprom eeprom[BYPASS_MAX_LOGS];
- int i, error = 0;
+ int i, error = 0;
error = sysctl_handle_int(oidp, &status, 0, req);
if ((error) || (req->newptr == NULL))
@@ -640,12 +638,15 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
BYPASS_LOG_EVENT_SHIFT;
u8 action = eeprom[count].actions & BYPASS_LOG_ACTION_M;
u16 day_mon[2][13] = {
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
- {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
+ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304,
+ 334, 365},
+ {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305,
+ 335, 366}
};
char *event_str[] = {"unknown", "main on", "aux on",
"main off", "aux off", "WDT", "user" };
- char *action_str[] = {"ignore", "normal", "bypass", "isolate",};
+ char *action_str[] =
+ {"ignore", "normal", "bypass", "isolate",};
/* verify valid data 1 - 6 */
if (event < BYPASS_EVENT_MAIN_ON || event > BYPASS_EVENT_USR)
@@ -712,11 +713,11 @@ unlock_err:
void
ixgbe_bypass_init(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- struct sysctl_oid *bp_node;
+ struct ixgbe_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct sysctl_oid *bp_node;
struct sysctl_oid_list *bp_list;
- u32 mask, value, sec, year;
+ u32 mask, value, sec, year;
if (!(sc->feat_cap & IXGBE_FEATURE_BYPASS))
return;
@@ -724,13 +725,13 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
/* First set up time for the hardware */
ixgbe_get_bypass_time(&year, &sec);
- mask = BYPASS_CTL1_TIME_M
- | BYPASS_CTL1_VALID_M
- | BYPASS_CTL1_OFFTRST_M;
+ mask = BYPASS_CTL1_TIME_M |
+ BYPASS_CTL1_VALID_M |
+ BYPASS_CTL1_OFFTRST_M;
- value = (sec & BYPASS_CTL1_TIME_M)
- | BYPASS_CTL1_VALID
- | BYPASS_CTL1_OFFTRST;
+ value = (sec & BYPASS_CTL1_TIME_M) |
+ BYPASS_CTL1_VALID |
+ BYPASS_CTL1_OFFTRST;
ixgbe_bypass_mutex_enter(sc);
hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
@@ -745,8 +746,7 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
*/
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "bypass_log",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "bypass_log", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
sc, 0, ixgbe_bp_log, "I", "Bypass Log");
/* All other setting are hung from the 'bypass' node */
@@ -757,39 +757,39 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
bp_list = SYSCTL_CHILDREN(bp_node);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "version", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "version", CTLTYPE_INT | CTLFLAG_RD,
sc, 0, ixgbe_bp_version, "I", "Bypass Version");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_set_state, "I", "Bypass State");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "timeout", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "timeout", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_timeout, "I", "Bypass Timeout");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "main_on", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "main_on", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_main_on, "I", "Bypass Main On");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "main_off", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "main_off", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_main_off, "I", "Bypass Main Off");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "aux_on", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "aux_on", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_aux_on, "I", "Bypass Aux On");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "aux_off", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "aux_off", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_aux_off, "I", "Bypass Aux Off");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "wd_set", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "wd_set", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_wd_set, "I", "Set BP Watchdog");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "wd_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "wd_reset", CTLTYPE_INT | CTLFLAG_WR,
sc, 0, ixgbe_bp_wd_reset, "S", "Bypass WD Reset");
sc->feat_en |= IXGBE_FEATURE_BYPASS;
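
The recurring change in this file is dropping CTLFLAG_NEEDGIANT: every bypass
handler already serializes hardware access through ixgbe_bypass_mutex_enter()
and ixgbe_bypass_mutex_clear(), so holding Giant adds nothing. As a minimal
sketch of the handler shape this conversion assumes (all names here are
hypothetical, not code from this commit):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/sysctl.h>

    struct example_softc {
        struct mtx mtx;     /* driver-private lock, replaces Giant */
        int        value;
    };

    /*
     * Minimal MPSAFE sysctl handler: all state access sits under the
     * driver's own mutex, so CTLFLAG_NEEDGIANT is unnecessary.
     */
    static int
    example_sysctl_handler(SYSCTL_HANDLER_ARGS)
    {
        struct example_softc *sc = (struct example_softc *)arg1;
        int error, val;

        mtx_lock(&sc->mtx);
        val = sc->value;            /* snapshot current value */
        mtx_unlock(&sc->mtx);

        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
            return (error);         /* read-only access or copy error */

        mtx_lock(&sc->mtx);
        sc->value = val;            /* apply the new value */
        mtx_unlock(&sc->mtx);
        return (0);
    }
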
diff --git a/sys/dev/ixgbe/if_fdir.c b/sys/dev/ixgbe/if_fdir.c
index 6c52cc452987..37f45cb3808f 100644
--- a/sys/dev/ixgbe/if_fdir.c
+++ b/sys/dev/ixgbe/if_fdir.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "ixgbe.h"
@@ -51,9 +51,9 @@ ixgbe_init_fdir(struct ixgbe_softc *sc)
void
ixgbe_reinit_fdir(void *context)
{
- if_ctx_t ctx = context;
+ if_ctx_t ctx = context;
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
if (!(sc->feat_en & IXGBE_FEATURE_FDIR))
return;
@@ -79,16 +79,16 @@ ixgbe_reinit_fdir(void *context)
void
ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
- struct ixgbe_softc *sc = txr->sc;
- struct ix_queue *que;
- struct ip *ip;
- struct tcphdr *th;
- struct udphdr *uh;
- struct ether_vlan_header *eh;
+ struct ixgbe_softc *sc = txr->sc;
+ struct ix_queue *que;
+ struct ip *ip;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct ether_vlan_header *eh;
union ixgbe_atr_hash_dword input = {.dword = 0};
union ixgbe_atr_hash_dword common = {.dword = 0};
- int ehdrlen, ip_hlen;
- u16 etype;
+ int ehdrlen, ip_hlen;
+ u16 etype;
eh = mtod(mp, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 17f1f73a526e..959afa79e7da 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "opt_inet.h"
#include "opt_inet6.h"
@@ -58,53 +58,94 @@ static const char ixgbe_driver_version[] = "4.0.1-k";
************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
+ "Intel(R) 82598EB AF (Dual Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
+ "Intel(R) 82598EB AF (Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
+ "Intel(R) 82598EB AT (CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
+ "Intel(R) 82598EB AT"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
+ "Intel(R) 82598EB AT2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
+ "Intel(R) 82598EB AF DA (Dual Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
+ "Intel(R) 82598EB AT (Dual CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
+ "Intel(R) 82598EB AF (Dual Fiber LR)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
+ "Intel(R) 82598EB AF (Dual Fiber SR)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
+ "Intel(R) 82598EB LOM"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
+ "Intel(R) X520 82599 (KX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
+ "Intel(R) X520 82599 (KX4 Mezzanine)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
+ "Intel(R) X520 82599ES (SFI/SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
+ "Intel(R) X520 82599 (XAUI/BX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
+ "Intel(R) X520 82599 (Dual CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
+ "Intel(R) X520-T 82599 LOM"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
+ "Intel(R) X520 82599 LS"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
+ "Intel(R) X520 82599 (Combined Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
+ "Intel(R) X520 82599 (Backplane w/FCoE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
+ "Intel(R) X520 82599 (Dual SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
+ "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
+ "Intel(R) X520-1 82599EN (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
+ "Intel(R) X520-4 82599 (Quad SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
+ "Intel(R) X520-Q1 82599 (QSFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
+ "Intel(R) X540-AT2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
+ "Intel(R) X552 (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
+ "Intel(R) X552 (KX4 Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
+ "Intel(R) X552/X557-AT (10GBASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
+ "Intel(R) X552 (1000BASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
+ "Intel(R) X552 (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
+ "Intel(R) X553 (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
+ "Intel(R) X553 L (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
+ "Intel(R) X553 (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
+ "Intel(R) X553 N (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
+ "Intel(R) X553 (1GbE SGMII)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
+ "Intel(R) X553 L (1GbE SGMII)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
+ "Intel(R) X553/X557-AT (10GBASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
+ "Intel(R) X553 (1GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
+ "Intel(R) X553 L (1GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
+ "Intel(R) X540-T2 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
+ "Intel(R) X520 82599 (Bypass)"),
/* required last entry */
- PVID_END
+ PVID_END
};
static void *ixgbe_register(device_t);
@@ -127,8 +168,10 @@ static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int ixgbe_if_promisc_set(if_ctx_t, int);
-static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
-static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
static void ixgbe_if_update_admin_status(if_ctx_t);
@@ -172,6 +215,7 @@ static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
+static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
@@ -203,6 +247,7 @@ static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
@@ -316,7 +361,8 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on;
* but this allows it to be forced off for testing.
*/
static int ixgbe_enable_msix = 1;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
+ 0,
"Enable MSI-X interrupts");
/*
@@ -334,12 +380,14 @@ SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
* so we'll default to turning it off.
*/
static int ixgbe_enable_fdir = 0;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
+ 0,
"Enable Flow Director");
/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
+ 0,
"Enable Receive-Side Scaling (RSS)");
/*
@@ -349,7 +397,8 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
* traffic for that interrupt vector
*/
static int ixgbe_enable_aim = false;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
+ 0,
"Enable adaptive interrupt moderation");
#if 0
@@ -405,9 +454,9 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
- int i, j, error;
+ int i, j, error;
MPASS(sc->num_tx_queues > 0);
MPASS(sc->num_tx_queues == ntxqsets);
@@ -415,8 +464,8 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
sc->tx_queues =
- (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
- M_IXGBE, M_NOWAIT | M_ZERO);
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
+ ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
if (!sc->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -427,20 +476,20 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
struct tx_ring *txr = &que->txr;
/* In case SR-IOV is enabled, align the index properly */
- txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
- i);
+ txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
txr->sc = que->sc = sc;
/* Allocate report status array */
- txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
+ txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
+ scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
if (txr->tx_rsq == NULL) {
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
txr->tail = IXGBE_TDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
txr->tx_paddr = paddrs[i];
@@ -472,9 +521,9 @@ static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int nrxqs, int nrxqsets)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que;
- int i;
+ int i;
MPASS(sc->num_rx_queues > 0);
MPASS(sc->num_rx_queues == nrxqsets);
@@ -483,7 +532,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
sc->rx_queues =
(struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
- M_IXGBE, M_NOWAIT | M_ZERO);
+ M_IXGBE, M_NOWAIT | M_ZERO);
if (!sc->rx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -494,8 +543,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
struct rx_ring *rxr = &que->rxr;
/* In case SR-IOV is enabled, align the index properly */
- rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
- i);
+ rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
rxr->sc = que->sc = sc;
@@ -519,10 +567,10 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_tx_queue *tx_que = sc->tx_queues;
struct ix_rx_queue *rx_que = sc->rx_queues;
- int i;
+ int i;
if (tx_que != NULL) {
for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
@@ -550,10 +598,10 @@ static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 reta = 0, mrqc, rss_key[10];
- int queue_id, table_size, index_mult;
- int i, j;
- u32 rss_hash_config;
+ u32 reta = 0, mrqc, rss_key[10];
+ int queue_id, table_size, index_mult;
+ int i, j;
+ u32 rss_hash_config;
if (sc->feat_en & IXGBE_FEATURE_RSS) {
/* Fetch the configured RSS key */
@@ -605,8 +653,8 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
if (i < 128)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
else
- IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
- reta);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_ERETA((i >> 2) - 32), reta);
reta = 0;
}
}
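
For context on the RETA/ERETA split above: the RSS redirection table holds one
8-bit queue index per entry, packed four to a 32-bit register; entries 0-127
go through IXGBE_RETA and the rest (on parts with the 512-entry table) through
IXGBE_ERETA. A hedged sketch of the packing loop, where my_queue_for() is a
hypothetical stand-in for the driver's RSS-bucket-to-queue mapping:

    /*
     * Sketch of redirection-table packing as performed above;
     * my_queue_for() stands in for the queue_id computation.
     */
    u32 reta = 0;
    int i;

    for (i = 0; i < table_size; i++) {
        reta >>= 8;                             /* make room at the top */
        reta |= (u32)my_queue_for(i) << 24;     /* newest entry, bits 31:24 */
        if ((i & 3) == 3) {                     /* four entries packed: flush */
            if (i < 128)
                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            else
                IXGBE_WRITE_REG(hw,
                    IXGBE_ERETA((i >> 2) - 32), reta);
            reta = 0;
        }
    }
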
@@ -624,12 +672,12 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
* and so we end up with a mix of 2-tuple and 4-tuple
* traffic.
*/
- rss_hash_config = RSS_HASHTYPE_RSS_IPV4
- | RSS_HASHTYPE_RSS_TCP_IPV4
- | RSS_HASHTYPE_RSS_IPV6
- | RSS_HASHTYPE_RSS_TCP_IPV6
- | RSS_HASHTYPE_RSS_IPV6_EX
- | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
+ rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
+ RSS_HASHTYPE_RSS_TCP_IPV4 |
+ RSS_HASHTYPE_RSS_IPV6 |
+ RSS_HASHTYPE_RSS_TCP_IPV6 |
+ RSS_HASHTYPE_RSS_IPV6_EX |
+ RSS_HASHTYPE_RSS_TCP_IPV6_EX;
}
mrqc = IXGBE_MRQC_RSSEN;
@@ -663,14 +711,14 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
- struct ixgbe_hw *hw = &sc->hw;
- if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_t ifp = iflib_get_ifp(ctx);
struct ix_rx_queue *que;
- int i, j;
- u32 bufsz, fctrl, srrctl, rxcsum;
- u32 hlreg;
+ int i, j;
+ u32 bufsz, fctrl, srrctl, rxcsum;
+ u32 hlreg;
/*
* Make sure receives are disabled while
@@ -701,7 +749,7 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
/* Setup the Base and Length of the Rx Descriptor Ring */
for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
- u64 rdba = rxr->rx_paddr;
+ u64 rdba = rxr->rx_paddr;
j = rxr->me;
@@ -743,10 +791,10 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
}
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR
- | IXGBE_PSRTYPE_UDPHDR
- | IXGBE_PSRTYPE_IPV4HDR
- | IXGBE_PSRTYPE_IPV6HDR;
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
@@ -776,9 +824,9 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
int i;
@@ -819,7 +867,8 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
break;
default:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+ txctrl =
+ IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
break;
}
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
@@ -828,7 +877,8 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
break;
default:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
+ txctrl);
break;
}
@@ -873,12 +923,13 @@ ixgbe_register(device_t dev)
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
- struct ixgbe_softc *sc;
- device_t dev;
- if_softc_ctx_t scctx;
+ struct ixgbe_softc *sc;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
- int error = 0;
- u32 ctrl_ext;
+ int error = 0;
+ u32 ctrl_ext;
+ size_t i;
INIT_DEBUGOUT("ixgbe_attach: begin");
@@ -919,8 +970,10 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
- if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
- device_printf(dev, "Firmware recovery mode detected. Limiting "
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw)) {
+ device_printf(dev,
+ "Firmware recovery mode detected. Limiting "
"functionality.\nRefer to the Intel(R) Ethernet Adapters "
"and Devices User Guide for details on firmware recovery "
"mode.");
@@ -928,8 +981,11 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
- if (hw->mbx.ops.init_params)
- hw->mbx.ops.init_params(hw);
+ /* 82598 Does not support SR-IOV, initialize everything else */
+ if (hw->mac.type >= ixgbe_mac_82599_vf) {
+ for (i = 0; i < sc->num_vfs; i++)
+ hw->mbx.ops[i].init_params(hw);
+ }
hw->allow_unsupported_sfp = allow_unsupported_sfp;
@@ -984,7 +1040,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
error = ixgbe_start_hw(hw);
switch (error) {
case IXGBE_ERR_EEPROM_VERSION:
- device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+ device_printf(dev,
+ "This device is a pre-production adapter/LOM. Please be"
+ " aware there may be issues associated with your"
+ " hardware.\nIf you are experiencing problems please"
+ " contact your Intel or hardware representative who"
+ " provided you with this hardware.\n");
break;
case IXGBE_ERR_SFP_NOT_SUPPORTED:
device_printf(dev, "Unsupported SFP+ Module\n");
@@ -1066,15 +1127,14 @@ static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
device_t dev;
- struct ixgbe_softc *sc;
+ struct ixgbe_softc *sc;
struct ixgbe_hw *hw;
- int error = 0;
+ int error = 0;
dev = iflib_get_dev(ctx);
sc = iflib_get_softc(ctx);
hw = &sc->hw;
-
if (sc->intr_type == IFLIB_INTR_LEGACY &&
(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
device_printf(dev, "Device does not support legacy interrupts");
@@ -1083,10 +1143,11 @@ ixgbe_if_attach_post(if_ctx_t ctx)
}
/* Allocate multicast array memory. */
- sc->mta = malloc(sizeof(*sc->mta) *
- MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
+ sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
+ M_IXGBE, M_NOWAIT);
if (sc->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
+ device_printf(dev,
+ "Can not allocate multicast setup array\n");
error = ENOMEM;
goto err;
}
@@ -1137,6 +1198,17 @@ ixgbe_if_attach_post(if_ctx_t ctx)
/* Add sysctls */
ixgbe_add_device_sysctls(ctx);
+ /* Init recovery mode timer and state variable */
+ if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
+ sc->recovery_mode = 0;
+
+ /* Set up the timer callout */
+ callout_init(&sc->fw_mode_timer, true);
+
+ /* Start the task */
+ callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
+ }
+
return (0);
err:
return (error);
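
The recovery-mode poller added here is a standard self-rearming callout. The
body of ixgbe_fw_mode_timer() is outside this hunk; a plausible sketch, using
only the ops this diff already relies on (the function name and message below
are illustrative):

    /*
     * Hedged sketch of a self-rearming recovery-mode poller; the real
     * ixgbe_fw_mode_timer() is not shown in this hunk.
     */
    static void
    example_fw_mode_timer(void *arg)
    {
        struct ixgbe_softc *sc = arg;
        struct ixgbe_hw *hw = &sc->hw;

        if (hw->mac.ops.fw_recovery_mode != NULL &&
            hw->mac.ops.fw_recovery_mode(hw)) {
            /* first detection: latch the flag and warn once */
            if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1))
                device_printf(sc->dev,
                    "Firmware recovery mode detected. Limiting"
                    " functionality.\n");
        } else
            atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);

        /* poll again in one second, matching the callout_reset() above */
        callout_reset(&sc->fw_mode_timer, hz, example_fw_mode_timer, sc);
    }
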
@@ -1155,7 +1227,7 @@ static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u16 dev_caps = 0;
+ u16 dev_caps = 0;
/* Find out WoL support for port */
sc->wol_support = hw->wol_enabled = 0;
@@ -1179,7 +1251,7 @@ ixgbe_check_wol_support(struct ixgbe_softc *sc)
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
INIT_DEBUGOUT("ixgbe_setup_interface: begin");
@@ -1205,7 +1277,7 @@ static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@@ -1239,10 +1311,9 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- int i;
-
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ int i;
if (hw->phy.ops.read_i2c_byte == NULL)
return (ENXIO);
@@ -1252,7 +1323,8 @@ ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
return (0);
} /* ixgbe_if_i2c_req */
-/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
+/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
+ * reinitialized
* @ctx: iflib context
* @event: event code to check
*
@@ -1276,10 +1348,10 @@ ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- u64 layer;
+ device_t dev = iflib_get_dev(ctx);
+ u64 layer;
layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
@@ -1299,9 +1371,11 @@ ixgbe_add_media_types(if_ctx_t ctx)
}
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
- layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
+ layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ }
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
@@ -1350,8 +1424,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
}
#endif
- if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
device_printf(dev, "Media supported: 1000baseBX\n");
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
+ }
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
@@ -1397,10 +1473,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
static void
ixgbe_config_link(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 autoneg, err = 0;
- bool sfp, negotiate;
+ u32 autoneg, err = 0;
+ bool sfp, negotiate;
sfp = ixgbe_is_sfp(hw);
@@ -1461,11 +1537,11 @@ ixgbe_config_link(if_ctx_t ctx)
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_hw *hw = &sc->hw;
struct ixgbe_hw_stats *stats = &sc->stats.pf;
- u32 missed_rx = 0, bprc, lxon, lxoff, total;
- u32 lxoffrxc;
- u64 total_missed_rx = 0;
+ u32 missed_rx = 0, bprc, lxon, lxoff, total;
+ u32 lxoffrxc;
+ u64 total_missed_rx = 0;
stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -1593,8 +1669,8 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
* - jabber count.
*/
IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
- stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
- stats->rjc);
+ stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
+ stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */
/************************************************************************
@@ -1605,19 +1681,19 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
static void
ixgbe_add_hw_stats(struct ixgbe_softc *sc)
{
- device_t dev = iflib_get_dev(sc->ctx);
- struct ix_rx_queue *rx_que;
- struct ix_tx_queue *tx_que;
+ device_t dev = iflib_get_dev(sc->ctx);
+ struct ix_rx_queue *rx_que;
+ struct ix_tx_queue *tx_que;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct ixgbe_hw_stats *stats = &sc->stats.pf;
- struct sysctl_oid *stat_node, *queue_node;
+ struct ixgbe_hw_stats *stats = &sc->stats.pf;
+ struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
- int i;
+ int i;
#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
+ char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
@@ -1627,7 +1703,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
- for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
@@ -1635,11 +1712,13 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
- ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
+ CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
+ ixgbe_sysctl_tdh_handler, "IU",
+ "Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
- ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
+ CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
+ ixgbe_sysctl_tdt_handler, "IU",
+ "Transmit Descriptor Tail");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &txr->tso_tx, "TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
@@ -1647,7 +1726,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
"Queue Packets Transmitted");
}
- for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
+ for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
+ i++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
@@ -1655,7 +1735,7 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_UINT | CTLFLAG_RW,
&sc->rx_queues[i], 0,
ixgbe_sysctl_interrupt_rate_handler, "IU",
"Interrupt Rate");
@@ -1663,11 +1743,13 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
CTLFLAG_RD, &(sc->rx_queues[i].irqs),
"irqs on this queue");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
- ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
+ CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
+ ixgbe_sysctl_rdh_handler, "IU",
+ "Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
- ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
+ CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
+ ixgbe_sysctl_rdt_handler, "IU",
+ "Receive Descriptor Tail");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
@@ -1679,7 +1761,6 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
}
/* MAC stats get their own sub node */
-
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
@@ -1789,12 +1870,16 @@ static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!txr)
return (0);
+
+ if (atomic_load_acq_int(&txr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
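
The same EPERM gate now fronts all of the descriptor head/tail handlers: while
firmware recovery mode is active, register reads are not trustworthy, so the
sysctls refuse to run. The check reduces to a one-liner; a hypothetical shared
helper equivalent to the open-coded tests would be:

    /*
     * Hypothetical helper equivalent to the inline recovery-mode checks
     * added above; the commit open-codes this in each handler.
     */
    static inline int
    example_recovery_gate(struct ixgbe_softc *sc)
    {
        /* acquire load pairs with the store in the recovery-mode timer */
        return (atomic_load_acq_int(&sc->recovery_mode) ? EPERM : 0);
    }
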
@@ -1812,12 +1897,15 @@ static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!txr)
return (0);
+ if (atomic_load_acq_int(&txr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1835,12 +1923,15 @@ static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!rxr)
return (0);
+ if (atomic_load_acq_int(&rxr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1858,12 +1949,15 @@ static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!rxr)
return (0);
+ if (atomic_load_acq_int(&rxr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1884,7 +1978,7 @@ static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1902,7 +1996,7 @@ static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1918,12 +2012,12 @@ ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- int i;
- u32 ctrl;
+ struct rx_ring *rxr;
+ int i;
+ u32 ctrl;
/*
@@ -1932,15 +2026,18 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
* the VFTA and other state, so if there
* have been no vlan's registered do nothing.
*/
- if (sc->num_vlans == 0 || (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
+ if (sc->num_vlans == 0 ||
+ (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
/* Clear the vlan hw flag */
for (i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
/* On 82599 the VLAN enable is per/queue in RXDCTL */
if (hw->mac.type != ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
+ ctrl = IXGBE_READ_REG(hw,
+ IXGBE_RXDCTL(rxr->me));
ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
+ ctrl);
}
rxr->vtag_strip = false;
}
@@ -1960,9 +2057,11 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
rxr = &sc->rx_queues[i].rxr;
/* On 82599 the VLAN enable is per/queue in RXDCTL */
if (hw->mac.type != ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
+ ctrl = IXGBE_READ_REG(hw,
+ IXGBE_RXDCTL(rxr->me));
ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
+ ctrl);
}
rxr->vtag_strip = true;
}
@@ -1999,11 +2098,11 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
- device_t dev = iflib_get_dev(sc->ctx);
+ device_t dev = iflib_get_dev(sc->ctx);
struct ixgbe_hw *hw = &sc->hw;
- int bus_info_valid = true;
- u32 offset;
- u16 link;
+ int bus_info_valid = true;
+ u32 offset;
+ u16 link;
/* Some devices are behind an internal bridge */
switch (hw->device_id) {
@@ -2073,17 +2172,27 @@ display:
if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
(hw->bus.speed == ixgbe_bus_speed_2500))) {
- device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
- device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
+ device_printf(dev,
+ "PCI-Express bandwidth available for this card"
+ " is not sufficient for optimal performance.\n");
+ device_printf(dev,
+ "For optimal performance a x8 PCIE, or x4 PCIE"
+ " Gen2 slot is required.\n");
}
if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
(hw->bus.speed < ixgbe_bus_speed_8000))) {
- device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
- device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
+ device_printf(dev,
+ "PCI-Express bandwidth available for this card"
+ " is not sufficient for optimal performance.\n");
+ device_printf(dev,
+ "For optimal performance a x8 PCIE Gen3 slot is"
+ " required.\n");
}
} else
- device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
+ device_printf(dev,
+ "Unable to determine slot speed/width. The speed/width"
+ " reported are that of the internal switch.\n");
return;
} /* ixgbe_get_slot_info */
@@ -2096,11 +2205,11 @@ display:
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que;
- int error, rid, vector = 0;
- char buf[16];
+ int error, rid, vector = 0;
+ char buf[16];
/* Admin Que is vector 0*/
rid = vector + 1;
@@ -2109,11 +2218,13 @@ ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
- IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
+ IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
+ buf);
if (error) {
device_printf(iflib_get_dev(ctx),
- "Failed to allocate que int %d err: %d", i, error);
+ "Failed to allocate que int %d err: %d",
+ i, error);
sc->num_rx_queues = i + 1;
goto fail;
}
@@ -2154,6 +2265,7 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
uint32_t newitr = 0;
struct rx_ring *rxr = &que->rxr;
+ /* FIXME struct tx_ring *txr = ... ->txr; */
/*
* Do Adaptive Interrupt Moderation:
@@ -2169,12 +2281,18 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
que->eitr_setting = 0;
/* Idle, do nothing */
if (rxr->bytes == 0) {
+ /* FIXME && txr->bytes == 0 */
return;
}
- if ((rxr->bytes) && (rxr->packets)) {
- newitr = (rxr->bytes / rxr->packets);
- }
+ if ((rxr->bytes) && (rxr->packets))
+ newitr = rxr->bytes / rxr->packets;
+ /* FIXME for transmit accounting
+ * if ((txr->bytes) && (txr->packets))
+ * newitr = txr->bytes/txr->packets;
+ * if ((rxr->bytes) && (rxr->packets))
+ * newitr = max(newitr, (rxr->bytes / rxr->packets));
+ */
newitr += 24; /* account for hardware frame, crc */
/* set an upper boundary */
@@ -2197,6 +2315,8 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
que->eitr_setting = newitr;
/* Reset state */
+ /* FIXME txr->bytes = 0; */
+ /* FIXME txr->packets = 0; */
rxr->bytes = 0;
rxr->packets = 0;
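
The FIXME comments sketch the missing TX half of adaptive interrupt
moderation. Folding them in as the comments suggest, the interval computation
would read roughly as follows (a sketch of the FIXME's intent, not committed
code; the clamp value is illustrative since the hunk elides it):

    /*
     * Sketch of ixgbe_perform_aim() with the FIXME'd TX accounting
     * folded in.  "txr" is the queue's TX ring, which the FIXME notes
     * is not yet plumbed into the committed code.
     */
    static uint32_t
    example_aim_newitr(struct tx_ring *txr, struct rx_ring *rxr)
    {
        uint32_t newitr = 0;

        /* average bytes per packet this interval, RX and TX combined */
        if (txr->bytes != 0 && txr->packets != 0)
            newitr = txr->bytes / txr->packets;
        if (rxr->bytes != 0 && rxr->packets != 0)
            newitr = max(newitr, rxr->bytes / rxr->packets);
        newitr += 24;       /* account for hardware frame, crc */
        if (newitr > 3000)  /* illustrative upper boundary */
            newitr = 3000;

        /* reset both accumulators for the next interval */
        txr->bytes = txr->packets = 0;
        rxr->bytes = rxr->packets = 0;
        return (newitr);
    }
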
@@ -2210,8 +2330,8 @@ static int
ixgbe_msix_que(void *arg)
{
struct ix_rx_queue *que = arg;
- struct ixgbe_softc *sc = que->sc;
- if_t ifp = iflib_get_ifp(que->sc->ctx);
+ struct ixgbe_softc *sc = que->sc;
+ if_t ifp = iflib_get_ifp(que->sc->ctx);
/* Protect against spurious interrupts */
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
@@ -2237,9 +2357,9 @@ ixgbe_msix_que(void *arg)
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int layer;
+ int layer;
INIT_DEBUGOUT("ixgbe_if_media_status: begin");
@@ -2285,6 +2405,9 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
+ break;
}
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
switch (sc->link_speed) {
@@ -2402,9 +2525,9 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ifmedia *ifm = iflib_get_media(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
ixgbe_link_speed speed = 0;
INIT_DEBUGOUT("ixgbe_if_media_change: begin");
@@ -2446,6 +2569,7 @@ ixgbe_if_media_change(if_ctx_t ctx)
#endif
case IFM_1000_LX:
case IFM_1000_SX:
+ case IFM_1000_BX:
speed |= IXGBE_LINK_SPEED_1GB_FULL;
break;
case IFM_1000_T:
@@ -2496,16 +2620,17 @@ static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
- u32 rctl;
- int mcnt = 0;
+ if_t ifp = iflib_get_ifp(ctx);
+ u32 rctl;
+ int mcnt = 0;
rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
rctl &= (~IXGBE_FCTRL_UPE);
if (if_getflags(ifp) & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else {
- mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
+ mcnt = min(if_llmaddr_count(ifp),
+ MAX_NUM_MULTICAST_ADDRESSES);
}
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
rctl &= (~IXGBE_FCTRL_MPE);
@@ -2528,10 +2653,10 @@ ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
static int
ixgbe_msix_link(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ixgbe_hw *hw = &sc->hw;
- u32 eicr, eicr_mask;
- s32 retval;
+ u32 eicr, eicr_mask;
+ s32 retval;
++sc->link_irq;
@@ -2563,10 +2688,12 @@ ixgbe_msix_link(void *arg)
} else
if (eicr & IXGBE_EICR_ECC) {
device_printf(iflib_get_dev(sc->ctx),
- "Received ECC Err, initiating reset\n");
- hw->mac.flags |= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ "Received ECC Err, initiating reset\n");
+ hw->mac.flags |=
+ ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
ixgbe_reset_hw(hw);
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_ECC);
}
/* Check for over temp condition */
@@ -2583,7 +2710,8 @@ ixgbe_msix_link(void *arg)
if (retval != IXGBE_ERR_OVERTEMP)
break;
device_printf(iflib_get_dev(sc->ctx),
- "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ "\nCRITICAL: OVER TEMP!!"
+ " PHY IS SHUT DOWN!!\n");
device_printf(iflib_get_dev(sc->ctx),
"System shutdown required!\n");
break;
@@ -2594,10 +2722,12 @@ ixgbe_msix_link(void *arg)
if (retval != IXGBE_ERR_OVERTEMP)
break;
device_printf(iflib_get_dev(sc->ctx),
- "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ "\nCRITICAL: OVER TEMP!!"
+ " PHY IS SHUT DOWN!!\n");
device_printf(iflib_get_dev(sc->ctx),
"System shutdown required!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_TS);
break;
}
}
@@ -2631,7 +2761,8 @@ ixgbe_msix_link(void *arg)
/* Check for fan failure */
if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
ixgbe_check_fan_failure(sc, eicr, true);
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* External PHY interrupt */
@@ -2641,7 +2772,8 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
}
- return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
+ return (sc->task_requests != 0) ?
+ FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */
/************************************************************************
@@ -2651,8 +2783,11 @@ static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
- int error;
- unsigned int reg, usec, rate;
+ int error;
+ unsigned int reg, usec, rate;
+
+ if (atomic_load_acq_int(&que->sc->recovery_mode))
+ return (EPERM);
reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
usec = ((reg & 0x0FF8) >> 3);
@@ -2682,9 +2817,9 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
struct sysctl_oid_list *child;
struct sysctl_ctx_list *ctx_list;
@@ -2693,12 +2828,12 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
/* Sysctls for all devices */
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_flowcntl, "I",
IXGBE_SYSCTL_DESC_SET_FC);
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_advertise, "I",
IXGBE_SYSCTL_DESC_ADV_SPEED);
@@ -2707,35 +2842,54 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
&sc->enable_aim, 0, "Interrupt Moderation");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_first_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for first segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_middle_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for middle segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_last_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for last segment");
+
#ifdef IXGBE_DEBUG
/* testing sysctls (for all devices) */
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_power_state,
"I", "PCI Power State");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
/* for X550 series devices */
if (hw->mac.type >= ixgbe_mac_X550)
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
- CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U16 | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_dmac,
"I", "DMA Coalesce");
/* for WoL-capable devices */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
- ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ ixgbe_sysctl_wol_enable, "I",
+ "Enable/Disable Wake on LAN");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
- CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U32 | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_wufc,
"I", "Enable/Disable Wake Up Filters");
}
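Two separate changes run through this block. Most existing OIDs drop CTLFLAG_NEEDGIANT, declaring their handlers safe to run without the Giant lock, while the three new TSO-mask OIDs (added with CTLFLAG_NEEDGIANT, by contrast) are registered against a single handler and differ only in arg2: 0, 1 and 2 select the first-, middle- and last-segment mask. The handler itself appears later in this diff. How the two private arguments reach a handler, in outline (SYSCTL_HANDLER_ARGS expands to oidp, arg1, arg2 and req):

    static int
    example_handler(SYSCTL_HANDLER_ARGS)
    {
        struct example_softc *sc = arg1;    /* pointer registered as arg1 */
        int which = arg2;                   /* 0, 1 or 2, set per OID */

        /* ... pick the per-'which' state on sc, then handle req ... */
        return (0);
    }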
@@ -2746,24 +2900,25 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
struct sysctl_oid_list *phy_list;
phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
- CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "External PHY sysctls");
phy_list = SYSCTL_CHILDREN(phy_node);
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
- CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U16 | CTLFLAG_RD,
sc, 0, ixgbe_sysctl_phy_temp,
"I", "Current External PHY Temperature (Celsius)");
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
"overtemp_occurred",
- CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_phy_overtemp_occurred, "I",
"External PHY High Temperature Event Occurred");
}
if (sc->feat_cap & IXGBE_FEATURE_EEE) {
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
}
} /* ixgbe_add_device_sysctls */
@@ -2775,15 +2930,16 @@ static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int rid;
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@@ -2810,8 +2966,8 @@ static int
ixgbe_if_detach(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- u32 ctrl_ext;
+ device_t dev = iflib_get_dev(ctx);
+ u32 ctrl_ext;
INIT_DEBUGOUT("ixgbe_detach: begin");
@@ -2827,6 +2983,8 @@ ixgbe_if_detach(if_ctx_t ctx)
ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
+ callout_drain(&sc->fw_mode_timer);
+
ixgbe_free_pci_resources(ctx);
free(sc->mta, M_IXGBE);
@@ -2841,10 +2999,10 @@ ixgbe_if_detach(if_ctx_t ctx)
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- s32 error = 0;
+ device_t dev = iflib_get_dev(ctx);
+ s32 error = 0;
if (!hw->wol_enabled)
ixgbe_set_phy_power(hw, false);
@@ -2857,8 +3015,9 @@ ixgbe_setup_low_power_mode(if_ctx_t ctx)
IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
/*
- * Clear Wake Up Status register to prevent any previous wakeup
- * events from waking us up immediately after we suspend.
+ * Clear Wake Up Status register to prevent any previous
+ * wakeup events from waking us up immediately after we
+ * suspend.
*/
IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
@@ -2877,7 +3036,8 @@ ixgbe_setup_low_power_mode(if_ctx_t ctx)
ixgbe_if_stop(ctx);
error = hw->phy.ops.enter_lplu(hw);
if (error)
- device_printf(dev, "Error entering LPLU: %d\n", error);
+ device_printf(dev, "Error entering LPLU: %d\n",
+ error);
hw->phy.reset_disable = false;
} else {
/* Just stop for other adapters */
@@ -2927,11 +3087,11 @@ ixgbe_if_suspend(if_ctx_t ctx)
static int
ixgbe_if_resume(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 wus;
+ u32 wus;
INIT_DEBUGOUT("ixgbe_resume: begin");
@@ -3035,17 +3195,17 @@ ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
void
ixgbe_if_init(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
- device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *rx_que;
struct ix_tx_queue *tx_que;
- u32 txdctl, mhadd;
- u32 rxdctl, rxctrl;
- u32 ctrl_ext;
+ u32 txdctl, mhadd;
+ u32 rxdctl, rxctrl;
+ u32 ctrl_ext;
- int i, j, err;
+ int i, j, err;
INIT_DEBUGOUT("ixgbe_if_init: begin");
@@ -3094,7 +3254,8 @@ ixgbe_if_init(if_ctx_t ctx)
}
/* Now enable all the queues */
- for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
@@ -3112,7 +3273,8 @@ ixgbe_if_init(if_ctx_t ctx)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
}
- for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
+ for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
+ i++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
@@ -3268,7 +3430,7 @@ ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que = sc->tx_queues;
- u32 newitr;
+ u32 newitr;
if (ixgbe_max_interrupt_rate > 0)
newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
@@ -3307,16 +3469,16 @@ static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 gpie;
+ u32 gpie;
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
if (sc->intr_type == IFLIB_INTR_MSIX) {
/* Enable Enhanced MSI-X mode */
- gpie |= IXGBE_GPIE_MSIX_MODE
- | IXGBE_GPIE_EIAME
- | IXGBE_GPIE_PBA_SUPPORT
- | IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_MSIX_MODE |
+ IXGBE_GPIE_EIAME |
+ IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
}
/* Fan Failure Interrupt */
@@ -3353,7 +3515,7 @@ static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 rxpb, frame, size, tmp;
+ u32 rxpb, frame, size, tmp;
frame = sc->max_frame_size;
@@ -3413,19 +3575,20 @@ ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_mc_addr *mta;
- if_t ifp = iflib_get_ifp(ctx);
- u8 *update_ptr;
- u32 fctrl;
- u_int mcnt;
+ if_t ifp = iflib_get_ifp(ctx);
+ u8 *update_ptr;
+ u32 fctrl;
+ u_int mcnt;
IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
mta = sc->mta;
bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
- mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
+ mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
+ sc);
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
update_ptr = (u8 *)mta;
@@ -3494,6 +3657,35 @@ ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
} /* ixgbe_if_timer */
/************************************************************************
+ * ixgbe_fw_mode_timer - FW mode timer routine
+ ************************************************************************/
+static void
+ixgbe_fw_mode_timer(void *arg)
+{
+ struct ixgbe_softc *sc = arg;
+ struct ixgbe_hw *hw = &sc->hw;
+
+ if (ixgbe_fw_recovery_mode(hw)) {
+ if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
+ /* Firmware error detected, entering recovery mode */
+ device_printf(sc->dev,
+ "Firmware recovery mode detected. Limiting"
+ " functionality. Refer to the Intel(R) Ethernet"
+ " Adapters and Devices User Guide for details on"
+ " firmware recovery mode.\n");
+
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(sc->ctx);
+ }
+ } else
+ atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
+
+ callout_reset(&sc->fw_mode_timer, hz,
+ ixgbe_fw_mode_timer, sc);
+} /* ixgbe_fw_mode_timer */
+
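ixgbe_fw_mode_timer() polls ixgbe_fw_recovery_mode() once per second and uses atomic_cmpset_acq_int() as an edge detector, so the warning is printed and the interface stopped only on the 0 -> 1 transition; the callout then rearms itself. The callout_drain() added to ixgbe_if_detach() earlier in this diff is the matching teardown: it waits out any in-flight callback before the softc is freed. The same self-rearming pattern as a sketch (kernel context; poll_hw() is a stand-in for the hardware check):

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/callout.h>
    #include <machine/atomic.h>

    struct poller {
        struct callout  timer;
        volatile u_int  flag;
    };

    static int poll_hw(void) { return (0); }   /* stand-in */

    static void
    poll_tick(void *arg)
    {
        struct poller *p = arg;

        if (poll_hw()) {
            /* Act only on the 0 -> 1 edge. */
            if (atomic_cmpset_acq_int(&p->flag, 0, 1))
                printf("condition detected\n");
        } else
            atomic_cmpset_acq_int(&p->flag, 1, 0);

        callout_reset(&p->timer, hz, poll_tick, p); /* rearm in ~1s */
    }

    /* attach: callout_init(&p->timer, 1);
     *         callout_reset(&p->timer, hz, poll_tick, p);
     * detach: callout_drain(&p->timer);  -- blocks until done */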
+/************************************************************************
* ixgbe_sfp_probe
*
* Determine if a port had optics inserted.
@@ -3501,10 +3693,10 @@ ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- bool result = false;
+ device_t dev = iflib_get_dev(ctx);
+ bool result = false;
if ((hw->phy.type == ixgbe_phy_nl) &&
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
@@ -3514,7 +3706,8 @@ ixgbe_sfp_probe(if_ctx_t ctx)
ret = hw->phy.ops.reset(hw);
sc->sfp_probe = false;
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev, "Unsupported SFP+ module detected!");
+ device_printf(dev,
+ "Unsupported SFP+ module detected!");
device_printf(dev,
"Reload driver with supported module.\n");
goto out;
@@ -3534,11 +3727,11 @@ out:
static void
ixgbe_handle_mod(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- u32 err, cage_full = 0;
+ device_t dev = iflib_get_dev(ctx);
+ u32 err, cage_full = 0;
if (sc->hw.need_crosstalk_fix) {
switch (hw->mac.type) {
@@ -3590,11 +3783,11 @@ handle_mod_out:
static void
ixgbe_handle_msf(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 autoneg;
- bool negotiate;
+ u32 autoneg;
+ bool negotiate;
/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
@@ -3617,14 +3810,16 @@ ixgbe_handle_msf(void *context)
static void
ixgbe_handle_phy(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int error;
+ int error;
error = hw->phy.ops.handle_lasi(hw);
if (error == IXGBE_ERR_OVERTEMP)
- device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
+ device_printf(sc->dev,
+ "CRITICAL: EXTERNAL PHY OVER TEMP!!"
+ " PHY will downshift to lower power state!\n");
else if (error)
device_printf(sc->dev,
"Error handling LASI interrupt: %d\n", error);
@@ -3639,7 +3834,7 @@ ixgbe_handle_phy(void *context)
static void
ixgbe_if_stop(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
@@ -3673,7 +3868,7 @@ static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
if (sc->link_up) {
if (sc->link_active == false) {
@@ -3725,7 +3920,7 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_hw *hw = &sc->hw;
struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
@@ -3751,10 +3946,10 @@ ixgbe_config_dmac(struct ixgbe_softc *sc)
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *que = sc->rx_queues;
- u32 mask, fwsm;
+ u32 mask, fwsm;
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
@@ -3857,7 +4052,8 @@ ixgbe_if_disable_intr(if_ctx_t ctx)
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
- struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
+ struct ixgbe_hw *hw =
+ &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
/* Re-enable other interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
@@ -3869,7 +4065,7 @@ ixgbe_link_intr_enable(if_ctx_t ctx)
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = &sc->rx_queues[rxqid];
ixgbe_enable_queue(sc, que->msix);
@@ -3884,8 +4080,8 @@ static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = 1ULL << vector;
- u32 mask;
+ u64 queue = 1ULL << vector;
+ u32 mask;
if (hw->mac.type == ixgbe_mac_82598EB) {
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
@@ -3907,8 +4103,8 @@ static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = 1ULL << vector;
- u32 mask;
+ u64 queue = 1ULL << vector;
+ u32 mask;
if (hw->mac.type == ixgbe_mac_82598EB) {
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
@@ -3929,11 +4125,11 @@ ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
int
ixgbe_intr(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = sc->rx_queues;
- struct ixgbe_hw *hw = &sc->hw;
- if_ctx_t ctx = sc->ctx;
- u32 eicr, eicr_mask;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_ctx_t ctx = sc->ctx;
+ u32 eicr, eicr_mask;
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
@@ -3944,11 +4140,12 @@ ixgbe_intr(void *arg)
}
/* Check for fan failure */
- if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+ if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
(eicr & IXGBE_EICR_GPI_SDP1)) {
device_printf(sc->dev,
"\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS,
+ IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* Link status change */
@@ -3992,8 +4189,8 @@ static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ix_rx_queue *que = sc->rx_queues;
- device_t dev = iflib_get_dev(ctx);
+ struct ix_rx_queue *que = sc->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
if (sc->intr_type == IFLIB_INTR_MSIX)
@@ -4019,7 +4216,7 @@ static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc;
- int error, fc;
+ int error, fc;
sc = (struct ixgbe_softc *)arg1;
fc = sc->hw.fc.current_mode;
@@ -4084,8 +4281,8 @@ static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- u32 srrctl;
+ struct rx_ring *rxr;
+ u32 srrctl;
for (int i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
@@ -4097,8 +4294,9 @@ ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
/* enable drop for each vf */
for (int i = 0; i < sc->num_vfs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
- IXGBE_QDE_ENABLE));
+ (IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
}
} /* ixgbe_enable_rx_drop */
@@ -4109,8 +4307,8 @@ static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- u32 srrctl;
+ struct rx_ring *rxr;
+ u32 srrctl;
for (int i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
@@ -4135,9 +4333,12 @@ static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc;
- int error, advertise;
+ int error, advertise;
sc = (struct ixgbe_softc *)arg1;
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
+
advertise = sc->advertise;
error = sysctl_handle_int(oidp, &advertise, 0, req);
@@ -4162,12 +4363,12 @@ ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
- device_t dev = iflib_get_dev(sc->ctx);
- struct ixgbe_hw *hw;
+ device_t dev = iflib_get_dev(sc->ctx);
+ struct ixgbe_hw *hw;
ixgbe_link_speed speed = 0;
ixgbe_link_speed link_caps = 0;
- s32 err = IXGBE_NOT_IMPLEMENTED;
- bool negotiate = false;
+ s32 err = IXGBE_NOT_IMPLEMENTED;
+ bool negotiate = false;
/* Checks to validate new value */
if (sc->advertise == advertise) /* no change */
@@ -4181,12 +4382,16 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber))) {
- device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
+ device_printf(dev,
+ "Advertised speed can only be set on copper or multispeed"
+ " fiber media types.\n");
return (EINVAL);
}
if (advertise < 0x1 || advertise > 0x3F) {
- device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
+ device_printf(dev,
+ "Invalid advertised speed; valid modes are 0x1 through"
+ " 0x3F\n");
return (EINVAL);
}
@@ -4194,7 +4399,9 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
&negotiate);
if (err != IXGBE_SUCCESS) {
- device_printf(dev, "Unable to determine supported advertise speeds\n");
+ device_printf(dev,
+ "Unable to determine supported advertise speeds"
+ "\n");
return (ENODEV);
}
}
@@ -4202,42 +4409,54 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
/* Set new value and report new advertised mode */
if (advertise & 0x1) {
if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
- device_printf(dev, "Interface does not support 100Mb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 100Mb advertised"
+ " speed\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_100_FULL;
}
if (advertise & 0x2) {
if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
- device_printf(dev, "Interface does not support 1Gb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 1Gb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_1GB_FULL;
}
if (advertise & 0x4) {
if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
- device_printf(dev, "Interface does not support 10Gb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 10Gb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_10GB_FULL;
}
if (advertise & 0x8) {
if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
- device_printf(dev, "Interface does not support 10Mb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 10Mb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_10_FULL;
}
if (advertise & 0x10) {
if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
- device_printf(dev, "Interface does not support 2.5G advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 2.5G advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
}
if (advertise & 0x20) {
if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
- device_printf(dev, "Interface does not support 5G advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 5G advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_5GB_FULL;
@@ -4265,11 +4484,11 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
- int speed;
+ struct ixgbe_hw *hw = &sc->hw;
+ int speed;
ixgbe_link_speed link_caps = 0;
- s32 err;
- bool negotiate = false;
+ s32 err;
+ bool negotiate = false;
/*
* Advertised speed means nothing unless it's copper or
@@ -4319,9 +4538,9 @@ static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- if_t ifp = iflib_get_ifp(sc->ctx);
- int error;
- u16 newval;
+ if_t ifp = iflib_get_ifp(sc->ctx);
+ int error;
+ u16 newval;
newval = sc->dmac;
error = sysctl_handle_16(oidp, &newval, 0, req);
@@ -4374,8 +4593,8 @@ static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- device_t dev = sc->dev;
- int curr_ps, new_ps, error = 0;
+ device_t dev = sc->dev;
+ int curr_ps, new_ps, error = 0;
curr_ps = new_ps = pci_get_powerstate(dev);
@@ -4414,8 +4633,8 @@ ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- int new_wol_enabled;
- int error = 0;
+ int new_wol_enabled;
+ int error = 0;
new_wol_enabled = hw->wol_enabled;
error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
@@ -4454,8 +4673,8 @@ static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- int error = 0;
- u32 new_wufc;
+ int error = 0;
+ u32 new_wufc;
new_wufc = sc->wufc;
@@ -4482,12 +4701,15 @@ ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- struct sbuf *buf;
- int error = 0, reta_size;
- u32 reg;
+ device_t dev = sc->dev;
+ struct sbuf *buf;
+ int error = 0, reta_size;
+ u32 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
@@ -4540,9 +4762,12 @@ ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- u16 reg;
+ u16 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
device_printf(iflib_get_dev(sc->ctx),
@@ -4553,7 +4778,8 @@ ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
device_printf(iflib_get_dev(sc->ctx),
- "Error reading from PHY's current temperature register\n");
+ "Error reading from PHY's current temperature register"
+ "\n");
return (EAGAIN);
}
@@ -4572,9 +4798,12 @@ ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- u16 reg;
+ u16 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
device_printf(iflib_get_dev(sc->ctx),
@@ -4608,10 +4837,13 @@ static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- device_t dev = sc->dev;
- if_t ifp = iflib_get_ifp(sc->ctx);
- int curr_eee, new_eee, error = 0;
- s32 retval;
+ device_t dev = sc->dev;
+ if_t ifp = iflib_get_ifp(sc->ctx);
+ int curr_eee, new_eee, error = 0;
+ s32 retval;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
@@ -4651,17 +4883,54 @@ ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
return (error);
} /* ixgbe_sysctl_eee_state */
+static int
+ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc;
+ u32 reg, val, shift;
+ int error, mask;
+
+ sc = oidp->oid_arg1;
+ switch (oidp->oid_arg2) {
+ case 0:
+ reg = IXGBE_DTXTCPFLGL;
+ shift = 0;
+ break;
+ case 1:
+ reg = IXGBE_DTXTCPFLGL;
+ shift = 16;
+ break;
+ case 2:
+ reg = IXGBE_DTXTCPFLGH;
+ shift = 0;
+ break;
+ default:
+ return (EINVAL);
+ }
+ val = IXGBE_READ_REG(&sc->hw, reg);
+ mask = (val >> shift) & 0xfff;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (mask < 0 || mask > 0xfff)
+ return (EINVAL);
+ val = (val & ~(0xfff << shift)) | (mask << shift);
+ IXGBE_WRITE_REG(&sc->hw, reg, val);
+ return (0);
+}
+
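The handler above edits one of three 12-bit TCP-flags masks packed into the DTXTCPFLGL/DTXTCPFLGH registers: arg2 selects the (register, shift) pair, the current field is extracted for the sysctl_handle_int() round trip, and a new value is merged back with a read-modify-write. With the middle-segment OID (shift 16), for example, writing 0xff6 computes (val & ~0x0fff0000) | 0x0ff60000. The field update as a self-contained check:

    #include <assert.h>
    #include <stdint.h>

    /* Replace the 12-bit field at 'shift' in 'reg' with 'mask'. */
    static uint32_t
    set_flags_field(uint32_t reg, unsigned shift, uint32_t mask)
    {
        assert(mask <= 0xfff);
        return ((reg & ~(0xfffu << shift)) | (mask << shift));
    }

    int
    main(void)
    {
        uint32_t dtxtcpflgl = 0x0db60db6;   /* arbitrary starting value */

        /* middle-segment mask: bits 27:16 of DTXTCPFLGL */
        dtxtcpflgl = set_flags_field(dtxtcpflgl, 16, 0xff6);
        assert(((dtxtcpflgl >> 16) & 0xfff) == 0xff6);
        assert((dtxtcpflgl & 0xffff) == 0x0db6);    /* low field intact */
        return (0);
    }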
/************************************************************************
* ixgbe_init_device_features
************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
- sc->feat_cap = IXGBE_FEATURE_NETMAP
- | IXGBE_FEATURE_RSS
- | IXGBE_FEATURE_MSI
- | IXGBE_FEATURE_MSIX
- | IXGBE_FEATURE_LEGACY_IRQ;
+ sc->feat_cap = IXGBE_FEATURE_NETMAP |
+ IXGBE_FEATURE_RSS |
+ IXGBE_FEATURE_MSI |
+ IXGBE_FEATURE_MSIX |
+ IXGBE_FEATURE_LEGACY_IRQ;
/* Set capabilities first... */
switch (sc->hw.mac.type) {
@@ -4677,15 +4946,20 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
sc->feat_cap |= IXGBE_FEATURE_BYPASS;
break;
case ixgbe_mac_X550:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
break;
case ixgbe_mac_X550EM_x:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
+ if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
+ sc->feat_cap |= IXGBE_FEATURE_EEE;
break;
case ixgbe_mac_X550EM_a:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
@@ -4721,6 +4995,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
/* Thermal Sensor */
if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
+ /* Recovery mode */
+ if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
+ sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
/* Enabled via global sysctl... */
/* Flow Director */
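sc->feat_cap, filled in per MAC type by the switch above, records what the silicon supports; sc->feat_en records what this attach actually turned on. Recovery mode follows the simplest pattern, enabled whenever capable, while features such as Flow Director stay behind a global tunable (the "Enabled via global sysctl" comment above). Use sites then consult only feat_en; a sketch of the two stages (the helper name is hypothetical, not from the driver):

    /* capability is a precondition; enablement is the decision */
    if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
        sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;

    /* later, at a use site: */
    if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
        start_fw_recovery_timer(sc);    /* hypothetical helper */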
@@ -4728,7 +5005,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->feat_cap & IXGBE_FEATURE_FDIR)
sc->feat_en |= IXGBE_FEATURE_FDIR;
else
- device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
+ device_printf(sc->dev,
+ "Device does not support Flow Director."
+ " Leaving disabled.");
}
/*
* Message Signal Interrupts - Extended (MSI-X)
@@ -4762,7 +5041,8 @@ ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
IXGBE_ESDP_SDP1;
if (reg & mask)
- device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
+ device_printf(sc->dev,
+ "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */
/************************************************************************
@@ -4772,14 +5052,43 @@ static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
struct ixgbe_nvm_version nvm_ver = {0};
- uint16_t phyfw = 0;
- int status;
const char *space = "";
+ ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
+ ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack, a build ID in Intel's SCM */
ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
- ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
- status = ixgbe_get_phy_firmware_version(hw, &phyfw);
+
+ /* FW version */
+ if ((nvm_ver.phy_fw_maj == 0x0 &&
+ nvm_ver.phy_fw_min == 0x0 &&
+ nvm_ver.phy_fw_id == 0x0) ||
+ (nvm_ver.phy_fw_maj == 0xF &&
+ nvm_ver.phy_fw_min == 0xFF &&
+ nvm_ver.phy_fw_id == 0xF)) {
+ /* If the major, minor and id numbers are all set to 0,
+ * reading the FW version is unsupported. If the major number
+ * is set to 0xF, the minor to 0xFF and the id to 0xF, the
+ * number read is invalid. */
+ } else
+ sbuf_printf(buf, "fw %d.%d.%d ",
+ nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
+ nvm_ver.phy_fw_id);
+
+ /* NVM version */
+ if ((nvm_ver.nvm_major == 0x0 &&
+ nvm_ver.nvm_minor == 0x0 &&
+ nvm_ver.nvm_id == 0x0) ||
+ (nvm_ver.nvm_major == 0xF &&
+ nvm_ver.nvm_minor == 0xFF &&
+ nvm_ver.nvm_id == 0xF)) {
+ /* If the major, minor and id numbers are all set to 0,
+ * reading the NVM version is unsupported. If the major number
+ * is set to 0xF, the minor to 0xFF and the id to 0xF, the
+ * number read is invalid. */
+ } else
+ sbuf_printf(buf, "nvm %x.%02x.%x ",
+ nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);
if (nvm_ver.oem_valid) {
sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
@@ -4789,18 +5098,15 @@ ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
if (nvm_ver.or_valid) {
sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
- space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+ space, nvm_ver.or_major, nvm_ver.or_build,
+ nvm_ver.or_patch);
space = " ";
}
if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
- NVM_VER_INVALID)) {
+ NVM_VER_INVALID | 0xFFFFFFFF)) {
sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
- space = " ";
}
-
- if (phyfw != 0 && status == IXGBE_SUCCESS)
- sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */
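Both new blocks skip printing when the version fields carry one of two sentinel patterns: all-zero fields mean the device cannot report that version at all, and 0xF/0xFF/0xF is what an invalid read returns. The empty if-bodies exist only to keep those comments next to the condition; logically this is a plain validity predicate (helper name illustrative, not from the driver):

    #include <stdbool.h>
    #include <stdint.h>

    /* Valid unless all-zero (unsupported) or the invalid-read sentinel. */
    static bool
    ver_fields_valid(uint8_t major, uint8_t minor, uint8_t id)
    {
        if (major == 0x0 && minor == 0x0 && id == 0x0)
            return (false);
        if (major == 0xF && minor == 0xFF && id == 0xF)
            return (false);
        return (true);
    }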
/************************************************************************
@@ -4838,7 +5144,7 @@ ixgbe_print_fw_version(if_ctx_t ctx)
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *buf;
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 66a1e4fe2df1..54b2c8c1dd68 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "opt_inet.h"
@@ -58,13 +58,18 @@ static const char ixv_driver_version[] = "2.0.1-k";
************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
+ "Intel(R) X520 82599 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
+ "Intel(R) X540 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
+ "Intel(R) X550 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
+ "Intel(R) X552 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
+ "Intel(R) X553 Virtual Function"),
/* required last entry */
-PVID_END
+ PVID_END
};
/************************************************************************
@@ -76,8 +81,10 @@ static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);
static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
-static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
-static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
@@ -239,17 +246,17 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
- int i, j, error;
+ int i, j, error;
MPASS(sc->num_tx_queues == ntxqsets);
MPASS(ntxqs == 1);
/* Allocate queue structure memory */
sc->tx_queues =
- (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
+ ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -263,13 +270,14 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
txr->sc = que->sc = sc;
/* Allocate report status array */
- if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
+ scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
txr->tail = IXGBE_VFTDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
@@ -299,15 +307,15 @@ ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que;
- int i, error;
+ int i, error;
MPASS(sc->num_rx_queues == nrxqsets);
MPASS(nrxqs == 1);
/* Allocate queue structure memory */
sc->rx_queues =
- (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
+ nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->rx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -348,7 +356,7 @@ ixv_if_queues_free(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_tx_queue *que = sc->tx_queues;
- int i;
+ int i;
if (que == NULL)
goto free;
@@ -382,11 +390,11 @@ free:
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
- struct ixgbe_softc *sc;
- device_t dev;
- if_softc_ctx_t scctx;
+ struct ixgbe_softc *sc;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixv_attach: begin");
@@ -458,7 +466,7 @@ ixv_if_attach_pre(if_ctx_t ctx)
/* Check if VF was disabled by PF */
error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
if (error) {
- /* PF is not capable of controlling VF state. Enable the link. */
+ /* PF is not capable of controlling VF state. Enable link. */
sc->link_enabled = true;
}
@@ -522,8 +530,8 @@ static int
ixv_if_attach_post(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int error = 0;
+ device_t dev = iflib_get_dev(ctx);
+ int error = 0;
/* Setup OS specific network interface */
error = ixv_setup_interface(ctx);
@@ -568,7 +576,7 @@ ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- int error = 0;
+ int error = 0;
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
@@ -596,9 +604,9 @@ ixv_if_init(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixv_if_init: begin");
hw->adapter_stopped = false;
@@ -670,8 +678,8 @@ static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 queue = 1 << vector;
- u32 mask;
+ u32 queue = 1 << vector;
+ u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -684,8 +692,8 @@ static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
+ u64 queue = (u64)(1 << vector);
+ u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
@@ -699,7 +707,7 @@ static int
ixv_msix_que(void *arg)
{
struct ix_rx_queue *que = arg;
- struct ixgbe_softc *sc = que->sc;
+ struct ixgbe_softc *sc = que->sc;
ixv_disable_queue(sc, que->msix);
++que->irqs;
@@ -713,9 +721,9 @@ ixv_msix_que(void *arg)
static int
ixv_msix_mbx(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ixgbe_hw *hw = &sc->hw;
- u32 reg;
+ u32 reg;
++sc->link_irq;
@@ -811,11 +819,13 @@ static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- int mbx_api[] = { ixgbe_mbox_api_12,
- ixgbe_mbox_api_11,
- ixgbe_mbox_api_10,
- ixgbe_mbox_api_unknown };
- int i = 0;
+ int mbx_api[] = {
+ ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown
+ };
+ int i = 0;
while (mbx_api[i] != ixgbe_mbox_api_unknown) {
if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
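The API list is now ordered newest-first and terminated by ixgbe_mbox_api_unknown; the loop walks it until the PF accepts a version. The loop body continues past this hunk; from the visible header it presumably breaks on success and advances the index otherwise, roughly:

    /* sketch of the negotiation walk (continuation assumed) */
    while (mbx_api[i] != ixgbe_mbox_api_unknown) {
        if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
            break;      /* PF accepted this mailbox API version */
        i++;            /* otherwise fall back to an older one */
    }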
@@ -830,7 +840,8 @@ ixv_negotiate_api(struct ixgbe_softc *sc)
static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
- bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+ bcopy(LLADDR(addr),
+ &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
return (++cnt);
@@ -844,11 +855,11 @@ ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
static void
ixv_if_multi_set(if_ctx_t ctx)
{
- u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u8 *update_ptr;
- if_t ifp = iflib_get_ifp(ctx);
- int mcnt = 0;
+ u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ u8 *update_ptr;
+ if_t ifp = iflib_get_ifp(ctx);
+ int mcnt = 0;
IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
@@ -908,8 +919,8 @@ static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- s32 status;
+ device_t dev = iflib_get_dev(ctx);
+ s32 status;
sc->hw.mac.get_link_status = true;
@@ -955,7 +966,7 @@ ixv_if_update_admin_status(if_ctx_t ctx)
static void
ixv_if_stop(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
INIT_DEBUGOUT("ixv_stop: begin\n");
@@ -981,8 +992,8 @@ ixv_if_stop(if_ctx_t ctx)
static void
ixv_identify_hardware(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
/* Save off the information about this board */
@@ -1023,22 +1034,24 @@ static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que;
- int error, rid, vector = 0;
- char buf[16];
+ int error, rid, vector = 0;
+ char buf[16];
for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
- IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
+ IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
+ buf);
if (error) {
device_printf(iflib_get_dev(ctx),
- "Failed to allocate que int %d err: %d", i, error);
+ "Failed to allocate que int %d err: %d",
+ i, error);
sc->num_rx_queues = i + 1;
goto fail;
}
@@ -1073,11 +1086,15 @@ ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
*/
if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
int msix_ctrl;
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
+ if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
+ device_printf(dev,
+ "Finding MSIX capability failed\n");
+ } else {
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
}
return (0);
@@ -1098,21 +1115,21 @@ static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int rid;
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
- sc->osdep.mem_bus_space_handle =
- rman_get_bushandle(sc->pci_mem);
+ sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
return (0);
@@ -1126,7 +1143,7 @@ ixv_free_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = sc->rx_queues;
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
if (sc->intr_type == IFLIB_INTR_MSIX)
@@ -1153,7 +1170,7 @@ ixv_setup_interface(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
INIT_DEBUGOUT("ixv_setup_interface: begin");
@@ -1175,7 +1192,7 @@ static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@@ -1219,16 +1236,16 @@ static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = sc->tx_queues;
- int i;
+ int i;
for (i = 0; i < sc->num_tx_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
- u64 tdba = txr->tx_paddr;
- u32 txctrl, txdctl;
- int j = txr->me;
+ u64 tdba = txr->tx_paddr;
+ u32 txctrl, txdctl;
+ int j = txr->me;
/* Set WTHRESH to 8, burst writeback */
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
@@ -1278,10 +1295,10 @@ static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 reta = 0, mrqc, rss_key[10];
- int queue_id;
- int i, j;
- u32 rss_hash_config;
+ u32 reta = 0, mrqc, rss_key[10];
+ int queue_id;
+ int i, j;
+ u32 rss_hash_config;
if (sc->feat_en & IXGBE_FEATURE_RSS) {
/* Fetch the configured RSS key */
@@ -1348,22 +1365,25 @@ ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_IPV6_EX defined,"
+ " but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined,"
+ " but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined,"
+ " but not supported\n", __func__);
IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
-
+#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/************************************************************************
* ixv_initialize_receive_units - Setup receive registers and features.
************************************************************************/
@@ -1371,22 +1391,22 @@ static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx;
- struct ixgbe_hw *hw = &sc->hw;
- if_t ifp = iflib_get_ifp(ctx);
+ if_softc_ctx_t scctx;
+ struct ixgbe_hw *hw = &sc->hw;
+#ifdef DEV_NETMAP
+ if_t ifp = iflib_get_ifp(ctx);
+#endif
struct ix_rx_queue *que = sc->rx_queues;
- u32 bufsz, psrtype;
+ u32 bufsz, psrtype;
- if (if_getmtu(ifp) > ETHERMTU)
- bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- psrtype = IXGBE_PSRTYPE_TCPHDR
- | IXGBE_PSRTYPE_UDPHDR
- | IXGBE_PSRTYPE_IPV4HDR
- | IXGBE_PSRTYPE_IPV6HDR
- | IXGBE_PSRTYPE_L2HDR;
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
if (sc->num_rx_queues > 1)
psrtype |= 1 << 29;
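The VF Rx setup no longer picks a 2 KiB or 4 KiB buffer from the MTU; it derives the SRRCTL packet-buffer size from the actual mbuf cluster size, rounded up to the register's granularity. IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field counts 1 KiB units, so BSIZEPKT_ROUNDUP is 1023. The arithmetic as a self-contained check:

    #include <assert.h>

    #define SRRCTL_BSIZEPKT_SHIFT   10  /* field unit: 1 KiB */
    #define BSIZEPKT_ROUNDUP        ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)

    int
    main(void)
    {
        /* 2048-byte clusters -> 2 units; 2049 would need 3 */
        assert(((2048 + BSIZEPKT_ROUNDUP) >> SRRCTL_BSIZEPKT_SHIFT) == 2);
        assert(((2049 + BSIZEPKT_ROUNDUP) >> SRRCTL_BSIZEPKT_SHIFT) == 3);
        assert(((4096 + BSIZEPKT_ROUNDUP) >> SRRCTL_BSIZEPKT_SHIFT) == 4);
        return (0);
    }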
@@ -1395,15 +1415,18 @@ ixv_initialize_receive_units(if_ctx_t ctx)
/* Tell PF our max_frame size */
if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
- device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
+ device_printf(sc->dev,
+ "There is a problem with the PF setup. It is likely the"
+ " receive unit for this VF will not function correctly."
+ "\n");
}
scctx = sc->shared;
for (int i = 0; i < sc->num_rx_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
- u64 rdba = rxr->rx_paddr;
- u32 reg, rxdctl;
- int j = rxr->me;
+ u64 rdba = rxr->rx_paddr;
+ u32 reg, rxdctl;
+ int j = rxr->me;
/* Disable the queue */
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
@@ -1494,10 +1517,10 @@ ixv_initialize_receive_units(if_ctx_t ctx)
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 ctrl, vid, vfta, retry;
+ u32 ctrl, vid, vfta, retry;
/*
* We get here thru if_init, meaning
@@ -1568,7 +1591,7 @@ static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1586,7 +1609,7 @@ static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1600,10 +1623,10 @@ ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *que = sc->rx_queues;
- u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -1635,7 +1658,7 @@ ixv_if_disable_intr(if_ctx_t ctx)
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = &sc->rx_queues[rxqid];
ixv_enable_queue(sc, que->rxr.me);
@@ -1655,7 +1678,7 @@ static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 ivar, index;
+ u32 ivar, index;
vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -1805,18 +1828,18 @@ ixv_update_stats(struct ixgbe_softc *sc)
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
- device_t dev = sc->dev;
- struct ix_tx_queue *tx_que = sc->tx_queues;
- struct ix_rx_queue *rx_que = sc->rx_queues;
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ device_t dev = sc->dev;
+ struct ix_tx_queue *tx_que = sc->tx_queues;
+ struct ix_rx_queue *rx_que = sc->rx_queues;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct ixgbevf_hw_stats *stats = &sc->stats.vf;
- struct sysctl_oid *stat_node, *queue_node;
- struct sysctl_oid_list *stat_list, *queue_list;
+ struct sysctl_oid *stat_node, *queue_node;
+ struct sysctl_oid_list *stat_list, *queue_list;
#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
+ char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
@@ -1919,9 +1942,9 @@ ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
- sc->feat_cap = IXGBE_FEATURE_NETMAP
- | IXGBE_FEATURE_VF
- | IXGBE_FEATURE_LEGACY_TX;
+ sc->feat_cap = IXGBE_FEATURE_NETMAP |
+ IXGBE_FEATURE_VF |
+ IXGBE_FEATURE_LEGACY_TX;
/* A tad short on feature flags for VFs, atm. */
switch (sc->hw.mac.type) {
diff --git a/sys/dev/ixgbe/if_sriov.c b/sys/dev/ixgbe/if_sriov.c
index 7cdd287b85bf..1998cdb016f7 100644
--- a/sys/dev/ixgbe/if_sriov.c
+++ b/sys/dev/ixgbe/if_sriov.c
@@ -95,33 +95,33 @@ ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
/* Support functions for SR-IOV/VF management */
static inline void
-ixgbe_send_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
if (vf->flags & IXGBE_VF_CTS)
msg |= IXGBE_VT_MSGTYPE_CTS;
- sc->hw.mbx.ops.write(&sc->hw, &msg, 1, vf->pool);
+ ixgbe_write_mbx(hw, &msg, 1, vf->pool);
}
static inline void
-ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
- ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_ACK);
+ ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}
static inline void
-ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
- ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_NACK);
+ ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}
static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
if (!(vf->flags & IXGBE_VF_CTS))
- ixgbe_send_vf_nack(sc, vf, 0);
+ ixgbe_send_vf_failure(sc, vf, 0);
}
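Two renames run through this file: the IXGBE_VT_MSGTYPE_ACK/NACK message types become IXGBE_VT_MSGTYPE_SUCCESS/FAILURE, matching the updated mailbox constants, and direct hw->mbx.ops.write()/read() calls are replaced by the ixgbe_write_mbx()/ixgbe_read_mbx() wrappers from the reworked ixgbe_mbx.c. Such a wrapper can validate the request before dispatching through the ops table; a sketch of that shape (driver types assumed, not the exact shared-code body):

    s32
    example_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
    {
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        if (size > mbx->size)
            return (IXGBE_ERR_MBX);     /* message too large */
        if (mbx->ops.write == NULL)
            return (IXGBE_ERR_CONFIG);  /* no mailbox on this MAC */
        return (mbx->ops.write(hw, msg, size, mbx_id));
    }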
static inline boolean_t
@@ -210,14 +210,14 @@ ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
for (int i = 0; i < sc->num_vfs; i++) {
vf = &sc->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE)
- ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
+ ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
}
} /* ixgbe_ping_all_vfs */
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
- uint16_t tag)
+ uint16_t tag)
{
struct ixgbe_hw *hw;
uint32_t vmolr, vmvir;
@@ -254,11 +254,21 @@ ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */
+static void
+ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
+ uint16_t mbx_size = hw->mbx.size;
+ uint16_t i;
+
+ for (i = 0; i < mbx_size; ++i)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
+} /* ixgbe_clear_vfmbmem */
static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
-
/*
* Frame size compatibility between PF and VF is only a problem on
* 82599-based cards. X540 and later support any combination of jumbo
@@ -271,8 +281,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
/*
- * On legacy (1.0 and older) VF versions, we don't support jumbo
- * frames on either the PF or the VF.
+ * On legacy (1.0 and older) VF versions, we don't support
+ * jumbo frames on either the PF or the VF.
*/
if (sc->max_frame_size > ETHER_MAX_LEN ||
vf->maximum_frame_size > ETHER_MAX_LEN)
@@ -291,8 +301,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
return (true);
/*
- * Jumbo frames only work with VFs if the PF is also using jumbo
- * frames.
+ * Jumbo frames only work with VFs if the PF is also using
+ * jumbo frames.
*/
if (sc->max_frame_size <= ETHER_MAX_LEN)
return (true);
@@ -310,6 +320,8 @@ ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
// XXX clear multicast addresses
ixgbe_clear_rar(&sc->hw, vf->rar_index);
+ ixgbe_clear_vfmbmem(sc, vf);
+ ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */
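A VF reset now scrubs more shared state. Besides clearing the VF's receive-address (RAR) entry, the PF zeroes that VF's slice of mailbox memory (ixgbe_clear_vfmbmem() above walks all hw->mbx.size words of the PFMBMEM array) and toggles the VF's Tx queue control (TXDCTL) bits through the new ixgbe_toggle_txdctl() helper, so a recycled pool can neither read a previous user's stale mailbox words nor inherit live Tx queues. The visible teardown ordering, as a sketch:

    /* on VF reset: identity first, then mailbox, then queues */
    ixgbe_clear_rar(&sc->hw, vf->rar_index);                 /* MAC filter  */
    ixgbe_clear_vfmbmem(sc, vf);                             /* mailbox RAM */
    ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));  /* Tx queues   */
    vf->api_ver = IXGBE_API_VER_UNKNOWN;                     /* renegotiate */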
@@ -362,19 +374,19 @@ ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
vf->pool, true);
- ack = IXGBE_VT_MSGTYPE_ACK;
+ ack = IXGBE_VT_MSGTYPE_SUCCESS;
} else
- ack = IXGBE_VT_MSGTYPE_NACK;
+ ack = IXGBE_VT_MSGTYPE_FAILURE;
ixgbe_vf_enable_transmit(sc, vf);
ixgbe_vf_enable_receive(sc, vf);
vf->flags |= IXGBE_VF_CTS;
- resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
+ resp[0] = IXGBE_VF_RESET | ack;
bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
resp[3] = hw->mac.mc_filter_type;
- hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
+ ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */
@@ -387,12 +399,12 @@ ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
/* Check that the VF has permission to change the MAC address. */
if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
if (ixgbe_validate_mac_addr(mac) != 0) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
@@ -401,7 +413,7 @@ ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
true);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */
@@ -435,7 +447,7 @@ ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
vmolr |= IXGBE_VMOLR_ROMPE;
IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
@@ -451,18 +463,18 @@ ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
/* It is illegal to enable vlan tag 0. */
if (tag == 0 && enable != 0) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */
@@ -477,7 +489,7 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (vf_max_size < ETHER_CRC_LEN) {
/* We intentionally ACK invalid LPE requests. */
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -485,7 +497,7 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
/* We intentionally ACK invalid LPE requests. */
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -507,16 +519,16 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
- uint32_t *msg)
+ uint32_t *msg)
{
//XXX implement this
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */
@@ -524,23 +536,23 @@ static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint32_t *msg)
{
-
switch (msg[1]) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_1_1:
vf->api_ver = msg[1];
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
break;
default:
vf->api_ver = IXGBE_API_VER_UNKNOWN;
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
break;
}
} /* ixgbe_vf_api_negotiate */
static void
-ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
+ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
+ uint32_t *msg)
{
struct ixgbe_hw *hw;
uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
@@ -552,11 +564,11 @@ ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
switch (msg[0]) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
- resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
+ resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
IXGBE_VT_MSGTYPE_CTS;
num_queues = ixgbe_vf_queues(sc->iov_mode);
@@ -565,16 +577,16 @@ ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
resp[IXGBE_VF_DEF_QUEUE] = 0;
- hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
+ ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
#ifdef KTR
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
#endif
struct ixgbe_hw *hw;
uint32_t msg[IXGBE_VFMAILBOX_SIZE];
@@ -582,7 +594,7 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
hw = &sc->hw;
- error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
+ error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
if (error != 0)
return;
@@ -595,7 +607,7 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
}
if (!(vf->flags & IXGBE_VF_CTS)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -622,17 +634,16 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
ixgbe_vf_get_queues(sc, vf, msg);
break;
default:
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
}
} /* ixgbe_process_vf_msg */
-
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw;
struct ixgbe_vf *vf;
int i;
@@ -643,13 +654,16 @@ ixgbe_handle_mbx(void *context)
vf = &sc->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE) {
- if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_rst(hw,
+ vf->pool) == 0)
ixgbe_process_vf_reset(sc, vf);
- if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_msg(hw,
+ vf->pool) == 0)
ixgbe_process_vf_msg(ctx, vf);
- if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_ack(hw,
+ vf->pool) == 0)
ixgbe_process_vf_ack(sc, vf);
}
}
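Every call site in this handler now indexes the mailbox ops by VF pool rather
than going through one shared table. A rough sketch of the new layout; the
authoritative definition lives in the ixgbe_type.h hunk of this patch, so treat
the array bound and field types below as assumptions:

	struct ixgbe_mbx_info {
		struct ixgbe_mbx_operations ops[64]; /* one table per VF pool */
		struct ixgbe_mbx_stats stats;
		u32 timeout;                         /* poll retries */
		u32 usec_delay;                      /* delay between retries */
		u32 vf_mailbox;                      /* cached VFMAILBOX bits */
		u16 size;                            /* mailbox size in dwords */
	};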
@@ -698,8 +712,10 @@ ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
}
sc->num_vfs = num_vfs;
- ixgbe_if_init(sc->ctx);
+ ixgbe_init_mbx_params_pf(&sc->hw);
+
sc->feat_en |= IXGBE_FEATURE_SRIOV;
+ ixgbe_if_init(sc->ctx);
return (retval);
@@ -769,7 +785,7 @@ ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
ixgbe_vf_enable_transmit(sc, vf);
ixgbe_vf_enable_receive(sc, vf);
- ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
+ ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */
void
@@ -784,27 +800,27 @@ ixgbe_initialize_iov(struct ixgbe_softc *sc)
/* RMW appropriate registers based on IOV mode */
/* Read... */
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Modify... */
- mrqc &= ~IXGBE_MRQC_MRQE_MASK;
- mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
- gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
switch (sc->iov_mode) {
case IXGBE_64_VM:
- mrqc |= IXGBE_MRQC_VMDQRSS64EN;
- mtqc |= IXGBE_MTQC_64VF;
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ mtqc |= IXGBE_MTQC_64VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
- gpie |= IXGBE_GPIE_VTMODE_64;
+ gpie |= IXGBE_GPIE_VTMODE_64;
break;
case IXGBE_32_VM:
- mrqc |= IXGBE_MRQC_VMDQRSS32EN;
- mtqc |= IXGBE_MTQC_32VF;
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ mtqc |= IXGBE_MTQC_32VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
- gpie |= IXGBE_GPIE_VTMODE_32;
+ gpie |= IXGBE_GPIE_VTMODE_32;
break;
default:
panic("Unexpected SR-IOV mode %d", sc->iov_mode);
diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c
index a593cb136760..76c718e2c252 100644
--- a/sys/dev/ixgbe/ix_txrx.c
+++ b/sys/dev/ixgbe/ix_txrx.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
@@ -80,7 +80,7 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
uint32_t vlan_macip_lens, type_tucmd_mlhl;
uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
- u8 ehdrlen;
+ u8 ehdrlen;
offload = true;
olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
@@ -105,9 +105,12 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
/* First check if TSO is to be used */
if (pi->ipi_csum_flags & CSUM_TSO) {
/* This is used in the transmit desc in encap */
- pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
- mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+ pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen -
+ pi->ipi_tcp_hlen;
+ mss_l4len_idx |=
+ (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |=
+ (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
}
olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
@@ -126,7 +129,8 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
- if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
+ if (pi->ipi_csum_flags &
+ (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
else
offload = false;
@@ -168,17 +172,17 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
- int nsegs = pi->ipi_nsegs;
- bus_dma_segment_t *segs = pi->ipi_segs;
- union ixgbe_adv_tx_desc *txd = NULL;
+ struct ixgbe_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ union ixgbe_adv_tx_desc *txd = NULL;
struct ixgbe_adv_tx_context_desc *TXD;
- int i, j, first, pidx_last;
- uint32_t olinfo_status, cmd, flags;
- qidx_t ntxd;
+ int i, j, first, pidx_last;
+ uint32_t olinfo_status, cmd, flags;
+ qidx_t ntxd;
cmd = (IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
@@ -249,9 +253,9 @@ ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
+ struct tx_ring *txr = &que->txr;
IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */
@@ -263,14 +267,14 @@ static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
- qidx_t processed = 0;
- int updated;
- qidx_t cur, prev, ntxd, rs_cidx;
- int32_t delta;
- uint8_t status;
+ struct tx_ring *txr = &que->txr;
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
@@ -319,9 +323,9 @@ ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
- struct ixgbe_softc *sc = arg;
- struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct ixgbe_softc *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
+ struct rx_ring *rxr = &que->rxr;
uint64_t *paddrs;
int i;
uint32_t next_pidx, pidx;
@@ -342,11 +346,12 @@ ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
* ixgbe_isc_rxd_flush
************************************************************************/
static void
-ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
+ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
+ qidx_t pidx)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct rx_ring *rxr = &que->rxr;
IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */
@@ -357,12 +362,12 @@ ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pi
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
- struct ixgbe_softc *sc = arg;
- struct ix_rx_queue *que = &sc->rx_queues[qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct ixgbe_softc *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[qsidx];
+ struct rx_ring *rxr = &que->rxr;
union ixgbe_adv_rx_desc *rxd;
- uint32_t staterr;
- int cnt, i, nrxd;
+ uint32_t staterr;
+ int cnt, i, nrxd;
nrxd = sc->shared->isc_nrxd[0];
for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
@@ -391,16 +396,16 @@ ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
- struct rx_ring *rxr = &que->rxr;
- union ixgbe_adv_rx_desc *rxd;
+ struct ixgbe_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ union ixgbe_adv_rx_desc *rxd;
- uint16_t pkt_info, len, cidx, i;
- uint32_t ptype;
- uint32_t staterr = 0;
- bool eop;
+ uint16_t pkt_info, len, cidx, i;
+ uint32_t ptype;
+ uint32_t staterr = 0;
+ bool eop;
i = 0;
cidx = ri->iri_cidx;
@@ -425,7 +430,8 @@ ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Make sure bad packets are discarded */
if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
if (sc->feat_en & IXGBE_FEATURE_VF)
- if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS, 1);
+ if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS,
+ 1);
rxr->rx_discarded++;
return (EBADMSG);
@@ -478,7 +484,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
uint8_t errors = (uint8_t)(staterr >> 24);
/* If there is a layer 3 or 4 error we are done */
- if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
+ if (__predict_false(errors &
+ (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
@@ -492,7 +499,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
- ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ ri->iri_csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 3dae3aeebaa1..341d4ebfcebc 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2001-2017, Intel Corporation
@@ -30,7 +30,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -434,6 +434,10 @@ struct ixgbe_softc {
/* Bypass */
struct ixgbe_bp_data bypass;
+ /* Firmware error check */
+ int recovery_mode;
+ struct callout fw_mode_timer;
+
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_header_failed;
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 8c3df0fd4f59..50902c6c356d 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -324,6 +324,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
+ u16 i;
DEBUGFUNC("ixgbe_init_ops_82599");
@@ -385,7 +386,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
& IXGBE_FWSM_MODE_MASK);
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
eeprom->ops.read = ixgbe_read_eeprom_82599;
@@ -433,12 +435,25 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
goto out;
}
+ if (hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = true;
+
+ if (hw->phy.multispeed_fiber)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ goto out;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not
@@ -1535,7 +1550,7 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
}
/**
- * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
* @input: unique input dword
* @common: compressed common input dword
@@ -1757,7 +1772,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case 0x0000:
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- /* FALLTHROUGH */
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
@@ -2047,7 +2064,9 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
DEBUGOUT(" Error on src/dst port\n");
return IXGBE_ERR_CONFIG;
}
- /* FALLTHROUGH */
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index eded950e2881..4c50f10ed92e 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -904,7 +904,7 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
}
/**
- * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
@@ -1134,6 +1134,19 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
}
/**
+ * ixgbe_toggle_txdctl - Toggle VF's queues
+ * @hw: pointer to hardware structure
+ * @vind: VMDq pool index
+ *
+ * Enable and then disable each Tx queue owned by the VF.
+ */
+s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.toggle_txdctl, (hw,
+ vind), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
* ixgbe_fc_enable - Enable flow control
* @hw: pointer to hardware structure
*
@@ -1417,15 +1430,15 @@ s32 ixgbe_bypass_rw(struct ixgbe_hw *hw, u32 cmd, u32 *status)
/**
* ixgbe_bypass_valid_rd - Verify valid return from bit-bang.
+ * @hw: pointer to hardware structure
+ * @in_reg: The register cmd for the bit-bang read.
+ * @out_reg: The register returned from a bit-bang read.
*
* If we send a write we can't be sure it took until we can read back
* that same register. It can be a problem as some of the fields may
 * for valid reasons change in between the time we wrote the register and
 * when we read it again to verify. So this function checks everything we
* can check and then assumes it worked.
- *
- * @u32 in_reg - The register cmd for the bit-bang read.
- * @u32 out_reg - The register returned from a bit-bang read.
**/
bool ixgbe_bypass_valid_rd(struct ixgbe_hw *hw, u32 in_reg, u32 out_reg)
{
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 9134971d9c98..b81510dacb95 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -131,6 +131,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, u32 *vfta_delta, u32 vfta,
bool vlvf_bypass);
+s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
@@ -148,7 +149,6 @@ u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 6c1396ad964f..df7ab90e72ab 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -133,6 +133,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.init_uta_tables = NULL;
mac->ops.enable_rx = ixgbe_enable_rx_generic;
mac->ops.disable_rx = ixgbe_disable_rx_generic;
+ mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;
/* Flow Control */
mac->ops.fc_enable = ixgbe_fc_enable_generic;
@@ -171,7 +172,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
- /* flow control autoneg black list */
+ /* flow control autoneg block list */
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
@@ -268,8 +269,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val != IXGBE_SUCCESS)
goto out;
- /* only backplane uses autoc */
- /* FALLTHROUGH */
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ break;
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
@@ -713,7 +714,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
return ret_val;
}
- if (length == 0xFFFF || length == 0) {
+ if (length == 0xFFFF || length == 0 || length > hw->eeprom.word_size) {
DEBUGOUT("NVM PBA number section invalid length\n");
return IXGBE_ERR_PBA_SECTION;
}
@@ -1146,10 +1147,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
msec_delay(2);
/*
- * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -3208,32 +3209,32 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else IXGBE_SUCCESS
+ * is returned signifying primary requests disabled.
**/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 i, poll;
u16 value;
- DEBUGFUNC("ixgbe_disable_pcie_master");
+ DEBUGFUNC("ixgbe_disable_pcie_primary");
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
IXGBE_REMOVED(hw->hw_addr))
goto out;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
usec_delay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
@@ -3241,13 +3242,13 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+ DEBUGOUT("GIO Primary Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
if (hw->mac.type >= ixgbe_mac_X550)
@@ -3269,7 +3270,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"PCIe transaction pending bit also did not clear.\n");
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ status = IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
out:
return status;
@@ -3866,14 +3867,15 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with
+ * a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
 * This function should only be invoked in IOV mode.
 * In IOV mode, the default pool is the next pool after the number of
 * VFs advertised, not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
- *
- * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @vmdq: VMDq pool index
**/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
@@ -4137,6 +4139,62 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
+
+/**
+ * ixgbe_toggle_txdctl_generic - Toggle VF's queues
+ * @hw: pointer to hardware structure
+ * @vf_number: VF index
+ *
+ * Enable and then disable each Tx queue owned by the VF.
+ */
+s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
+{
+ u8 queue_count, i;
+ u32 offset, reg;
+
+ if (vf_number > 63)
+ return IXGBE_ERR_PARAM;
+
+ /*
+ * Determine number of queues by checking
+ * number of virtual functions
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
+ case IXGBE_GCR_EXT_VT_MODE_64:
+ queue_count = 2;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_32:
+ queue_count = 4;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_16:
+ queue_count = 8;
+ break;
+ default:
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* Toggle queues */
+ for (i = 0; i < queue_count; ++i) {
+ /* Calculate offset of current queue */
+ offset = queue_count * vf_number + i;
+
+ /* Enable queue */
+ reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
+ reg |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable queue */
+ reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
+ reg &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
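A worked example of the offset arithmetic above, assuming the device reports
IXGBE_GCR_EXT_VT_MODE_32 (so queue_count is 4) and we toggle VF 5:

	/* VF 5 owns the contiguous queue block 4*5 .. 4*5+3 = 20..23 */
	for (i = 0; i < queue_count; i++) {
		offset = queue_count * vf_number + i;   /* 20, 21, 22, 23 */
		/* PVFTXDCTL(offset): set, flush, then clear TXDCTL_ENABLE */
	}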
/**
* ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
* @hw: pointer to hardware structure
@@ -4778,8 +4836,10 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* configure remaining packet buffers */
- /* FALLTHROUGH */
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
case PBA_STRATEGY_EQUAL:
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
for (; i < num_pb; i++)
@@ -4880,7 +4940,7 @@ static const u8 ixgbe_emc_therm_limit[4] = {
};
/**
- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Returns the thermal sensor data structure
@@ -5148,15 +5208,14 @@ s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
/**
* ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+ * @in_reg: The register cmd for the bit-bang read.
+ * @out_reg: The register returned from a bit-bang read.
*
* If we send a write we can't be sure it took until we can read back
* that same register. It can be a problem as some of the fields may
 * for valid reasons change in between the time we wrote the register and
 * when we read it again to verify. So this function checks everything we
* can check and then assumes it worked.
- *
- * @u32 in_reg - The register cmd for the bit-bang read.
- * @u32 out_reg - The register returned from a bit-bang read.
**/
bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
{
@@ -5207,7 +5266,7 @@ bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
* ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
*
* @hw: pointer to hardware structure
- * @cmd: The control word we are setting.
+ * @ctrl: The control word we are setting.
* @event: The event we are setting in the FW. This also happens to
* be the mask for the event we are setting (handy)
* @action: The action we set the event to in the FW. This is in a
@@ -5392,6 +5451,103 @@ void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
}
}
+/**
+ * ixgbe_get_nvm_version - Return version of NVM and its components
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * Irrelevant component fields will return 0; read errors will return 0xff.
+ **/
+void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 word, phy_ver;
+
+ DEBUGFUNC("ixgbe_get_nvm_version");
+
+ memset(nvm_ver, 0, sizeof(struct ixgbe_nvm_version));
+
+ /* eeprom version is mac-type specific */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_82598, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
+ >> NVM_EEP_MIN_SHIFT);
+ nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
+ break;
+ case ixgbe_mac_X540:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
+ >> NVM_EEP_MIN_SHIFT);
+ nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
+ break;
+
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = (word & NVM_EEP_X550_MINOR_MASK);
+
+ break;
+ default:
+ break;
+ }
+
+ /* phy version is mac-type specific */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* intel phy firmware version */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->phy_fw_maj = ((word & NVM_PHY_MAJOR_MASK)
+ >> NVM_PHY_MAJ_SHIFT);
+ nvm_ver->phy_fw_min = ((word & NVM_PHY_MINOR_MASK)
+ >> NVM_PHY_MIN_SHIFT);
+ nvm_ver->phy_fw_id = (word & NVM_PHY_ID_MASK);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_get_etk_id(hw, nvm_ver);
+
+ /* devstarter image */
+ if (ixgbe_read_eeprom(hw, NVM_DS_OFFSET, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->devstart_major = ((word & NVM_DS_MAJOR_MASK) >> NVM_DS_SHIFT);
+ nvm_ver->devstart_minor = (word & NVM_DS_MINOR_MASK);
+
+ /* OEM customization word */
+ if (ixgbe_read_eeprom(hw, NVM_OEM_OFFSET, &nvm_ver->oem_specific))
+ nvm_ver->oem_specific = NVM_VER_INVALID;
+
+ /* vendor (not intel) phy firmware version */
+ if (ixgbe_get_phy_firmware_version(hw, &phy_ver))
+ phy_ver = NVM_VER_INVALID;
+ nvm_ver->phy_vend_maj = ((phy_ver & NVM_PHYVEND_MAJOR_MASK)
+ >> NVM_PHYVEND_SHIFT);
+ nvm_ver->phy_vend_min = (phy_ver & NVM_PHYVEND_MINOR_MASK);
+
+ /* Option Rom may or may not be present. Start with pointer */
+ ixgbe_get_orom_version(hw, nvm_ver);
+ return;
+}
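A minimal usage sketch; the dev handle and message format are illustrative,
but the struct fields are the ones populated above:

	struct ixgbe_nvm_version nvm;

	ixgbe_get_nvm_version(hw, &nvm);
	device_printf(dev, "NVM %u.%u (id %u), PHY FW %u.%u\n",
	    nvm.nvm_major, nvm.nvm_minor, nvm.nvm_id,
	    nvm.phy_fw_maj, nvm.phy_fw_min);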
/**
* ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index a55003b4cfe4..a2da9c834f8f 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -118,7 +118,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,6 +141,7 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlvf_bypass);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass);
+s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vind);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -195,6 +196,8 @@ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c
index 0ebc5456eda5..29ee3117edcb 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.c
+++ b/sys/dev/ixgbe/ixgbe_dcb.c
@@ -293,7 +293,7 @@ void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
}
/**
- * ixgbe_dcb_config - Struct containing DCB settings.
+ * ixgbe_dcb_check_config_cee - Check DCB settings against DCB rules.
* @dcb_config: Pointer to DCB config structure
*
* This function checks DCB rules for DCB settings.
diff --git a/sys/dev/ixgbe/ixgbe_dcb.h b/sys/dev/ixgbe/ixgbe_dcb.h
index b31dfae0cdfa..54decd4d081d 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.h
+++ b/sys/dev/ixgbe/ixgbe_dcb.h
@@ -40,9 +40,9 @@
/* DCB defines */
/* DCB credit calculation defines */
#define IXGBE_DCB_CREDIT_QUANTUM 64
-#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
-#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+#define IXGBE_DCB_MAX_CREDIT 4095 /* 0xFFF: ~256KB / 64B */
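In 64-byte credit quanta, the new limits work out to:

	511 * 64 B  = 32,704 B   (refill cap, just above the 32 KB max TSO packet)
	4095 * 64 B = 262,080 B  (~256 KB, the most the 12-bit credit field holds)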
/* 513 for 32KB TSO packet */
#define IXGBE_DCB_MIN_TSO_CREDIT \
diff --git a/sys/dev/ixgbe/ixgbe_features.h b/sys/dev/ixgbe/ixgbe_features.h
index ed35a6ed458c..0cef334a185f 100644
--- a/sys/dev/ixgbe/ixgbe_features.h
+++ b/sys/dev/ixgbe/ixgbe_features.h
@@ -56,6 +56,7 @@
#define IXGBE_FEATURE_EEE (u32)(1 << 11)
#define IXGBE_FEATURE_LEGACY_IRQ (u32)(1 << 12)
#define IXGBE_FEATURE_NEEDS_CTXD (u32)(1 << 13)
+#define IXGBE_FEATURE_RECOVERY_MODE (u32)(1 << 15)
/* Check for OS support. Undefine features if not included in the OS */
#ifndef PCI_IOV
diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c
index d12aadea7097..7f58a9202c9e 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.c
+++ b/sys/dev/ixgbe/ixgbe_mbx.c
@@ -35,6 +35,9 @@
#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id);
+
/**
* ixgbe_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
@@ -47,42 +50,94 @@
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_read_mbx");
/* limit read to size of mailbox */
- if (size > mbx->size)
+ if (size > mbx->size) {
+ ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %u, changing to %u",
+ size, mbx->size);
size = mbx->size;
+ }
+
+ if (mbx->ops[mbx_id].read)
+ return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
+
+ return IXGBE_ERR_CONFIG;
+}
+
+/**
+ * ixgbe_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_poll_mbx");
- if (mbx->ops.read)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+ if (!mbx->ops[mbx_id].read || !mbx->ops[mbx_id].check_for_msg ||
+ !mbx->timeout)
+ return IXGBE_ERR_CONFIG;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size) {
+ ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %u, changing to %u",
+ size, mbx->size);
+ size = mbx->size;
+ }
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
return ret_val;
}
/**
- * ixgbe_write_mbx - Write a message to the mailbox
+ * ixgbe_write_mbx - Write a message to the mailbox and wait for ACK
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
- * returns SUCCESS if it successfully copied message into the buffer
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ACK to that message within specified period
+ *
+ * Note that the caller to this function must lock before calling, since
+ * multiple threads can destroy each other messages.
**/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_SUCCESS;
+ s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_write_mbx");
+ /*
+ * exit if either we can't write, release
+ * or there is no timeout defined
+ */
+ if (!mbx->ops[mbx_id].write || !mbx->ops[mbx_id].check_for_ack ||
+ !mbx->ops[mbx_id].release || !mbx->timeout)
+ return IXGBE_ERR_CONFIG;
+
if (size > mbx->size) {
- ret_val = IXGBE_ERR_MBX;
+ ret_val = IXGBE_ERR_PARAM;
ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
- "Invalid mailbox message size %d", size);
- } else if (mbx->ops.write)
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ "Invalid mailbox message size %u", size);
+ } else {
+ ret_val = mbx->ops[mbx_id].write(hw, msg, size, mbx_id);
+ }
return ret_val;
}
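Given the locking note above, a call site would be expected to look roughly
like this; mbx_mtx is an illustrative lock name, not something defined in this
patch:

	mtx_lock(&sc->mbx_mtx);     /* hypothetical per-softc mailbox lock */
	error = ixgbe_write_mbx(hw, msg, len, vf->pool);
	mtx_unlock(&sc->mbx_mtx);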
@@ -97,12 +152,12 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_msg");
- if (mbx->ops.check_for_msg)
- ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_msg)
+ ret_val = mbx->ops[mbx_id].check_for_msg(hw, mbx_id);
return ret_val;
}
@@ -117,12 +172,12 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_ack");
- if (mbx->ops.check_for_ack)
- ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_ack)
+ ret_val = mbx->ops[mbx_id].check_for_ack(hw, mbx_id);
return ret_val;
}
@@ -137,12 +192,32 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_rst");
- if (mbx->ops.check_for_rst)
- ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_rst)
+ ret_val = mbx->ops[mbx_id].check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_clear_mbx - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * Set VFMBMEM of given VF to 0x0.
+ **/
+s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_clear_mbx");
+
+ if (mbx->ops[mbx_id].clear)
+ ret_val = mbx->ops[mbx_id].clear(hw, mbx_id);
return ret_val;
}
@@ -161,22 +236,23 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
DEBUGFUNC("ixgbe_poll_for_msg");
- if (!countdown || !mbx->ops.check_for_msg)
- goto out;
+ if (!countdown || !mbx->ops[mbx_id].check_for_msg)
+ return IXGBE_ERR_CONFIG;
- while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (countdown && mbx->ops[mbx_id].check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
- if (countdown == 0)
+ if (countdown == 0) {
ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Polling for VF%d mailbox message timedout", mbx_id);
+ "Polling for VF%u mailbox message timedout", mbx_id);
+ return IXGBE_ERR_TIMEOUT;
+ }
-out:
- return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+ return IXGBE_SUCCESS;
}
/**
@@ -193,115 +269,71 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
DEBUGFUNC("ixgbe_poll_for_ack");
- if (!countdown || !mbx->ops.check_for_ack)
- goto out;
+ if (!countdown || !mbx->ops[mbx_id].check_for_ack)
+ return IXGBE_ERR_CONFIG;
- while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (countdown && mbx->ops[mbx_id].check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
- if (countdown == 0)
+ if (countdown == 0) {
ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Polling for VF%d mailbox ack timedout", mbx_id);
+ "Polling for VF%u mailbox ack timedout", mbx_id);
+ return IXGBE_ERR_TIMEOUT;
+ }
-out:
- return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+ return IXGBE_SUCCESS;
}
/**
- * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * ixgbe_read_mailbox_vf - read VF's mailbox register
* @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
*
- * returns SUCCESS if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function is used to read the mailbox register dedicated to the VF
+ * without losing the read-to-clear status bits.
**/
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static u32 ixgbe_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
-
- DEBUGFUNC("ixgbe_read_posted_mbx");
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
-
- ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
- return ret_val;
+ return vf_mailbox;
}
-/**
- * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
- **/
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
-
- DEBUGFUNC("ixgbe_write_posted_mbx");
-
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbe_poll_for_ack(hw, mbx_id);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
}
-/**
- * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
- * @hw: pointer to the HW structure
- *
- * Setups up the mailbox read and write message function pointers
- **/
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
-/**
- * ixgbe_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
- *
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
- **/
-static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
-
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -314,15 +346,12 @@ static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- if (v2p_mailbox & mask)
- ret_val = IXGBE_SUCCESS;
-
- hw->mbx.v2p_mailbox &= ~mask;
+ if (vf_mailbox & mask)
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -334,17 +363,13 @@ static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
**/
static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_msg_vf");
- if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.reqs++;
- }
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS))
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -356,17 +381,16 @@ static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.acks++;
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_ack_vf(hw);
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -378,18 +402,17 @@ static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_rst_vf");
- if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
- IXGBE_VFMAILBOX_RSTI))) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.rsts++;
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD)) {
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_rst_vf(hw);
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -400,21 +423,115 @@ static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox;
DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return IXGBE_ERR_CONFIG;
- /* reserve mailbox for vf use */
- if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = IXGBE_SUCCESS;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = IXGBE_SUCCESS;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (ret_val != IXGBE_SUCCESS) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Failed to obtain mailbox lock");
+ ret_val = IXGBE_ERR_TIMEOUT;
+ }
return ret_val;
}
/**
+ * ixgbe_release_mbx_lock_dummy - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ UNREFERENCED_2PARAMETER(hw, mbx_id);
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_dummy");
+}
+
+/**
+ * ixgbe_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ u32 vf_mailbox;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_vf");
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_write_mbx_vf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_clear_msg_vf(hw);
+ ixgbe_check_for_ack_vf(hw, 0);
+ ixgbe_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -426,6 +543,7 @@ static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
+ u32 vf_mailbox;
s32 ret_val;
u16 i;
@@ -436,11 +554,11 @@ static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
- goto out_no_write;
+ goto out;
/* flush msg and acks as we are overwriting the message buffer */
- ixgbe_check_for_msg_vf(hw, 0);
- ixgbe_check_for_ack_vf(hw, 0);
+ ixgbe_clear_msg_vf(hw);
+ ixgbe_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -449,15 +567,22 @@ static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_tx++;
- /* Drop VFU and interrupt the PF to tell it a message has been sent */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ixgbe_poll_for_ack(hw, mbx_id);
+
+out:
+ hw->mbx.ops[mbx_id].release(hw, mbx_id);
-out_no_write:
return ret_val;
}
/**
- * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -465,19 +590,19 @@ out_no_write:
*
* returns SUCCESS if it successfully read message from buffer
**/
-static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static s32 ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
- s32 ret_val = IXGBE_SUCCESS;
+ s32 ret_val;
u16 i;
- DEBUGFUNC("ixgbe_read_mbx_vf");
+ DEBUGFUNC("ixgbe_read_mbx_vf_legacy");
UNREFERENCED_1PARAMETER(mbx_id);
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
- goto out_no_read;
+ return ret_val;
/* copy the message from the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -489,34 +614,74 @@ static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_rx++;
-out_no_read:
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* check if there is a message from PF */
+ ret_val = ixgbe_check_for_msg_vf(hw, 0);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_MBX_NOMSG;
+
+ ixgbe_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return IXGBE_SUCCESS;
}
/**
* ixgbe_init_mbx_params_vf - set initial values for vf mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for vf mailbox
+ * Initializes a single set of ops in the hw->mbx struct to the correct
+ * values for the vf mailbox; the legacy set of functions is used here.
*/
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- /* start mailbox as timed out and let the reset_hw call set the timeout
- * value to begin communications */
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
- mbx->ops.read = ixgbe_read_mbx_vf;
- mbx->ops.write = ixgbe_write_mbx_vf;
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
- mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
- mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
- mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+ /* VF has only one mailbox connection, no need for more IDs */
+ mbx->ops[0].release = ixgbe_release_mbx_lock_dummy;
+ mbx->ops[0].read = ixgbe_read_mbx_vf_legacy;
+ mbx->ops[0].write = ixgbe_write_mbx_vf_legacy;
+ mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+ mbx->ops[0].clear = NULL;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
@@ -525,62 +690,119 @@ void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
mbx->stats.rsts = 0;
}
+/**
+ * ixgbe_upgrade_mbx_params_vf - upgrade the vf mailbox to the full ops set
+ * @hw: pointer to the HW structure
+ *
+ * Reinitializes the hw->mbx struct with the full (non-legacy) vf mailbox ops
+ */
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ /* VF has only one mailbox connection, no need for more IDs */
+ mbx->ops[0].release = ixgbe_release_mbx_lock_vf;
+ mbx->ops[0].read = ixgbe_read_mbx_vf;
+ mbx->ops[0].write = ixgbe_write_mbx_vf;
+ mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+ mbx->ops[0].clear = NULL;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
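[Editorial sketch] A sketch of when the upgrade above would be applied, assuming the if_ixv.c side of this patch (not shown here) calls it after a successful API negotiation:

/* Sketch only: move the VF from the legacy ops to the new set once
 * the PF has agreed to mailbox API 1.5. */
if (ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_15) == 0)
	ixgbe_upgrade_mbx_params_vf(hw);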
+
+static void ixgbe_clear_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+ u32 pfmbicr;
+
+ pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+ if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift))
+ hw->mbx.stats.reqs++;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+ IXGBE_PFMBICR_VFREQ_VF1 << vf_shift);
+}
+
+static void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+ u32 pfmbicr;
+
+ pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+ if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift))
+ hw->mbx.stats.acks++;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+ IXGBE_PFMBICR_VFACK_VF1 << vf_shift);
+}
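[Editorial note] The two helpers above rely on the PFMBICR split defined later in ixgbe_type.h: each of the four PFMBICR registers carries the request bits of 16 VFs in its low half and their ack bits in the high half. A worked example of the addressing:

/* vf_id = 37:
 *   IXGBE_PFMBICR_INDEX(37) = 37 >> 4 = 2      (third PFMBICR register)
 *   IXGBE_PFMBICR_SHIFT(37) = 37 % 16 = 5
 *   request bit: IXGBE_PFMBICR_VFREQ_VF1 << 5 = 0x00000020
 *   ack bit:     IXGBE_PFMBICR_VFACK_VF1 << 5 = 0x00200000
 */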
+
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
- u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
- if (mbvficr & mask) {
- ret_val = IXGBE_SUCCESS;
- IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ if (pfmbicr & mask) {
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
* ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
- u32 vf_bit = vf_number % 16;
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
DEBUGFUNC("ixgbe_check_for_msg_pf");
- if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.reqs++;
- }
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
+ index))
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
* ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
s32 ret_val = IXGBE_ERR_MBX;
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
- u32 vf_bit = vf_number % 16;
DEBUGFUNC("ixgbe_check_for_ack_pf");
- if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
index)) {
ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.acks++;
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_ack_pf(hw, vf_id);
}
return ret_val;
@@ -589,28 +811,28 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
/**
* ixgbe_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id)
{
- u32 reg_offset = (vf_number < 32) ? 0 : 1;
- u32 vf_shift = vf_number % 32;
- u32 vflre = 0;
+ u32 vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
+ u32 index = IXGBE_PFVFLRE_INDEX(vf_id);
s32 ret_val = IXGBE_ERR_MBX;
+ u32 vflre = 0;
DEBUGFUNC("ixgbe_check_for_rst_pf");
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
- vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
case ixgbe_mac_X540:
- vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
break;
default:
break;
@@ -618,7 +840,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
if (vflre & (1 << vf_shift)) {
ret_val = IXGBE_SUCCESS;
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
hw->mbx.stats.rsts++;
}
@@ -628,121 +850,297 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
/**
* ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
s32 ret_val = IXGBE_ERR_MBX;
- u32 p2v_mailbox;
+ u32 pf_mailbox;
DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+ if (!mbx->timeout)
+ return IXGBE_ERR_CONFIG;
- /* reserve mailbox for vf use */
- p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
- if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
- ret_val = IXGBE_SUCCESS;
- else
- ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Failed to obtain mailbox lock for VF%d", vf_number);
+ while (countdown--) {
+ /* Reserve mailbox for PF use */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ /* Check if other thread holds the PF lock already */
+ if (pf_mailbox & IXGBE_PFMAILBOX_PFU)
+ goto retry;
+
+ pf_mailbox |= IXGBE_PFMAILBOX_PFU;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+ /* Verify that PF is the owner of the lock */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
+ ret_val = IXGBE_SUCCESS;
+ break;
+ }
+
+ retry:
+ /* Wait a bit before trying again */
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (ret_val != IXGBE_SUCCESS) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Failed to obtain mailbox lock");
+ ret_val = IXGBE_ERR_TIMEOUT;
+ }
return ret_val;
}
/**
+ * ixgbe_release_mbx_lock_pf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ **/
+static void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 pf_mailbox;
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_pf");
+
+ /* Return ownership of the buffer */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox &= ~IXGBE_PFMAILBOX_PFU;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+}
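[Editorial sketch] The obtain/release pair above gives the non-legacy PF paths a conventional lock discipline; a sketch of the pattern the new read/write functions below follow (both helpers are static to this file, so this illustrates the in-file usage, not a public API):

if (ixgbe_obtain_mbx_lock_pf(hw, vf_id) == IXGBE_SUCCESS) {
	/* ... access PFMBMEM/PFMAILBOX for this vf_id ... */
	hw->mbx.ops[vf_id].release(hw, vf_id);
}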
+
+/**
+ * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+ if (ret_val)
+ return ret_val;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_id);
+ ixgbe_clear_msg_pf(hw, vf_id);
+ ixgbe_check_for_ack_pf(hw, vf_id);
+ ixgbe_clear_ack_pf(hw, vf_id);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_id)
{
+ u32 pf_mailbox;
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_write_mbx_pf");
/* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
if (ret_val)
- goto out_no_write;
+ goto out;
/* flush msg and acks as we are overwriting the message buffer */
- ixgbe_check_for_msg_pf(hw, vf_number);
- ixgbe_check_for_ack_pf(hw, vf_number);
+ ixgbe_clear_msg_pf(hw, vf_id);
+ ixgbe_clear_ack_pf(hw, vf_id);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
- /* Interrupt VF to tell it a message has been sent and release buffer*/
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+ /* interrupt VF to tell it a message has been sent */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox |= IXGBE_PFMAILBOX_STS;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ if (msg[0] & IXGBE_VT_MSGTYPE_CTS)
+ ixgbe_poll_for_ack(hw, vf_id);
/* update stats */
hw->mbx.stats.msgs_tx++;
-out_no_write:
+out:
+ hw->mbx.ops[vf_id].release(hw, vf_id);
+
return ret_val;
}
/**
+ * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_id)
{
+ u32 pf_mailbox;
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_read_mbx_pf");
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
- if (ret_val)
- goto out_no_read;
+ /* check if there is a message from VF */
+ ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_MBX_NOMSG;
+
+ ixgbe_clear_msg_pf(hw, vf_id);
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
/* Acknowledge the message and release buffer */
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox |= IXGBE_PFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
/* update stats */
hw->mbx.stats.msgs_rx++;
-out_no_read:
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_mbx_pf - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Set VFMBMEM of the given VF to 0x0.
+ **/
+static s32 ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u16 mbx_size = hw->mbx.size;
+ u16 i;
+
+ if (vf_id > 63)
+ return IXGBE_ERR_PARAM;
+
+ for (i = 0; i < mbx_size; ++i)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);
+
+ return IXGBE_SUCCESS;
+}
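[Editorial sketch] A sketch of how a PF-side VF teardown or reset path could scrub the slot through the per-VF ops table (the ixgbe_clear_mbx wrapper declared in ixgbe_mbx.h presumably performs the same dispatch):

/* Sketch: wipe VFMBMEM for a departing VF so its successor cannot
 * read a stale message; clear may be NULL in some ops sets. */
if (hw->mbx.ops[vf_id].clear)
	hw->mbx.ops[vf_id].clear(hw, vf_id);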
+
+/**
+ * ixgbe_init_mbx_params_pf_id - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes a single set of the hw->mbx struct to correct values for the
+ * pf mailbox. The legacy set of functions is used here.
+ */
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops[vf_id].release = ixgbe_release_mbx_lock_dummy;
+ mbx->ops[vf_id].read = ixgbe_read_mbx_pf_legacy;
+ mbx->ops[vf_id].write = ixgbe_write_mbx_pf_legacy;
+ mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
}
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for pf mailbox
+ * Initializes all sets of the hw->mbx struct to correct values for pf
+ * mailbox. One set corresponds to a single VF. It also initializes counters
+ * and general variables. A set of legacy functions is used by default.
*/
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
+ u16 i;
struct ixgbe_mbx_info *mbx = &hw->mbx;
+ /* Ensure we are not calling this function from a VF */
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
@@ -750,18 +1148,59 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X540)
return;
- mbx->timeout = 0;
- mbx->usec_delay = 0;
+ /* Initialize common mailbox settings */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+ /* Initialize counters with zeroes */
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ /* Regardless of the actual VF count, we initialize params for all
+ * 64 VFs. */
+ /* TODO: 1. Add a define for the max VF count and refactor SHARED to
+ * get rid of the magic number for it (63 or 64 depending on the use
+ * case).
+ * 2. Rewrite the code to dynamically allocate mbx->ops[vf_id] for the
+ * actual number of VFs instead of the default maximum of 64 (0..63).
+ */
+ for (i = 0; i < 64; i++)
+ ixgbe_init_mbx_params_pf_id(hw, i);
+}
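[Editorial sketch] All 64 slots start on the legacy set; a sketch of the intended per-VF upgrade, assuming the PF side of this patch calls it from its API-negotiation handler when a VF requests 1.5 (api_version here is a hypothetical local):

/* Sketch only: promote a single VF's slot to the new handlers,
 * leaving the other slots on the legacy functions. */
if (api_version == ixgbe_mbox_api_15)	/* hypothetical local */
	ixgbe_upgrade_mbx_params_pf(hw, vf_id);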
+
+/**
+ * ixgbe_upgrade_mbx_params_pf - Upgrade initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes the hw->mbx struct to the new function set for improved
+ * stability and handling of messages.
+ */
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* Ensure we are not calling this function from a VF */
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
+ hw->mac.type != ixgbe_mac_X540)
+ return;
+
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
- mbx->ops.read = ixgbe_read_mbx_pf;
- mbx->ops.write = ixgbe_write_mbx_pf;
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
- mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
- mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
- mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].release = ixgbe_release_mbx_lock_pf;
+ mbx->ops[vf_id].read = ixgbe_read_mbx_pf;
+ mbx->ops[vf_id].write = ixgbe_write_mbx_pf;
+ mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
index 47b9327bb896..e6519963242e 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.h
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -35,10 +35,43 @@
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
-#include "ixgbe_type.h"
+struct ixgbe_hw;
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw, u16 mbx_id);
+ s32 (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ s32 (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ s32 (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*clear)(struct ixgbe_hw *hw, u16 vf_number);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ /*
+ * PF: One set of operations for each VF to handle various API versions
+ * at the same time
+ * VF: Only the very first (0) set should be used
+ */
+ struct ixgbe_mbx_operations ops[64];
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 vf_mailbox;
+ u16 size;
+};
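[Editorial sketch] A sketch of the dispatch convention the comment above describes; on the PF the mailbox id is the VF index, on the VF it is always 0:

/* PF side: the per-VF slot selects the handler set for that VF. */
hw->mbx.ops[vf_id].write(hw, msg, size, vf_id);
/* VF side: only slot 0 is initialized and used. */
hw->mbx.ops[0].read(hw, msg, size, 0);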
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -60,22 +93,22 @@
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+#define IXGBE_PFMBICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_PFMBICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_PFMBICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_PFMBICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- * clear to send requests */
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -92,6 +125,9 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ /* API 1.4 is used upstream for IPsec */
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
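[Editorial sketch] A sketch of the downward negotiation a VF driver would perform with this enum, assuming the helper from ixgbe_vf.c; this patch wires up 1.5 and 1.3 code paths, and api_10 is the post-reset default needing no negotiation:

static const int mbx_apis[] = { ixgbe_mbox_api_15, ixgbe_mbox_api_13,
				ixgbe_mbox_api_12, ixgbe_mbox_api_11 };
unsigned int i;

for (i = 0; i < nitems(mbx_apis); i++)	/* nitems() from sys/param.h */
	if (ixgbevf_negotiate_api_version(hw, mbx_apis[i]) == 0)
		break;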
@@ -153,15 +189,17 @@ enum ixgbevf_xcast_modes {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
-void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 vf_number);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw);
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id);
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id);
#endif /* _IXGBE_MBX_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 2fa651df8936..892924712c38 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -33,6 +33,12 @@
#include "ixgbe.h"
+inline device_t
+ixgbe_dev_from_hw(struct ixgbe_hw *hw)
+{
+ return ((struct ixgbe_softc *)hw->back)->dev;
+}
+
inline u16
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
{
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index eca15f0f3816..cf7c578fd684 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -197,6 +197,7 @@ struct ixgbe_osdep
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
+device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
/* These routines are needed by the shared code */
extern u16 ixgbe_read_pci_cfg(struct ixgbe_hw *, u32);
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index f4bee6c34f53..2a735ead9a12 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -462,8 +462,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID2:
- case X550_PHY_ID3:
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -588,7 +587,7 @@ void ixgbe_restart_auto_neg(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
@@ -1424,6 +1423,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1437,7 +1443,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
(comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
- (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)) ||
+ (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) ||
+ (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE))
hw->phy.multispeed_fiber = true;
/* Determine PHY vendor */
@@ -1488,7 +1496,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
- hw->phy.type = ixgbe_phy_sfp_unknown;
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
@@ -1496,10 +1509,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_sfp_passive_unknown;
- else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
- hw->phy.type = ixgbe_phy_sfp_active_unknown;
status = IXGBE_SUCCESS;
goto out;
}
@@ -1511,7 +1520,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1530,7 +1541,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
@@ -1613,6 +1626,8 @@ u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ else if (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
break;
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
@@ -1861,12 +1876,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 1fa2acb77354..c1ba73851397 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -49,6 +49,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -73,6 +74,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
diff --git a/sys/dev/ixgbe/ixgbe_rss.h b/sys/dev/ixgbe/ixgbe_rss.h
index c00273587aaa..84c802671195 100644
--- a/sys/dev/ixgbe/ixgbe_rss.h
+++ b/sys/dev/ixgbe/ixgbe_rss.h
@@ -48,6 +48,7 @@
#define RSS_HASHTYPE_RSS_IPV6_EX (1 << 5)
#define RSS_HASHTYPE_RSS_TCP_IPV6_EX (1 << 6)
#define RSS_HASHTYPE_RSS_UDP_IPV4 (1 << 7)
+#define RSS_HASHTYPE_RSS_UDP_IPV4_EX (1 << 8)
#define RSS_HASHTYPE_RSS_UDP_IPV6 (1 << 9)
#define RSS_HASHTYPE_RSS_UDP_IPV6_EX (1 << 10)
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index b8aeaf51f86c..91b46da72c75 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -202,6 +202,10 @@
#define IXGBE_FLA_X550EM_x IXGBE_FLA
#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
+#define IXGBE_FLA_FL_SIZE_SHIFT_X540 17
+#define IXGBE_FLA_FL_SIZE_SHIFT_X550 12
+#define IXGBE_FLA_FL_SIZE_MASK_X540 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X540)
+#define IXGBE_FLA_FL_SIZE_MASK_X550 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X550)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -284,6 +288,41 @@
#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+/* NVM component version fields */
+#define NVM_VERSZ_LONG 64
+#define NVM_VERSZ_SHORT 32
+#define NVM_VER_LONG \
+ "DS_%x.%x NVM_%x.%02x.%x PHY_%x.%02x.%x OEM_%04x EtkId_%x OR_%x.%x.%x\n"
+#define NVM_VER_SHORT1 "%02x.%02x %x %x.%x.%x\n"
+#define NVM_VER_SHORT2 "%02x.%02x.%x %x.%02x.%x %x %x.%x.%x\n"
+
+#define NVM_EEP_MAJOR_MASK 0xF000
+#define NVM_EEP_MINOR_MASK 0xFF0
+#define NVM_EEP_ID_MASK 0xF
+#define NVM_EEP_MAJ_SHIFT 12
+#define NVM_EEP_MIN_SHIFT 4
+
+#define NVM_EEP_OFFSET_82598 0x2A
+#define NVM_EEP_OFFSET_X540 0x18
+#define NVM_EEP_X550_MINOR_MASK 0xFF
+#define NVM_EEP_PHY_OFF_X540 0x19
+#define NVM_PHY_MAJOR_MASK 0xF000
+#define NVM_PHY_MINOR_MASK 0xFF0
+#define NVM_PHY_ID_MASK 0xF
+#define NVM_PHY_MAJ_SHIFT 12
+#define NVM_PHY_MIN_SHIFT 4
+
+#define NVM_DS_OFFSET 0x29
+#define NVM_DS_MAJOR_MASK 0xF000
+#define NVM_DS_MINOR_MASK 0xF
+#define NVM_DS_SHIFT 12
+
+#define NVM_OEM_OFFSET 0x2A
+
+#define NVM_PHYVEND_MAJOR_MASK 0xFF00
+#define NVM_PHYVEND_MINOR_MASK 0xFF
+#define NVM_PHYVEND_SHIFT 8
+
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
@@ -344,6 +383,16 @@ struct ixgbe_nvm_version {
u16 or_build;
u8 or_patch;
+ u8 phy_fw_maj;
+ u16 phy_fw_min;
+ u8 phy_fw_id;
+
+ u8 devstart_major;
+ u8 devstart_minor;
+ u16 oem_specific;
+
+ u8 phy_vend_maj;
+ u8 phy_vend_min;
};
/* Interrupt Registers */
@@ -483,8 +532,14 @@ struct ixgbe_nvm_version {
#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
/* 64 Mailboxes, 16 DW each */
#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR_INDEX(_i) ((_i) >> 4)
+#define IXGBE_PFMBICR_SHIFT(_i) ((_i) % 16)
#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFVFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_PFVFLREC(_i) (0x00700 + ((_i) * 4))
+#define IXGBE_PFVFLRE_INDEX(_i) ((_i) >> 5)
+#define IXGBE_PFVFLRE_SHIFT(_i) ((_i) % 32)
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
@@ -1437,6 +1492,7 @@ struct ixgbe_dmac_config {
#define IXGBE_BARCTRL_FLSIZE 0x0700
#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL_CSRSIZE_SHIFT 13
/* RSCCTL Bit Masks */
#define IXGBE_RSCCTL_RSCEN 0x01
@@ -1482,7 +1538,7 @@ struct ixgbe_dmac_config {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1696,6 +1752,7 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
@@ -1832,7 +1889,7 @@ enum {
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
@@ -2129,7 +2186,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Ena Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2539,8 +2596,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microseconds we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* Check whether address is multicast. This is little-endian specific check.*/
#define IXGBE_IS_MULTICAST(Address) \
@@ -2898,11 +2955,6 @@ enum {
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
-/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
-#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Translated register #defines */
#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
@@ -3103,6 +3155,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_SHADOW_RAM_DUMP_LEN 0
#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
#define FW_NVM_DATA_OFFSET 3
+#define FW_ANVM_DATA_OFFSET 3
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
@@ -3174,6 +3227,8 @@ enum ixgbe_fdir_pballoc_type {
#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+#define IXGBE_SR_IMMEDIATE_VALUES_PTR 0x4E
+
/* Host Interface Command Structures */
#pragma pack(push, 1)
@@ -3409,6 +3464,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_MACLEN_MASK (0x7F << IXGBE_ADVTXD_MACLEN_SHIFT) /* Adv ctxt desc mac len mask */
#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
@@ -3477,6 +3533,8 @@ typedef u64 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3749,6 +3807,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -4023,6 +4083,7 @@ struct ixgbe_mac_operations {
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*toggle_txdctl)(struct ixgbe_hw *hw, u32 vf_index);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*set_rlpml)(struct ixgbe_hw *, u16);
@@ -4176,35 +4237,6 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
-struct ixgbe_mbx_operations {
- void (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
-};
-
-struct ixgbe_mbx_stats {
- u32 msgs_tx;
- u32 msgs_rx;
-
- u32 acks;
- u32 reqs;
- u32 rsts;
-};
-
-struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
- struct ixgbe_mbx_stats stats;
- u32 timeout;
- u32 usec_delay;
- u32 v2p_mailbox;
- u16 size;
-};
-
struct ixgbe_hw {
u8 IOMEM *hw_addr;
void *back;
@@ -4228,6 +4260,7 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u32 fw_rst_cnt;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4247,7 +4280,7 @@ struct ixgbe_hw {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
@@ -4275,6 +4308,9 @@ struct ixgbe_hw {
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
#define IXGBE_ERR_FW_RESP_INVALID -39
#define IXGBE_ERR_TOKEN_RETRY -40
+#define IXGBE_ERR_MBX -41
+#define IXGBE_ERR_MBX_NOMSG -42
+#define IXGBE_ERR_TIMEOUT -43
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index 91df9b7dd1c3..cac3c6b5e5e7 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -49,6 +49,8 @@
**/
s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
{
+ u16 i;
+
/* MAC */
hw->mac.ops.init_hw = ixgbe_init_hw_vf;
hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
@@ -82,7 +84,8 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
hw->mac.max_tx_queues = 1;
hw->mac.max_rx_queues = 1;
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_vf;
return IXGBE_SUCCESS;
}
@@ -185,6 +188,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ ixgbe_init_mbx_params_vf(hw);
DEBUGOUT("Issuing a function level reset to MAC\n");
@@ -194,7 +198,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
msec_delay(50);
/* we cannot reset while the RSTI / RSTD bits are asserted */
- while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ while (!mbx->ops[0].check_for_rst(hw, 0) && timeout) {
timeout--;
usec_delay(5);
}
@@ -209,7 +213,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1, 0);
+ ixgbe_write_mbx(hw, msgbuf, 1, 0);
msec_delay(10);
@@ -218,16 +222,16 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf,
- IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ ret_val = ixgbe_poll_mbx(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
if (ret_val)
return ret_val;
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -324,13 +328,12 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+ s32 retval = ixgbe_write_mbx(hw, msg, size, 0);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size, 0);
+ return ixgbe_poll_mbx(hw, retmsg, size, 0);
}
/**
@@ -356,9 +359,9 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if nacked the address was rejected, use "perm_addr" */
+ /* on failure the address was rejected; use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -380,7 +383,6 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next,
bool clear)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 vector;
@@ -412,7 +414,7 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
vector_list[i] = (u16)vector;
}
- return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+ return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
}
/**
@@ -434,6 +436,7 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
/* Fall through */
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
break;
default:
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
@@ -447,7 +450,7 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
return IXGBE_SUCCESS;
}
@@ -470,7 +473,7 @@ s32 ixgbe_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
- if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) {
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
ret_val = IXGBE_ERR_MBX;
} else {
ret_val = IXGBE_SUCCESS;
@@ -503,10 +506,10 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
- if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_SUCCESS;
- return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE);
}
/**
@@ -571,7 +574,7 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_OUT_OF_MEM;
}
@@ -608,12 +611,13 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = IXGBE_SUCCESS;
- u32 links_reg;
u32 in_msg = 0;
+ u32 links_reg;
+
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
/* If we were hit with a reset drop the link */
- if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout)
mac->get_link_status = true;
if (!mac->get_link_status)
@@ -642,7 +646,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type >= ixgbe_mac_X550_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
}
@@ -652,7 +656,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -660,7 +664,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
case IXGBE_LINKS_SPEED_10_X550EM_A:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
/* Since Reserved in older MAC's */
- if (hw->mac.type >= ixgbe_mac_X550)
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
*speed = IXGBE_LINK_SPEED_10_FULL;
break;
default:
@@ -670,19 +674,22 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1, 0))
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
- /* msg is not CTS and is NACK we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
- ret_val = -1;
+ /* msg is not CTS and is FAILURE; we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
+ ret_val = IXGBE_ERR_MBX;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
- ret_val = -1;
+ ret_val = IXGBE_ERR_TIMEOUT;
goto out;
}
@@ -713,7 +720,7 @@ s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
if (retval)
return retval;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -739,7 +746,7 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -761,6 +768,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -775,11 +783,11 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/*
- * if we we didn't get an ACK there must have been
+ * if we didn't get a SUCCESS there must have been
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index c06a19555a8b..57cec5b52e18 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -62,6 +62,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
+ u16 i;
DEBUGFUNC("ixgbe_init_ops_X540");
@@ -145,7 +146,8 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
& IXGBE_FWSM_MODE_MASK);
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;
/* LEDs */
mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c
index ad7e7abd7a12..7f07190f832c 100644
--- a/sys/dev/ixgbe/ixgbe_x550.c
+++ b/sys/dev/ixgbe/ixgbe_x550.c
@@ -355,8 +355,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
- /* Fallthrough */
-
+ return ixgbe_identify_sfp_module_X550em(hw);
case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_sfp_module_X550em(hw);
break;
@@ -750,7 +749,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
}
/**
- * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
@@ -799,14 +798,8 @@ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
/* Start with generic X550EM init */
ret_val = ixgbe_init_ops_X550EM(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
- hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
- } else {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
- }
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
@@ -1288,72 +1281,6 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
}
/**
- * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
- * of the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
- **/
-s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
-{
- struct ixgbe_hic_internal_phy_req write_cmd;
- s32 status;
- UNREFERENCED_1PARAMETER(device_type);
-
- memset(&write_cmd, 0, sizeof(write_cmd));
- write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- write_cmd.port_number = hw->bus.lan_id;
- write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
- write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
- write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
- sizeof(write_cmd),
- IXGBE_HI_COMMAND_TIMEOUT, false);
-
- return status;
-}
-
-/**
- * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Pointer to read data from the register
- **/
-s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
-{
- union {
- struct ixgbe_hic_internal_phy_req cmd;
- struct ixgbe_hic_internal_phy_resp rsp;
- } hic;
- s32 status;
- UNREFERENCED_1PARAMETER(device_type);
-
- memset(&hic, 0, sizeof(hic));
- hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- hic.cmd.port_number = hw->bus.lan_id;
- hic.cmd.command_type = FW_INT_PHY_REQ_READ;
- hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
- sizeof(hic.cmd),
- IXGBE_HI_COMMAND_TIMEOUT, true);
-
- /* Extract the register value from the response. */
- *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
-
- return status;
-}
-
-/**
* ixgbe_disable_mdd_X550
* @hw: pointer to hardware structure
*
@@ -1569,6 +1496,8 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
*linear = false;
break;
case ixgbe_sfp_type_unknown:
@@ -1876,7 +1805,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_link_capabilities_x550em - Determines link capabilities
+ * ixgbe_get_link_capabilities_X550em - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
@@ -1902,9 +1831,11 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
/* Check if 1G SFP module. */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
- || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return IXGBE_SUCCESS;
}
@@ -1941,7 +1872,9 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
break;
}
}
- /* fall through */
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
default:
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
@@ -3236,7 +3169,7 @@ out:
}
/**
- * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -3661,7 +3594,9 @@ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
break;
}
}
- /* fall through */
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
@@ -3704,7 +3639,7 @@ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_bus_info_x550em - Set PCI bus info
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
* @hw: pointer to hardware structure
*
* Sets bus link width and speed to unknown because X550em is
@@ -3769,7 +3704,7 @@ void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enter_lplu_x550em - Transition to low power states
+ * ixgbe_enter_lplu_t_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
* Configures Low Power Link Up on transition to low power states
@@ -3877,7 +3812,7 @@ s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_lcd_x550em - Determine lowest common denominator
+ * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
@@ -4311,36 +4246,39 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
- while (--retries) {
- status = IXGBE_SUCCESS;
- if (hmask)
- status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
- if (status) {
- DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
- status);
- return status;
- }
- if (!(mask & IXGBE_GSSR_TOKEN_SM))
- return IXGBE_SUCCESS;
+ status = IXGBE_SUCCESS;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", status);
+ return status;
+ }
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return IXGBE_SUCCESS;
+
+ while (--retries) {
status = ixgbe_get_phy_token(hw);
- if (status == IXGBE_ERR_TOKEN_RETRY)
- DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
- status);
if (status == IXGBE_SUCCESS)
return IXGBE_SUCCESS;
- if (hmask)
- ixgbe_release_swfw_sync_X540(hw, hmask);
-
if (status != IXGBE_ERR_TOKEN_RETRY) {
- DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
- status);
+ DEBUGOUT1("Retry acquiring the PHY token failed, Status = %d\n", status);
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
return status;
}
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
}
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+
DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
hw->phy.id);
return status;
diff --git a/sys/dev/ixgbe/ixgbe_x550.h b/sys/dev/ixgbe/ixgbe_x550.h
index 245e128266aa..8bff5e3d2bf6 100644
--- a/sys/dev/ixgbe/ixgbe_x550.h
+++ b/sys/dev/ixgbe/ixgbe_x550.h
@@ -69,10 +69,6 @@ s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
-s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data);
-s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data);
void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
@@ -107,7 +103,6 @@ s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);