summaryrefslogtreecommitdiff
path: root/sys/dev
diff options
context:
space:
mode:
authorSimon J. Gerraty <sjg@FreeBSD.org>2014-05-08 23:54:15 +0000
committerSimon J. Gerraty <sjg@FreeBSD.org>2014-05-08 23:54:15 +0000
commitcc3f4b99653c34ae64f8a1fddea370abefef680e (patch)
tree8ce0ddd0e6f508bd20c77429c448969589170fae /sys/dev
parentf974b33f6ed6f35170c520d2be111bfc2c3954cd (diff)
parent16aa1f0950a3b4407a36ecc96acc8ca0710ebd91 (diff)
downloadsrc-test-cc3f4b99653c34ae64f8a1fddea370abefef680e.tar.gz
src-test-cc3f4b99653c34ae64f8a1fddea370abefef680e.zip
Notes
Diffstat (limited to 'sys/dev')
-rw-r--r--sys/dev/ath/ath_hal/ah_devid.h1
-rw-r--r--sys/dev/ath/ath_hal/ar5210/ar5210_power.c9
-rw-r--r--sys/dev/ath/ath_hal/ar5211/ar5211_power.c9
-rw-r--r--sys/dev/ath/ath_hal/ar5212/ar5212_power.c9
-rw-r--r--sys/dev/ath/ath_hal/ar5416/ar5416_power.c12
-rw-r--r--sys/dev/ath/if_ath.c588
-rw-r--r--sys/dev/ath/if_ath_beacon.c87
-rw-r--r--sys/dev/ath/if_ath_beacon.h2
-rw-r--r--sys/dev/ath/if_ath_debug.h1
-rw-r--r--sys/dev/ath/if_ath_keycache.c16
-rw-r--r--sys/dev/ath/if_ath_led.c9
-rw-r--r--sys/dev/ath/if_ath_misc.h13
-rw-r--r--sys/dev/ath/if_ath_rx.c245
-rw-r--r--sys/dev/ath/if_ath_rx_edma.c65
-rw-r--r--sys/dev/ath/if_ath_sysctl.c25
-rw-r--r--sys/dev/ath/if_ath_tdma.c15
-rw-r--r--sys/dev/ath/if_ath_tx.c82
-rw-r--r--sys/dev/ath/if_ath_tx_edma.c14
-rw-r--r--sys/dev/ath/if_athvar.h24
-rw-r--r--sys/dev/bce/if_bce.c20
-rw-r--r--sys/dev/bce/if_bcefw.h8
-rw-r--r--sys/dev/bce/if_bcereg.h6
-rw-r--r--sys/dev/bxe/57710_init_values.c9
-rw-r--r--sys/dev/bxe/57710_int_offsets.h9
-rw-r--r--sys/dev/bxe/57711_init_values.c9
-rw-r--r--sys/dev/bxe/57711_int_offsets.h9
-rw-r--r--sys/dev/bxe/57712_init_values.c9
-rw-r--r--sys/dev/bxe/57712_int_offsets.h9
-rw-r--r--sys/dev/bxe/bxe.c47
-rw-r--r--sys/dev/bxe/bxe.h9
-rw-r--r--sys/dev/bxe/bxe_dcb.h9
-rw-r--r--sys/dev/bxe/bxe_debug.c9
-rw-r--r--sys/dev/bxe/bxe_elink.c9
-rw-r--r--sys/dev/bxe/bxe_elink.h9
-rw-r--r--sys/dev/bxe/bxe_stats.c9
-rw-r--r--sys/dev/bxe/bxe_stats.h9
-rw-r--r--sys/dev/bxe/ecore_fw_defs.h9
-rw-r--r--sys/dev/bxe/ecore_hsi.h9
-rw-r--r--sys/dev/bxe/ecore_init.h9
-rw-r--r--sys/dev/bxe/ecore_init_ops.h9
-rw-r--r--sys/dev/bxe/ecore_mfw_req.h9
-rw-r--r--sys/dev/bxe/ecore_reg.h9
-rw-r--r--sys/dev/bxe/ecore_sp.c9
-rw-r--r--sys/dev/bxe/ecore_sp.h9
-rw-r--r--sys/dev/drm2/i915/i915_gem.c2
-rw-r--r--sys/dev/drm2/radeon/radeon_drv.c8
-rw-r--r--sys/dev/drm2/radeon/radeon_ioc32.c417
-rw-r--r--sys/dev/gpio/gpio_if.m39
-rw-r--r--sys/dev/gpio/gpiobus.c14
-rw-r--r--sys/dev/gpio/gpiobusvar.h10
-rw-r--r--sys/dev/gpio/ofw_gpiobus.c14
-rw-r--r--sys/dev/lindev/full.c103
-rw-r--r--sys/dev/mpr/mpi/mpi2.h1257
-rw-r--r--sys/dev/mpr/mpi/mpi2_cnfg.h3169
-rw-r--r--sys/dev/mpr/mpi/mpi2_hbd.h152
-rw-r--r--sys/dev/mpr/mpi/mpi2_history.txt619
-rw-r--r--sys/dev/mpr/mpi/mpi2_init.h614
-rw-r--r--sys/dev/mpr/mpi/mpi2_ioc.h1856
-rw-r--r--sys/dev/mpr/mpi/mpi2_ra.h118
-rw-r--r--sys/dev/mpr/mpi/mpi2_raid.h406
-rw-r--r--sys/dev/mpr/mpi/mpi2_sas.h346
-rw-r--r--sys/dev/mpr/mpi/mpi2_targ.h600
-rw-r--r--sys/dev/mpr/mpi/mpi2_tool.h546
-rw-r--r--sys/dev/mpr/mpi/mpi2_type.h131
-rw-r--r--sys/dev/mpr/mpr.c2795
-rw-r--r--sys/dev/mpr/mpr_config.c1302
-rw-r--r--sys/dev/mpr/mpr_ioctl.h386
-rw-r--r--sys/dev/mpr/mpr_mapping.c2269
-rw-r--r--sys/dev/mpr/mpr_mapping.h (renamed from sys/dev/lindev/lindev.c)84
-rw-r--r--sys/dev/mpr/mpr_pci.c350
-rw-r--r--sys/dev/mpr/mpr_sas.c3485
-rw-r--r--sys/dev/mpr/mpr_sas.h168
-rw-r--r--sys/dev/mpr/mpr_sas_lsi.c1218
-rw-r--r--sys/dev/mpr/mpr_table.c516
-rw-r--r--sys/dev/mpr/mpr_table.h (renamed from sys/dev/lindev/lindev.h)29
-rw-r--r--sys/dev/mpr/mpr_user.c2453
-rw-r--r--sys/dev/mpr/mprvar.h766
-rw-r--r--sys/dev/mps/mps_sas.c6
-rw-r--r--sys/dev/mrsas/mrsas.c3672
-rw-r--r--sys/dev/mrsas/mrsas.h2464
-rw-r--r--sys/dev/mrsas/mrsas_cam.c1179
-rw-r--r--sys/dev/mrsas/mrsas_fp.c1451
-rw-r--r--sys/dev/mrsas/mrsas_ioctl.c546
-rw-r--r--sys/dev/mrsas/mrsas_ioctl.h97
-rw-r--r--sys/dev/null/null.c26
-rw-r--r--sys/dev/ofw/ofw_bus.h8
-rw-r--r--sys/dev/ofw/ofw_bus_if.m30
-rw-r--r--sys/dev/pci/pci.c192
-rw-r--r--sys/dev/pci/pci_if.m4
-rw-r--r--sys/dev/pci/pcib_if.m4
-rw-r--r--sys/dev/proto/proto.h63
-rw-r--r--sys/dev/proto/proto_bus_pci.c112
-rw-r--r--sys/dev/proto/proto_core.c384
-rw-r--r--sys/dev/proto/proto_dev.h43
-rw-r--r--sys/dev/sdhci/sdhci_fdt.c16
-rw-r--r--sys/dev/usb/controller/dwc_otg.c1143
-rw-r--r--sys/dev/usb/controller/dwc_otg.h30
-rw-r--r--sys/dev/usb/controller/dwc_otgreg.h10
-rw-r--r--sys/dev/usb/net/if_smsc.c48
-rw-r--r--sys/dev/vt/hw/efifb/efifb.c53
-rw-r--r--sys/dev/vt/hw/fb/vt_early_fb.c80
-rw-r--r--sys/dev/vt/hw/fb/vt_fb.c66
-rw-r--r--sys/dev/vt/hw/fb/vt_fb.h2
-rw-r--r--sys/dev/vt/hw/ofwfb/ofwfb.c1
-rw-r--r--sys/dev/vt/hw/vga/vga.c37
-rw-r--r--sys/dev/vt/vt.h12
-rw-r--r--sys/dev/vt/vt_buf.c5
-rw-r--r--sys/dev/vt/vt_consolectl.c2
-rw-r--r--sys/dev/vt/vt_core.c154
-rw-r--r--sys/dev/vt/vt_sysmouse.c2
110 files changed, 38354 insertions, 1326 deletions
diff --git a/sys/dev/ath/ath_hal/ah_devid.h b/sys/dev/ath/ath_hal/ah_devid.h
index 43d994dcd3ab7..1e4d473072583 100644
--- a/sys/dev/ath/ath_hal/ah_devid.h
+++ b/sys/dev/ath/ath_hal/ah_devid.h
@@ -92,6 +92,7 @@
#define AR9300_DEVID_AR946X_PCIE 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA9565 0x0036
+#define AR9300_DEVID_AR1111_PCIE 0x0037
#define AR9300_DEVID_QCA955X 0x0039
#define AR_SUBVENDOR_ID_NOG 0x0e11 /* No 11G subvendor ID */
diff --git a/sys/dev/ath/ath_hal/ar5210/ar5210_power.c b/sys/dev/ath/ath_hal/ar5210/ar5210_power.c
index 7e7961fcc38a0..ec5e75d369349 100644
--- a/sys/dev/ath/ath_hal/ar5210/ar5210_power.c
+++ b/sys/dev/ath/ath_hal/ar5210/ar5210_power.c
@@ -108,16 +108,19 @@ ar5210SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode, int setChip)
setChip ? "set chip " : "");
switch (mode) {
case HAL_PM_AWAKE:
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
status = ar5210SetPowerModeAwake(ah, setChip);
break;
case HAL_PM_FULL_SLEEP:
ar5210SetPowerModeSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
case HAL_PM_NETWORK_SLEEP:
ar5210SetPowerModeAuto(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
default:
HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unknown power mode %u\n",
diff --git a/sys/dev/ath/ath_hal/ar5211/ar5211_power.c b/sys/dev/ath/ath_hal/ar5211/ar5211_power.c
index 0ed090215b52c..e646d90b66cab 100644
--- a/sys/dev/ath/ath_hal/ar5211/ar5211_power.c
+++ b/sys/dev/ath/ath_hal/ar5211/ar5211_power.c
@@ -110,16 +110,19 @@ ar5211SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode, int setChip)
setChip ? "set chip " : "");
switch (mode) {
case HAL_PM_AWAKE:
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
status = ar5211SetPowerModeAwake(ah, setChip);
break;
case HAL_PM_FULL_SLEEP:
ar5211SetPowerModeSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
case HAL_PM_NETWORK_SLEEP:
ar5211SetPowerModeNetworkSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
default:
HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unknown power mode %u\n",
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_power.c b/sys/dev/ath/ath_hal/ar5212/ar5212_power.c
index 555632895910a..3068510dd8cd9 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_power.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_power.c
@@ -134,16 +134,19 @@ ar5212SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode, int setChip)
setChip ? "set chip " : "");
switch (mode) {
case HAL_PM_AWAKE:
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
status = ar5212SetPowerModeAwake(ah, setChip);
break;
case HAL_PM_FULL_SLEEP:
ar5212SetPowerModeSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
case HAL_PM_NETWORK_SLEEP:
ar5212SetPowerModeNetworkSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
default:
HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unknown power mode %u\n",
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_power.c b/sys/dev/ath/ath_hal/ar5416/ar5416_power.c
index c70a8e7d5d57f..dff9a855917a4 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_power.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_power.c
@@ -133,23 +133,29 @@ ar5416SetPowerMode(struct ath_hal *ah, HAL_POWER_MODE mode, int setChip)
};
#endif
int status = AH_TRUE;
+
+#if 0
if (!setChip)
return AH_TRUE;
+#endif
HALDEBUG(ah, HAL_DEBUG_POWER, "%s: %s -> %s (%s)\n", __func__,
modes[ah->ah_powerMode], modes[mode], setChip ? "set chip " : "");
switch (mode) {
case HAL_PM_AWAKE:
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
status = ar5416SetPowerModeAwake(ah, setChip);
break;
case HAL_PM_FULL_SLEEP:
ar5416SetPowerModeSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
case HAL_PM_NETWORK_SLEEP:
ar5416SetPowerModeNetworkSleep(ah, setChip);
- ah->ah_powerMode = mode;
+ if (setChip)
+ ah->ah_powerMode = mode;
break;
default:
HALDEBUG(ah, HAL_DEBUG_ANY, "%s: unknown power mode 0x%x\n",
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 6c98e603a835a..a3ab2f61dbfa3 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -165,6 +165,7 @@ static void ath_bmiss_vap(struct ieee80211vap *);
static void ath_bmiss_proc(void *, int);
static void ath_key_update_begin(struct ieee80211vap *);
static void ath_key_update_end(struct ieee80211vap *);
+static void ath_update_mcast_hw(struct ath_softc *);
static void ath_update_mcast(struct ifnet *);
static void ath_update_promisc(struct ifnet *);
static void ath_updateslot(struct ifnet *);
@@ -279,6 +280,164 @@ ath_legacy_attach_comp_func(struct ath_softc *sc)
}
}
+/*
+ * Set the target power mode.
+ *
+ * If this is called during a point in time where
+ * the hardware is being programmed elsewhere, it will
+ * simply store it away and update it when all current
+ * uses of the hardware are completed.
+ */
+void
+_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
+{
+ ATH_LOCK_ASSERT(sc);
+
+ sc->sc_target_powerstate = power_state;
+
+ DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
+ __func__,
+ file,
+ line,
+ power_state,
+ sc->sc_powersave_refcnt);
+
+ if (sc->sc_powersave_refcnt == 0 &&
+ power_state != sc->sc_cur_powerstate) {
+ sc->sc_cur_powerstate = power_state;
+ ath_hal_setpower(sc->sc_ah, power_state);
+
+ /*
+ * If the NIC is force-awake, then set the
+ * self-gen frame state appropriately.
+ *
+ * If the nic is in network sleep or full-sleep,
+ * we let the above call leave the self-gen
+ * state as "sleep".
+ */
+ if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
+ sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
+ ath_hal_setselfgenpower(sc->sc_ah,
+ sc->sc_target_selfgen_state);
+ }
+ }
+}
+
+/*
+ * Set the current self-generated frames state.
+ *
+ * This is separate from the target power mode. The chip may be
+ * awake but the desired state is "sleep", so frames sent to the
+ * destination has PWRMGT=1 in the 802.11 header. The NIC also
+ * needs to know to set PWRMGT=1 in self-generated frames.
+ */
+void
+_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
+{
+
+ ATH_LOCK_ASSERT(sc);
+
+ DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
+ __func__,
+ file,
+ line,
+ power_state,
+ sc->sc_target_selfgen_state);
+
+ sc->sc_target_selfgen_state = power_state;
+
+ /*
+ * If the NIC is force-awake, then set the power state.
+ * Network-state and full-sleep will already transition it to
+ * mark self-gen frames as sleeping - and we can't
+ * guarantee the NIC is awake to program the self-gen frame
+ * setting anyway.
+ */
+ if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
+ ath_hal_setselfgenpower(sc->sc_ah, power_state);
+ }
+}
+
+/*
+ * Set the hardware power mode and take a reference.
+ *
+ * This doesn't update the target power mode in the driver;
+ * it just updates the hardware power state.
+ *
+ * XXX it should only ever force the hardware awake; it should
+ * never be called to set it asleep.
+ */
+void
+_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
+{
+ ATH_LOCK_ASSERT(sc);
+
+ DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
+ __func__,
+ file,
+ line,
+ power_state,
+ sc->sc_powersave_refcnt);
+
+ sc->sc_powersave_refcnt++;
+
+ if (power_state != sc->sc_cur_powerstate) {
+ ath_hal_setpower(sc->sc_ah, power_state);
+ sc->sc_cur_powerstate = power_state;
+
+ /*
+ * Adjust the self-gen powerstate if appropriate.
+ */
+ if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
+ sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
+ ath_hal_setselfgenpower(sc->sc_ah,
+ sc->sc_target_selfgen_state);
+ }
+
+ }
+}
+
+/*
+ * Restore the power save mode to what it once was.
+ *
+ * This will decrement the reference counter and once it hits
+ * zero, it'll restore the powersave state.
+ */
+void
+_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
+{
+
+ ATH_LOCK_ASSERT(sc);
+
+ DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
+ __func__,
+ file,
+ line,
+ sc->sc_powersave_refcnt,
+ sc->sc_target_powerstate);
+
+ if (sc->sc_powersave_refcnt == 0)
+ device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
+ else
+ sc->sc_powersave_refcnt--;
+
+ if (sc->sc_powersave_refcnt == 0 &&
+ sc->sc_target_powerstate != sc->sc_cur_powerstate) {
+ sc->sc_cur_powerstate = sc->sc_target_powerstate;
+ ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
+ }
+
+ /*
+ * Adjust the self-gen powerstate if appropriate.
+ */
+ if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
+ sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
+ ath_hal_setselfgenpower(sc->sc_ah,
+ sc->sc_target_selfgen_state);
+ }
+
+}
+
#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
@@ -341,6 +500,10 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
ath_xmit_setup_legacy(sc);
}
+ if (ath_hal_hasmybeacon(sc->sc_ah)) {
+ sc->sc_do_mybeacon = 1;
+ }
+
/*
* Check if the MAC has multi-rate retry support.
* We do this by trying to setup a fake extended
@@ -605,6 +768,8 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
#ifdef ATH_ENABLE_DFS
| IEEE80211_C_DFS /* Enable radar detection */
#endif
+ | IEEE80211_C_PMGT /* Station side power mgmt */
+ | IEEE80211_C_SWSLEEP
;
/*
* Query the hal to figure out h/w crypto support.
@@ -994,6 +1159,14 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
if (bootverbose)
ieee80211_announce(ic);
ath_announce(sc);
+
+ /*
+ * Put it to sleep for now.
+ */
+ ATH_LOCK(sc);
+ ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
+ ATH_UNLOCK(sc);
+
return 0;
bad2:
ath_tx_cleanup(sc);
@@ -1039,7 +1212,22 @@ ath_detach(struct ath_softc *sc)
* it last
* Other than that, it's straightforward...
*/
+
+ /*
+ * XXX Wake the hardware up first. ath_stop() will still
+ * wake it up first, but I'd rather do it here just to
+ * ensure it's awake.
+ */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ath_power_setpower(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
+ /*
+ * Stop things cleanly.
+ */
ath_stop(ifp);
+
ieee80211_ifdetach(ifp->if_l2com);
taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
@@ -1402,6 +1590,10 @@ ath_vap_delete(struct ieee80211vap *vap)
struct ath_hal *ah = sc->sc_ah;
struct ath_vap *avp = ATH_VAP(vap);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/*
@@ -1415,6 +1607,8 @@ ath_vap_delete(struct ieee80211vap *vap)
ath_stoprecv(sc, 1); /* stop recv side */
}
+ /* .. leave the hardware awake for now. */
+
ieee80211_vap_detach(vap);
/*
@@ -1502,6 +1696,9 @@ ath_vap_delete(struct ieee80211vap *vap)
}
ath_hal_intrset(ah, sc->sc_imask);
}
+
+ /* Ok, let the hardware asleep. */
+ ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
@@ -1547,8 +1744,12 @@ ath_reset_keycache(struct ath_softc *sc)
struct ath_hal *ah = sc->sc_ah;
int i;
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
for (i = 0; i < sc->sc_keymax; i++)
ath_hal_keyreset(ah, i);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
ieee80211_crypto_reload_keys(ic);
}
@@ -1600,6 +1801,14 @@ ath_resume(struct ath_softc *sc)
sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
+
+ /* Ensure we set the current power state to on */
+ ATH_LOCK(sc);
+ ath_power_setselfgen(sc, HAL_PM_AWAKE);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ath_power_setpower(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ath_hal_reset(ah, sc->sc_opmode,
sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
AH_FALSE, &status);
@@ -1632,6 +1841,10 @@ ath_resume(struct ath_softc *sc)
if (sc->sc_resume_up)
ieee80211_resume_all(ic);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
/* XXX beacons ? */
}
@@ -1689,6 +1902,10 @@ ath_intr(void *arg)
return;
}
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
if ((ifp->if_flags & IFF_UP) == 0 ||
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
HAL_INT status;
@@ -1698,6 +1915,10 @@ ath_intr(void *arg)
ath_hal_getisr(ah, &status); /* clear ISR */
ath_hal_intrset(ah, 0); /* disable further intr's */
ATH_PCU_UNLOCK(sc);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
return;
}
@@ -1737,6 +1958,11 @@ ath_intr(void *arg)
/* Short-circuit un-handled interrupts */
if (status == 0x0) {
ATH_PCU_UNLOCK(sc);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
return;
}
@@ -1903,10 +2129,18 @@ ath_intr(void *arg)
ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
sc->sc_stats.ast_rxorn++;
}
+ if (status & HAL_INT_TSFOOR) {
+ device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
+ sc->sc_syncbeacon = 1;
+ }
}
ATH_PCU_LOCK(sc);
sc->sc_intr_cnt--;
ATH_PCU_UNLOCK(sc);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
}
static void
@@ -1937,6 +2171,8 @@ ath_fatal_proc(void *arg, int pending)
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
+ struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+
/*
* Workaround phantom bmiss interrupts by sanity-checking
* the time of our last rx'd frame. If it is within the
@@ -1945,6 +2181,16 @@ ath_bmiss_vap(struct ieee80211vap *vap)
* be dispatched up for processing. Note this applies only
* for h/w beacon miss events.
*/
+
+ /*
+ * XXX TODO: Just read the TSF during the interrupt path;
+ * that way we don't have to wake up again just to read it
+ * again.
+ */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
struct ifnet *ifp = vap->iv_ic->ic_ifp;
struct ath_softc *sc = ifp->if_softc;
@@ -1962,12 +2208,32 @@ ath_bmiss_vap(struct ieee80211vap *vap)
if (tsf - lastrx <= bmisstimeout) {
sc->sc_stats.ast_bmiss_phantom++;
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
return;
}
}
+
+ /*
+ * There's no need to keep the hardware awake during the call
+ * to av_bmiss().
+ */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+ /*
+ * Attempt to force a beacon resync.
+ */
+ sc->sc_syncbeacon = 1;
+
ATH_VAP(vap)->av_bmiss(vap);
}
+/* XXX this needs a force wakeup! */
int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
@@ -1990,6 +2256,12 @@ ath_bmiss_proc(void *arg, int pending)
DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
+ ath_beacon_miss(sc);
+
/*
* Do a reset upon any becaon miss event.
*
@@ -2003,6 +2275,13 @@ ath_bmiss_proc(void *arg, int pending)
ath_reset(ifp, ATH_RESET_NOLOSS);
ieee80211_beacon_miss(ifp->if_l2com);
}
+
+ /* Force a beacon resync, in case they've drifted */
+ sc->sc_syncbeacon = 1;
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
}
/*
@@ -2042,6 +2321,13 @@ ath_init(void *arg)
ATH_LOCK(sc);
/*
+ * Force the sleep state awake.
+ */
+ ath_power_setselfgen(sc, HAL_PM_AWAKE);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ath_power_setpower(sc, HAL_PM_AWAKE);
+
+ /*
* Stop anything previously setup. This is safe
* whether this is the first time through or not.
*/
@@ -2058,6 +2344,7 @@ ath_init(void *arg)
ath_update_chainmasks(sc, ic->ic_curchan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
+
if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
if_printf(ifp, "unable to reset hardware; hal status %u\n",
status);
@@ -2113,6 +2400,7 @@ ath_init(void *arg)
*/
if (ath_startrecv(sc) != 0) {
if_printf(ifp, "unable to start recv logic\n");
+ ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return;
}
@@ -2139,6 +2427,15 @@ ath_init(void *arg)
if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
sc->sc_imask |= HAL_INT_MIB;
+ /*
+ * XXX add capability for this.
+ *
+ * If we're in STA mode (and maybe IBSS?) then register for
+ * TSFOOR interrupts.
+ */
+ if (ic->ic_opmode == IEEE80211_M_STA)
+ sc->sc_imask |= HAL_INT_TSFOOR;
+
/* Enable global TX timeout and carrier sense timeout if available */
if (ath_hal_gtxto_supported(ah))
sc->sc_imask |= HAL_INT_GTT;
@@ -2150,6 +2447,7 @@ ath_init(void *arg)
callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
ath_hal_intrset(ah, sc->sc_imask);
+ ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
#ifdef ATH_TX99_DIAG
@@ -2170,6 +2468,12 @@ ath_stop_locked(struct ifnet *ifp)
__func__, sc->sc_invalid, ifp->if_flags);
ATH_LOCK_ASSERT(sc);
+
+ /*
+ * Wake the hardware up before fiddling with it.
+ */
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/*
* Shutdown the hardware and driver:
@@ -2210,9 +2514,20 @@ ath_stop_locked(struct ifnet *ifp)
sc->sc_rxlink = NULL;
ath_beacon_free(sc); /* XXX not needed */
}
+
+ /* And now, restore the current power state */
+ ath_power_restore_power_state(sc);
}
-#define MAX_TXRX_ITERATIONS 1000
+/*
+ * Wait until all pending TX/RX has completed.
+ *
+ * This waits until all existing transmit, receive and interrupts
+ * have completed. It's assumed that the caller has first
+ * grabbed the reset lock so it doesn't try to do overlapping
+ * chip resets.
+ */
+#define MAX_TXRX_ITERATIONS 100
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
@@ -2231,7 +2546,8 @@ ath_txrx_stop_locked(struct ath_softc *sc)
sc->sc_txstart_cnt || sc->sc_intr_cnt) {
if (i <= 0)
break;
- msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
+ msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
+ msecs_to_ticks(10));
i--;
}
@@ -2278,7 +2594,7 @@ ath_txrx_start(struct ath_softc *sc)
* Another, cleaner way should be found to serialise all of
* these operations.
*/
-#define MAX_RESET_ITERATIONS 10
+#define MAX_RESET_ITERATIONS 25
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
@@ -2296,7 +2612,11 @@ ath_reset_grablock(struct ath_softc *sc, int dowait)
break;
}
ATH_PCU_UNLOCK(sc);
- pause("ath_reset_grablock", 1);
+ /*
+ * 1 tick is likely not enough time for long calibrations
+ * to complete. So we should wait quite a while.
+ */
+ pause("ath_reset_grablock", msecs_to_ticks(100));
i--;
ATH_PCU_LOCK(sc);
} while (i > 0);
@@ -2361,6 +2681,13 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
/* Try to (stop any further TX/RX from occuring */
taskqueue_block(sc->sc_tq);
+ /*
+ * Wake the hardware up.
+ */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_PCU_LOCK(sc);
/*
@@ -2455,9 +2782,13 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
* reset counter - this way ath_intr() doesn't end up
* disabling interrupts without a corresponding enable
* in the rest or channel change path.
+ *
+ * Grab the TX reference in case we need to transmit.
+ * That way a parallel transmit doesn't.
*/
ATH_PCU_LOCK(sc);
sc->sc_inreset_cnt--;
+ sc->sc_txstart_cnt++;
/* XXX only do this if sc_inreset_cnt == 0? */
ath_hal_intrset(ah, sc->sc_imask);
ATH_PCU_UNLOCK(sc);
@@ -2474,6 +2805,8 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
/* Restart TX/RX as needed */
ath_txrx_start(sc);
+ /* XXX TODO: we need to hold the tx refcount here! */
+
/* Restart TX completion and pending TX */
if (reset_type == ATH_RESET_NOLOSS) {
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
@@ -2498,6 +2831,14 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
IF_UNLOCK(&ifp->if_snd);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+ ATH_PCU_LOCK(sc);
+ sc->sc_txstart_cnt--;
+ ATH_PCU_UNLOCK(sc);
+
/* Handle any frames in the TX queue */
/*
* XXX should this be done by the caller, rather than
@@ -2638,6 +2979,7 @@ ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
tbf->bf_status = bf->bf_status;
tbf->bf_m = bf->bf_m;
tbf->bf_node = bf->bf_node;
+ KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__));
/* will be setup by the chain/setup function */
tbf->bf_lastds = NULL;
/* for now, last == self */
@@ -2739,6 +3081,11 @@ ath_transmit(struct ifnet *ifp, struct mbuf *m)
sc->sc_txstart_cnt++;
ATH_PCU_UNLOCK(sc);
+ /* Wake the hardware up already */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
/*
* Grab the TX lock - it's ok to do this here; we haven't
@@ -2972,6 +3319,11 @@ finish:
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
+ /* Sleep the hardware if required */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");
return (retval);
@@ -2999,7 +3351,6 @@ ath_key_update_begin(struct ieee80211vap *vap)
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
taskqueue_block(sc->sc_tq);
- IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
}
static void
@@ -3009,7 +3360,6 @@ ath_key_update_end(struct ieee80211vap *vap)
struct ath_softc *sc = ifp->if_softc;
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
- IF_UNLOCK(&ifp->if_snd);
taskqueue_unblock(sc->sc_tq);
}
@@ -3020,16 +3370,25 @@ ath_update_promisc(struct ifnet *ifp)
u_int32_t rfilt;
/* configure rx filter */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
rfilt = ath_calcrxfilter(sc);
ath_hal_setrxfilter(sc->sc_ah, rfilt);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}
+/*
+ * Driver-internal mcast update call.
+ *
+ * Assumes the hardware is already awake.
+ */
static void
-ath_update_mcast(struct ifnet *ifp)
+ath_update_mcast_hw(struct ath_softc *sc)
{
- struct ath_softc *sc = ifp->if_softc;
+ struct ifnet *ifp = sc->sc_ifp;
u_int32_t mfilt[2];
/* calculate and install multicast filter */
@@ -3057,11 +3416,33 @@ ath_update_mcast(struct ifnet *ifp)
if_maddr_runlock(ifp);
} else
mfilt[0] = mfilt[1] = ~0;
+
ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
+
DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
__func__, mfilt[0], mfilt[1]);
}
+/*
+ * Called from the net80211 layer - force the hardware
+ * awake before operating.
+ */
+static void
+ath_update_mcast(struct ifnet *ifp)
+{
+ struct ath_softc *sc = ifp->if_softc;
+
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
+ ath_update_mcast_hw(sc);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+}
+
void
ath_mode_init(struct ath_softc *sc)
{
@@ -3087,7 +3468,7 @@ ath_mode_init(struct ath_softc *sc)
ath_hal_setmac(ah, IF_LLADDR(ifp));
/* calculate and install multicast filter */
- ath_update_mcast(ifp);
+ ath_update_mcast_hw(sc);
}
/*
@@ -3119,8 +3500,13 @@ ath_setslottime(struct ath_softc *sc)
__func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
+ /* Wake up the hardware first before updating the slot time */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_hal_setslottime(ah, usec);
+ ath_power_restore_power_state(sc);
sc->sc_updateslot = OK;
+ ATH_UNLOCK(sc);
}
/*
@@ -3137,6 +3523,8 @@ ath_updateslot(struct ifnet *ifp)
* When not coordinating the BSS, change the hardware
* immediately. For other operation we defer the change
* until beacon updates have propagated to the stations.
+ *
+ * XXX sc_updateslot isn't changed behind a lock?
*/
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
@@ -4258,6 +4646,10 @@ ath_tx_proc_q0(void *arg, int npending)
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
"ath_tx_proc_q0: txqs=0x%08x", txqs);
@@ -4278,6 +4670,10 @@ ath_tx_proc_q0(void *arg, int npending)
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ath_tx_kick(sc);
}
@@ -4299,6 +4695,10 @@ ath_tx_proc_q0123(void *arg, int npending)
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
"ath_tx_proc_q0123: txqs=0x%08x", txqs);
@@ -4331,6 +4731,10 @@ ath_tx_proc_q0123(void *arg, int npending)
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ath_tx_kick(sc);
}
@@ -4351,6 +4755,10 @@ ath_tx_proc(void *arg, int npending)
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
/*
@@ -4376,6 +4784,10 @@ ath_tx_proc(void *arg, int npending)
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ath_tx_kick(sc);
}
#undef TXQACTIVE
@@ -4402,6 +4814,10 @@ ath_txq_sched_tasklet(void *arg, int npending)
sc->sc_txproc_cnt++;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_TX_LOCK(sc);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
@@ -4410,6 +4826,10 @@ ath_txq_sched_tasklet(void *arg, int npending)
}
ATH_TX_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
@@ -5057,6 +5477,15 @@ ath_calibrate(void *arg)
HAL_BOOL aniCal, shortCal = AH_FALSE;
int nextcal;
+ /*
+ * Force the hardware awake for ANI work.
+ */
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+
+ /* Skip trying to do this if we're in reset */
+ if (sc->sc_inreset_cnt)
+ goto restart;
+
if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
goto restart;
longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
@@ -5086,6 +5515,7 @@ ath_calibrate(void *arg)
sc->sc_doresetcal = AH_TRUE;
taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
+ ath_power_restore_power_state(sc);
return;
}
/*
@@ -5157,6 +5587,10 @@ restart:
__func__);
/* NB: don't rearm timer */
}
+ /*
+ * Restore power state now that we're done.
+ */
+ ath_power_restore_power_state(sc);
}
static void
@@ -5242,6 +5676,10 @@ ath_set_channel(struct ieee80211com *ic)
struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ifp->if_softc;
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
(void) ath_chan_set(sc, ic->ic_curchan);
/*
* If we are returning to our bss channel then mark state
@@ -5252,6 +5690,7 @@ ath_set_channel(struct ieee80211com *ic)
ATH_LOCK(sc);
if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
sc->sc_syncbeacon = 1;
+ ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
@@ -5284,6 +5723,7 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
int i, error, stamode;
u_int32_t rfilt;
int csa_run_transition = 0;
+ enum ieee80211_state ostate = vap->iv_state;
static const HAL_LED_STATE leds[] = {
HAL_LED_INIT, /* IEEE80211_S_INIT */
@@ -5297,7 +5737,7 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
};
DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
- ieee80211_state_name[vap->iv_state],
+ ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
/*
@@ -5309,7 +5749,26 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
*/
IEEE80211_LOCK_ASSERT(ic);
- if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
+ /* Before we touch the hardware - wake it up */
+ ATH_LOCK(sc);
+ /*
+ * If the NIC is in anything other than SLEEP state,
+ * we need to ensure that self-generated frames are
+ * set for PWRMGT=0. Otherwise we may end up with
+ * strange situations.
+ *
+ * XXX TODO: is this actually the case? :-)
+ */
+ if (nstate != IEEE80211_S_SLEEP)
+ ath_power_setselfgen(sc, HAL_PM_AWAKE);
+
+ /*
+ * Now, wake the thing up.
+ */
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
+ if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
csa_run_transition = 1;
callout_drain(&sc->sc_cal_ch);
@@ -5322,6 +5781,13 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
* [re]setup beacons. Unblock the task q thread so
* deferred interrupt processing is done.
*/
+
+ /* Ensure we stay awake during scan */
+ ATH_LOCK(sc);
+ ath_power_setselfgen(sc, HAL_PM_AWAKE);
+ ath_power_setpower(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ath_hal_intrset(ah,
sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
@@ -5334,6 +5800,11 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
stamode = (vap->iv_opmode == IEEE80211_M_STA ||
vap->iv_opmode == IEEE80211_M_AHDEMO ||
vap->iv_opmode == IEEE80211_M_IBSS);
+
+ /*
+ * XXX Dont need to do this (and others) if we've transitioned
+ * from SLEEP->RUN.
+ */
if (stamode && nstate == IEEE80211_S_RUN) {
sc->sc_curaid = ni->ni_associd;
IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
@@ -5436,11 +5907,14 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
* beacon to update the beacon timer and thus we
* won't get notified of the missing beacons.
*/
- sc->sc_syncbeacon = 1;
-#if 0
- if (csa_run_transition)
-#endif
- ath_beacon_config(sc, vap);
+ if (ostate != IEEE80211_S_RUN &&
+ ostate != IEEE80211_S_SLEEP) {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: STA; syncbeacon=1\n", __func__);
+ sc->sc_syncbeacon = 1;
+
+ if (csa_run_transition)
+ ath_beacon_config(sc, vap);
/*
* PR: kern/175227
@@ -5454,7 +5928,8 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
* timer fires (too often), leading to a STA
* disassociation.
*/
- sc->sc_beacons = 1;
+ sc->sc_beacons = 1;
+ }
break;
case IEEE80211_M_MONITOR:
/*
@@ -5480,6 +5955,15 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
+
+ /*
+ * Force awake for RUN mode.
+ */
+ ATH_LOCK(sc);
+ ath_power_setselfgen(sc, HAL_PM_AWAKE);
+ ath_power_setpower(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
/*
* Finally, start any timers and the task q thread
* (in case we didn't go through SCAN state).
@@ -5491,6 +5975,7 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
DPRINTF(sc, ATH_DEBUG_CALIBRATE,
"%s: calibration disabled\n", __func__);
}
+
taskqueue_unblock(sc->sc_tq);
} else if (nstate == IEEE80211_S_INIT) {
/*
@@ -5510,9 +5995,43 @@ ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
#ifdef IEEE80211_SUPPORT_TDMA
ath_hal_setcca(ah, AH_TRUE);
#endif
+ } else if (nstate == IEEE80211_S_SLEEP) {
+ /* We're going to sleep, so transition appropriately */
+ /* For now, only do this if we're a single STA vap */
+ if (sc->sc_nvaps == 1 &&
+ vap->iv_opmode == IEEE80211_M_STA) {
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon);
+ ATH_LOCK(sc);
+ /*
+ * Always at least set the self-generated
+ * frame config to set PWRMGT=1.
+ */
+ ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);
+
+ /*
+ * If we're not syncing beacons, transition
+ * to NETWORK_SLEEP.
+ *
+ * We stay awake if syncbeacon > 0 in case
+ * we need to listen for some beacons otherwise
+ * our beacon timer config may be wrong.
+ */
+ if (sc->sc_syncbeacon == 0) {
+ ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
+ }
+ ATH_UNLOCK(sc);
+ }
}
bad:
ieee80211_free_node(ni);
+
+ /*
+ * Restore the power state - either to what it was, or
+ * to network_sleep if it's alright.
+ */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
return error;
}
@@ -5567,6 +6086,13 @@ ath_newassoc(struct ieee80211_node *ni, int isnew)
an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
+ DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
+ __func__,
+ ni->ni_macaddr,
+ ":",
+ isnew,
+ an->an_is_powersave);
+
ATH_NODE_LOCK(an);
ath_rate_newassoc(sc, an, isnew);
ATH_NODE_UNLOCK(an);
@@ -5813,6 +6339,10 @@ ath_watchdog(void *arg)
struct ifnet *ifp = sc->sc_ifp;
uint32_t hangs;
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
hangs != 0) {
if_printf(ifp, "%s hang detected (0x%x)\n",
@@ -5822,6 +6352,10 @@ ath_watchdog(void *arg)
do_reset = 1;
ifp->if_oerrors++;
sc->sc_stats.ast_watchdog++;
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
}
/*
@@ -5919,6 +6453,13 @@ ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
goto bad;
}
}
+
+
+ ATH_LOCK(sc);
+ if (id != HAL_DIAG_REGS)
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
if (outsize < ad->ad_out_size)
ad->ad_out_size = outsize;
@@ -5928,6 +6469,12 @@ ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
} else {
error = EINVAL;
}
+
+ ATH_LOCK(sc);
+ if (id != HAL_DIAG_REGS)
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
bad:
if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
free(indata, M_TEMP);
@@ -5957,7 +6504,9 @@ ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
* only reflect promisc mode settings.
*/
ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_mode_init(sc);
+ ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
} else if (ifp->if_flags & IFF_UP) {
/*
@@ -5974,11 +6523,8 @@ ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
} else {
ATH_LOCK(sc);
ath_stop_locked(ifp);
-#ifdef notyet
- /* XXX must wakeup in places like ath_vap_delete */
if (!sc->sc_invalid)
- ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
-#endif
+ ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
ATH_UNLOCK(sc);
}
break;
diff --git a/sys/dev/ath/if_ath_beacon.c b/sys/dev/ath/if_ath_beacon.c
index f721de64c93d2..317f83a358dda 100644
--- a/sys/dev/ath/if_ath_beacon.c
+++ b/sys/dev/ath/if_ath_beacon.c
@@ -382,7 +382,7 @@ ath_beacon_update(struct ieee80211vap *vap, int item)
/*
* Handle a beacon miss.
*/
-static void
+void
ath_beacon_miss(struct ath_softc *sc)
{
HAL_SURVEY_SAMPLE hs;
@@ -916,7 +916,7 @@ ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
struct ieee80211_node *ni;
u_int32_t nexttbtt, intval, tsftu;
u_int32_t nexttbtt_u8, intval_u8;
- u_int64_t tsf;
+ u_int64_t tsf, tsf_beacon;
if (vap == NULL)
vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
@@ -932,9 +932,17 @@ ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
ni = ieee80211_ref_node(vap->iv_bss);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
/* extract tstamp from last beacon and convert to TU */
nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
LE_READ_4(ni->ni_tstamp.data));
+
+ tsf_beacon = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
+ tsf_beacon |= LE_READ_4(ni->ni_tstamp.data);
+
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS) {
/*
@@ -980,14 +988,63 @@ ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
*/
tsf = ath_hal_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
- do {
- nexttbtt += intval;
- if (--dtimcount < 0) {
- dtimcount = dtimperiod - 1;
- if (--cfpcount < 0)
- cfpcount = cfpperiod - 1;
+
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: beacon tsf=%llu, hw tsf=%llu, nexttbtt=%u, tsftu=%u\n",
+ __func__,
+ (unsigned long long) tsf_beacon,
+ (unsigned long long) tsf,
+ nexttbtt,
+ tsftu);
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: beacon tsf=%llu, hw tsf=%llu, tsf delta=%lld\n",
+ __func__,
+ (unsigned long long) tsf_beacon,
+ (unsigned long long) tsf,
+ (long long) tsf -
+ (long long) tsf_beacon);
+
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: nexttbtt=%llu, beacon tsf delta=%lld\n",
+ __func__,
+ (unsigned long long) nexttbtt,
+ (long long) ((long long) nexttbtt * 1024LL) - (long long) tsf_beacon);
+
+ /* XXX cfpcount? */
+
+ if (nexttbtt > tsftu) {
+ uint32_t countdiff, oldtbtt, remainder;
+
+ oldtbtt = nexttbtt;
+ remainder = (nexttbtt - tsftu) % intval;
+ nexttbtt = tsftu + remainder;
+
+ countdiff = (oldtbtt - nexttbtt) / intval % dtimperiod;
+ if (dtimcount > countdiff) {
+ dtimcount -= countdiff;
+ } else {
+ dtimcount += dtimperiod - countdiff;
+ }
+ } else { //nexttbtt <= tsftu
+ uint32_t countdiff, oldtbtt, remainder;
+
+ oldtbtt = nexttbtt;
+ remainder = (tsftu - nexttbtt) % intval;
+ nexttbtt = tsftu - remainder + intval;
+ countdiff = (nexttbtt - oldtbtt) / intval % dtimperiod;
+ if (dtimcount > countdiff) {
+ dtimcount -= countdiff;
+ } else {
+ dtimcount += dtimperiod - countdiff;
}
- } while (nexttbtt < tsftu);
+ }
+
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: adj nexttbtt=%llu, rx tsf delta=%lld\n",
+ __func__,
+ (unsigned long long) nexttbtt,
+ (long long) ((long long)nexttbtt * 1024LL) - (long long)tsf);
+
memset(&bs, 0, sizeof(bs));
bs.bs_intval = intval;
bs.bs_nexttbtt = nexttbtt;
@@ -1034,9 +1091,12 @@ ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
DPRINTF(sc, ATH_DEBUG_BEACON,
- "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
+ "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u "
+ "nextdtim %u bmiss %u sleep %u cfp:period %u "
+ "maxdur %u next %u timoffset %u\n"
, __func__
- , tsf, tsftu
+ , tsf
+ , tsftu
, bs.bs_intval
, bs.bs_nexttbtt
, bs.bs_dtimperiod
@@ -1113,8 +1173,11 @@ ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
ath_beacon_start_adhoc(sc, vap);
}
- sc->sc_syncbeacon = 0;
ieee80211_free_node(ni);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
#undef FUDGE
#undef TSF_TO_TU
}
diff --git a/sys/dev/ath/if_ath_beacon.h b/sys/dev/ath/if_ath_beacon.h
index f3f73d7166cf1..a9402680ee27e 100644
--- a/sys/dev/ath/if_ath_beacon.h
+++ b/sys/dev/ath/if_ath_beacon.h
@@ -48,5 +48,7 @@ extern int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni);
extern void ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf);
extern void ath_beacon_free(struct ath_softc *sc);
extern void ath_beacon_proc(void *arg, int pending);
+extern void ath_beacon_miss(struct ath_softc *sc);
#endif
+
diff --git a/sys/dev/ath/if_ath_debug.h b/sys/dev/ath/if_ath_debug.h
index 83597afaa1a41..40c0b9a5ac74d 100644
--- a/sys/dev/ath/if_ath_debug.h
+++ b/sys/dev/ath/if_ath_debug.h
@@ -68,6 +68,7 @@ enum {
ATH_DEBUG_SW_TX_FILT = 0x400000000ULL, /* SW TX FF */
ATH_DEBUG_NODE_PWRSAVE = 0x800000000ULL, /* node powersave */
ATH_DEBUG_DIVERSITY = 0x1000000000ULL, /* Diversity logic */
+ ATH_DEBUG_PWRSAVE = 0x2000000000ULL,
ATH_DEBUG_ANY = 0xffffffffffffffffULL
};
diff --git a/sys/dev/ath/if_ath_keycache.c b/sys/dev/ath/if_ath_keycache.c
index a9e6df0e46e81..fe99f106ac50c 100644
--- a/sys/dev/ath/if_ath_keycache.c
+++ b/sys/dev/ath/if_ath_keycache.c
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_keycache.h>
+#include <dev/ath/if_ath_misc.h>
#ifdef ATH_DEBUG
static void
@@ -198,6 +199,7 @@ ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
u_int8_t gmac[IEEE80211_ADDR_LEN];
const u_int8_t *mac;
HAL_KEYVAL hk;
+ int ret;
memset(&hk, 0, sizeof(hk));
/*
@@ -251,13 +253,19 @@ ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap,
} else
mac = k->wk_macaddr;
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
if (hk.kv_type == HAL_CIPHER_TKIP &&
(k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
- return ath_keyset_tkip(sc, k, &hk, mac);
+ ret = ath_keyset_tkip(sc, k, &hk, mac);
} else {
KEYPRINTF(sc, k->wk_keyix, &hk, mac);
- return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
+ ret = ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
}
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+ return (ret);
#undef N
}
@@ -492,6 +500,8 @@ ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_hal_keyreset(ah, keyix);
/*
* Handle split tx/rx keying required for TKIP with h/w MIC.
@@ -515,6 +525,8 @@ ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
}
}
}
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
return 1;
}
diff --git a/sys/dev/ath/if_ath_led.c b/sys/dev/ath/if_ath_led.c
index 33cc512ebf1ed..a55e0364689e3 100644
--- a/sys/dev/ath/if_ath_led.c
+++ b/sys/dev/ath/if_ath_led.c
@@ -122,6 +122,11 @@ __FBSDID("$FreeBSD$");
void
ath_led_config(struct ath_softc *sc)
{
+
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
/* Software LED blinking - GPIO controlled LED */
if (sc->sc_softled) {
ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
@@ -144,6 +149,10 @@ ath_led_config(struct ath_softc *sc)
ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_led_net_pin,
HAL_GPIO_OUTPUT_MUX_MAC_NETWORK_LED);
}
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
}
static void
diff --git a/sys/dev/ath/if_ath_misc.h b/sys/dev/ath/if_ath_misc.h
index 0c99bc7cdaea1..711e69e87ba54 100644
--- a/sys/dev/ath/if_ath_misc.h
+++ b/sys/dev/ath/if_ath_misc.h
@@ -128,6 +128,19 @@ extern void ath_start_task(void *arg, int npending);
extern void ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq);
/*
+ * Power state tracking.
+ */
+extern void _ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line);
+extern void _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line);
+extern void _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line);
+extern void _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line);
+
+#define ath_power_setpower(sc, ps) _ath_power_setpower(sc, ps, __FILE__, __LINE__)
+#define ath_power_setselfgen(sc, ps) _ath_power_set_selfgen(sc, ps, __FILE__, __LINE__)
+#define ath_power_set_power_state(sc, ps) _ath_power_set_power_state(sc, ps, __FILE__, __LINE__)
+#define ath_power_restore_power_state(sc) _ath_power_restore_power_state(sc, __FILE__, __LINE__)
+
+/*
* Kick the frame TX task.
*/
static inline void
diff --git a/sys/dev/ath/if_ath_rx.c b/sys/dev/ath/if_ath_rx.c
index 5c30a46cbef2a..2f6cb58487d54 100644
--- a/sys/dev/ath/if_ath_rx.c
+++ b/sys/dev/ath/if_ath_rx.c
@@ -166,10 +166,22 @@ ath_calcrxfilter(struct ath_softc *sc)
/* XXX ic->ic_monvaps != 0? */
if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
rfilt |= HAL_RX_FILTER_PROM;
+
+ /*
+ * Only listen to all beacons if we're scanning.
+ *
+ * Otherwise we only really need to hear beacons from
+ * our own BSSID.
+ */
if (ic->ic_opmode == IEEE80211_M_STA ||
- ic->ic_opmode == IEEE80211_M_IBSS ||
- sc->sc_swbmiss || sc->sc_scanning)
- rfilt |= HAL_RX_FILTER_BEACON;
+ ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) {
+ if (sc->sc_do_mybeacon && ! sc->sc_scanning) {
+ rfilt |= HAL_RX_FILTER_MYBEACON;
+ } else { /* scanning, non-mybeacon chips */
+ rfilt |= HAL_RX_FILTER_BEACON;
+ }
+ }
+
/*
* NB: We don't recalculate the rx filter when
* ic_protmode changes; otherwise we could do
@@ -233,6 +245,8 @@ ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
struct mbuf *m;
struct ath_desc *ds;
+ /* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */
+
m = bf->bf_m;
if (m == NULL) {
/*
@@ -317,6 +331,23 @@ ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ uint64_t tsf_beacon_old, tsf_beacon;
+ uint64_t nexttbtt;
+ int64_t tsf_delta;
+ int32_t tsf_delta_bmiss;
+ int32_t tsf_remainder;
+ uint64_t tsf_beacon_target;
+ int tsf_intval;
+
+ tsf_beacon_old = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
+ tsf_beacon_old |= LE_READ_4(ni->ni_tstamp.data);
+
+#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
+ tsf_intval = 1;
+ if (ni->ni_intval > 0) {
+ tsf_intval = TU_TO_TSF(ni->ni_intval);
+ }
+#undef TU_TO_TSF
/*
* Call up first so subsequent work can use information
@@ -328,14 +359,79 @@ ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
/* update rssi statistics for use by the hal */
/* XXX unlocked check against vap->iv_bss? */
ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
+
+ tsf_beacon = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
+ tsf_beacon |= LE_READ_4(ni->ni_tstamp.data);
+
+ nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);
+
+ /*
+ * Let's calculate the delta and remainder, so we can see
+ * if the beacon timer from the AP is varying by more than
+ * a few TU. (Which would be a huge, huge problem.)
+ */
+ tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;
+
+ tsf_delta_bmiss = tsf_delta / tsf_intval;
+
+ /*
+ * If our delta is greater than half the beacon interval,
+ * let's round the bmiss value up to the next beacon
+ * interval. Ie, we're running really, really early
+ * on the next beacon.
+ */
+ if (tsf_delta % tsf_intval > (tsf_intval / 2))
+ tsf_delta_bmiss ++;
+
+ tsf_beacon_target = tsf_beacon_old +
+ (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);
+
+ /*
+ * The remainder using '%' is between 0 .. intval-1.
+ * If we're actually running too fast, then the remainder
+ * will be some large number just under intval-1.
+ * So we need to look at whether we're running
+ * before or after the target beacon interval
+ * and if we are, modify how we do the remainder
+ * calculation.
+ */
+ if (tsf_beacon < tsf_beacon_target) {
+ tsf_remainder =
+ -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
+ } else {
+ tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
+ }
+
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu, new_tsf=%llu, target_tsf=%llu, delta=%lld, bmiss=%d, remainder=%d\n",
+ __func__,
+ (unsigned long long) tsf_beacon_old,
+ (unsigned long long) tsf_beacon,
+ (unsigned long long) tsf_beacon_target,
+ (long long) tsf_delta,
+ tsf_delta_bmiss,
+ tsf_remainder);
+
+ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu, nexttbtt=%llu, delta=%d\n",
+ __func__,
+ (unsigned long long) tsf_beacon,
+ (unsigned long long) nexttbtt,
+ (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);
+
if (sc->sc_syncbeacon &&
- ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
+ ni == vap->iv_bss &&
+ (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
+ DPRINTF(sc, ATH_DEBUG_BEACON,
+ "%s: syncbeacon=1; syncing\n",
+ __func__);
/*
* Resync beacon timers using the tsf of the beacon
* frame we just received.
*/
ath_beacon_config(sc, vap);
+ sc->sc_syncbeacon = 0;
}
+
+
/* fall thru... */
case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
if (vap->iv_opmode == IEEE80211_M_IBSS &&
@@ -880,6 +976,14 @@ rx_next:
#define ATH_RX_MAX 128
+/*
+ * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
+ * the EDMA code does.
+ *
+ * XXX TODO: then, do all of the RX list management stuff inside
+ * ATH_RX_LOCK() so we don't end up potentially racing. The EDMA
+ * code is doing it right.
+ */
static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
@@ -901,6 +1005,7 @@ ath_rx_proc(struct ath_softc *sc, int resched)
u_int64_t tsf;
int npkts = 0;
int kickpcu = 0;
+ int ret;
/* XXX we must not hold the ATH_LOCK here */
ATH_UNLOCK_ASSERT(sc);
@@ -911,6 +1016,10 @@ ath_rx_proc(struct ath_softc *sc, int resched)
kickpcu = sc->sc_kickpcu;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
ngood = 0;
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
@@ -996,8 +1105,26 @@ ath_rx_proc(struct ath_softc *sc, int resched)
if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
ngood++;
rx_proc_next:
- TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
- } while (ath_rxbuf_init(sc, bf) == 0);
+ /*
+ * If there's a holding buffer, insert that onto
+ * the RX list; the hardware is now definitely not pointing
+ * to it now.
+ */
+ ret = 0;
+ if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) {
+ TAILQ_INSERT_TAIL(&sc->sc_rxbuf,
+ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf,
+ bf_list);
+ ret = ath_rxbuf_init(sc,
+ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf);
+ }
+ /*
+ * Next, throw our buffer into the holding entry. The hardware
+ * may use the descriptor to read the link pointer before
+ * DMAing the next descriptor in to write out a packet.
+ */
+ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf;
+ } while (ret == 0);
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
@@ -1029,6 +1156,13 @@ rx_proc_next:
* constantly write over the same frame, leading
* the RX driver code here to get heavily confused.
*/
+ /*
+ * XXX Has RX DMA stopped enough here to just call
+ * ath_startrecv()?
+ * XXX Do we need to use the holding buffer to restart
+ * RX DMA by appending entries to the final
+ * descriptor? Quite likely.
+ */
#if 1
ath_startrecv(sc);
#else
@@ -1066,6 +1200,13 @@ rx_proc_next:
#undef PA2DESC
/*
+ * Put the hardware to sleep again if we're done with it.
+ */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+ /*
* If we hit the maximum number of frames in this round,
* reschedule for another immediate pass. This gives
* the TX and TX completion routines time to run, which
@@ -1112,6 +1253,58 @@ ath_legacy_flushrecv(struct ath_softc *sc)
ath_rx_proc(sc, 0);
}
+static void
+ath_legacy_flush_rxpending(struct ath_softc *sc)
+{
+
+ /* XXX ATH_RX_LOCK_ASSERT(sc); */
+
+ if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
+ m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
+ sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
+ }
+ if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
+ m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
+ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
+ }
+}
+
+static int
+ath_legacy_flush_rxholdbf(struct ath_softc *sc)
+{
+ struct ath_buf *bf;
+
+ /* XXX ATH_RX_LOCK_ASSERT(sc); */
+ /*
+ * If there are RX holding buffers, free them here and return
+ * them to the list.
+ *
+ * XXX should just verify that bf->bf_m is NULL, as it must
+ * be at this point!
+ */
+ bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf;
+ if (bf != NULL) {
+ if (bf->bf_m != NULL)
+ m_freem(bf->bf_m);
+ bf->bf_m = NULL;
+ TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
+ (void) ath_rxbuf_init(sc, bf);
+ }
+ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL;
+
+ bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf;
+ if (bf != NULL) {
+ if (bf->bf_m != NULL)
+ m_freem(bf->bf_m);
+ bf->bf_m = NULL;
+ TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
+ (void) ath_rxbuf_init(sc, bf);
+ }
+ sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL;
+
+ return (0);
+}
+
/*
* Disable the receive h/w in preparation for a reset.
*/
@@ -1123,6 +1316,8 @@ ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
struct ath_hal *ah = sc->sc_ah;
+ ATH_RX_LOCK(sc);
+
ath_hal_stoppcurecv(ah); /* disable PCU */
ath_hal_setrxfilter(ah, 0); /* clear recv filter */
ath_hal_stopdmarecv(ah); /* disable DMA engine */
@@ -1156,22 +1351,23 @@ ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
}
}
#endif
- /*
- * Free both high/low RX pending, just in case.
- */
- if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
- m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
- sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
- }
- if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
- m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
- sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
- }
+
+ (void) ath_legacy_flush_rxpending(sc);
+ (void) ath_legacy_flush_rxholdbf(sc);
+
sc->sc_rxlink = NULL; /* just in case */
+
+ ATH_RX_UNLOCK(sc);
#undef PA2DESC
}
/*
+ * XXX TODO: something was calling startrecv without calling
+ * stoprecv. Let's figure out what/why. It was showing up
+ * as a mbuf leak (rxpending) and ath_buf leak (holdbf.)
+ */
+
+/*
* Enable the receive h/w following a reset.
*/
static int
@@ -1180,9 +1376,18 @@ ath_legacy_startrecv(struct ath_softc *sc)
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
+ ATH_RX_LOCK(sc);
+
+ /*
+ * XXX should verify these are already all NULL!
+ */
sc->sc_rxlink = NULL;
- sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
- sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
+ (void) ath_legacy_flush_rxpending(sc);
+ (void) ath_legacy_flush_rxholdbf(sc);
+
+ /*
+ * Re-chain all of the buffers in the RX buffer list.
+ */
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
int error = ath_rxbuf_init(sc, bf);
if (error != 0) {
@@ -1198,6 +1403,8 @@ ath_legacy_startrecv(struct ath_softc *sc)
ath_hal_rxena(ah); /* enable recv descriptors */
ath_mode_init(sc); /* set filters, etc. */
ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
+
+ ATH_RX_UNLOCK(sc);
return 0;
}
diff --git a/sys/dev/ath/if_ath_rx_edma.c b/sys/dev/ath/if_ath_rx_edma.c
index 780367e7fb64f..a8758cfa268b7 100644
--- a/sys/dev/ath/if_ath_rx_edma.c
+++ b/sys/dev/ath/if_ath_rx_edma.c
@@ -286,7 +286,16 @@ ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ath_edma_recv_proc_queue(sc, qtype, dosched);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
@@ -294,8 +303,17 @@ static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
+
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
@@ -309,6 +327,10 @@ ath_edma_recv_flush(struct ath_softc *sc)
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
/*
* Flush any active frames from FIFO -> deferred list
*/
@@ -318,9 +340,18 @@ ath_edma_recv_flush(struct ath_softc *sc)
/*
* Process what's in the deferred queue
*/
+ /*
+ * XXX: If we read the tsf/channoise here and then pass it in,
+ * we could restore the power state before processing
+ * the deferred queue.
+ */
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
@@ -560,12 +591,25 @@ ath_edma_recv_tasklet(void *arg, int npending)
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);
+ /*
+ * XXX: If we read the tsf/channoise here and then pass it in,
+ * we could restore the power state before processing
+ * the deferred queue.
+ */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
/* XXX inside IF_LOCK ? */
if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
@@ -844,10 +888,13 @@ ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
qtype);
return (-EINVAL);
}
- device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
- __func__,
- qtype,
- re->m_fifolen);
+
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "%s: type=%d, FIFO depth = %d entries\n",
+ __func__,
+ qtype,
+ re->m_fifolen);
/* Allocate ath_buf FIFO array, pre-zero'ed */
re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
@@ -938,10 +985,12 @@ ath_recv_setup_edma(struct ath_softc *sc)
(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
sc->sc_rx_statuslen);
- device_printf(sc->sc_dev, "RX status length: %d\n",
- sc->sc_rx_statuslen);
- device_printf(sc->sc_dev, "RX buffer size: %d\n",
- sc->sc_edma_bufsize);
+ if (bootverbose) {
+ device_printf(sc->sc_dev, "RX status length: %d\n",
+ sc->sc_rx_statuslen);
+ device_printf(sc->sc_dev, "RX buffer size: %d\n",
+ sc->sc_edma_bufsize);
+ }
sc->sc_rx.recv_stop = ath_edma_stoprecv;
sc->sc_rx.recv_start = ath_edma_startrecv;
diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c
index f1b6dd264c181..40a34d479db6e 100644
--- a/sys/dev/ath/if_ath_sysctl.c
+++ b/sys/dev/ath/if_ath_sysctl.c
@@ -108,13 +108,26 @@ static int
ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
- u_int slottime = ath_hal_getslottime(sc->sc_ah);
+ u_int slottime;
int error;
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ slottime = ath_hal_getslottime(sc->sc_ah);
+ ATH_UNLOCK(sc);
+
error = sysctl_handle_int(oidp, &slottime, 0, req);
if (error || !req->newptr)
- return error;
- return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
+ goto finish;
+
+ error = !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
+
+finish:
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+ return error;
}
static int
@@ -400,12 +413,14 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
ATH_RX_LOCK(sc);
for (i = 0; i < 2; i++) {
- printf("%d: fifolen: %d/%d; head=%d; tail=%d\n",
+ printf("%d: fifolen: %d/%d; head=%d; tail=%d; m_pending=%p, m_holdbf=%p\n",
i,
sc->sc_rxedma[i].m_fifo_depth,
sc->sc_rxedma[i].m_fifolen,
sc->sc_rxedma[i].m_fifo_head,
- sc->sc_rxedma[i].m_fifo_tail);
+ sc->sc_rxedma[i].m_fifo_tail,
+ sc->sc_rxedma[i].m_rxpending,
+ sc->sc_rxedma[i].m_holdbf);
}
i = 0;
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
diff --git a/sys/dev/ath/if_ath_tdma.c b/sys/dev/ath/if_ath_tdma.c
index 3ea70e359a07f..de1a91ca02e90 100644
--- a/sys/dev/ath/if_ath_tdma.c
+++ b/sys/dev/ath/if_ath_tdma.c
@@ -477,16 +477,19 @@ ath_tdma_update(struct ieee80211_node *ni,
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"rs->rstamp %llu rstamp %llu tsf %llu txtime %d, nextslot %llu, "
"nextslottu %d, nextslottume %d\n",
- (unsigned long long) rs->rs_tstamp, rstamp, tsf, txtime,
- nextslot, nextslottu, TSF_TO_TU(nextslot >> 32, nextslot));
+ (unsigned long long) rs->rs_tstamp,
+ (unsigned long long) rstamp,
+ (unsigned long long) tsf, txtime,
+ (unsigned long long) nextslot,
+ nextslottu, TSF_TO_TU(nextslot >> 32, nextslot));
DPRINTF(sc, ATH_DEBUG_TDMA,
" beacon tstamp: %llu (0x%016llx)\n",
- le64toh(ni->ni_tstamp.tsf),
- le64toh(ni->ni_tstamp.tsf));
+ (unsigned long long) le64toh(ni->ni_tstamp.tsf),
+ (unsigned long long) le64toh(ni->ni_tstamp.tsf));
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"nexttbtt %llu (0x%08llx) tsfdelta %d avg +%d/-%d\n",
- nexttbtt,
+ (unsigned long long) nexttbtt,
(long long) nexttbtt,
tsfdelta,
TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
@@ -580,7 +583,7 @@ ath_tdma_update(struct ieee80211_node *ni,
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"%s: calling ath_hal_adjusttsf: TSF=%llu, tsfdelta=%d\n",
__func__,
- tsf,
+ (unsigned long long) tsf,
tsfdelta);
#ifdef ATH_DEBUG_ALQ
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index d4e3ebd378725..1b140da6b0775 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -761,37 +761,21 @@ ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
("ath_tx_handoff_hw called for mcast queue"));
/*
- * XXX racy, should hold the PCU lock when checking this,
- * and also should ensure that the TX counter is >0!
+ * XXX We should instead just verify that sc_txstart_cnt
+ * or ath_txproc_cnt > 0. That would mean that
+ * the reset is going to be waiting for us to complete.
*/
- KASSERT((sc->sc_inreset_cnt == 0),
- ("%s: TX during reset?\n", __func__));
+ if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
+ device_printf(sc->sc_dev,
+ "%s: TX dispatch without holding txcount/txstart refcnt!\n",
+ __func__);
+ }
-#if 0
/*
- * This causes a LOR. Find out where the PCU lock is being
- * held whilst the TXQ lock is grabbed - that shouldn't
- * be occuring.
+ * XXX .. this is going to cause the hardware to get upset;
+ * so we really should find some way to drop or queue
+ * things.
*/
- ATH_PCU_LOCK(sc);
- if (sc->sc_inreset_cnt) {
- ATH_PCU_UNLOCK(sc);
- DPRINTF(sc, ATH_DEBUG_RESET,
- "%s: called with sc_in_reset != 0\n",
- __func__);
- DPRINTF(sc, ATH_DEBUG_XMIT,
- "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
- __func__, txq->axq_qnum,
- (caddr_t)bf->bf_daddr, bf->bf_desc,
- txq->axq_depth);
- /* XXX axq_link needs to be set and updated! */
- ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
- if (bf->bf_state.bfs_aggr)
- txq->axq_aggr_depth++;
- return;
- }
- ATH_PCU_UNLOCK(sc);
-#endif
ATH_TXQ_LOCK(txq);
@@ -1615,6 +1599,7 @@ ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
error = ath_tx_dmasetup(sc, bf, m0);
if (error != 0)
return error;
+ KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
bf->bf_node = ni; /* NB: held reference */
m0 = bf->bf_m; /* NB: may have changed */
wh = mtod(m0, struct ieee80211_frame *);
@@ -2106,6 +2091,7 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
int do_override;
uint8_t type, subtype;
int queue_to_head;
+ struct ath_node *an = ATH_NODE(ni);
ATH_TX_LOCK_ASSERT(sc);
@@ -2165,6 +2151,7 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
return error;
m0 = bf->bf_m; /* NB: may have changed */
wh = mtod(m0, struct ieee80211_frame *);
+ KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
bf->bf_node = ni; /* NB: held reference */
/* Always enable CLRDMASK for raw frames for now.. */
@@ -2183,12 +2170,24 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
rt = sc->sc_currates;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
+
+ /* Fetch first rate information */
rix = ath_tx_findrix(sc, params->ibp_rate0);
+ try0 = params->ibp_try0;
+
+ /*
+ * Override EAPOL rate as appropriate.
+ */
+ if (m0->m_flags & M_EAPOL) {
+ /* XXX? maybe always use long preamble? */
+ rix = an->an_mgmtrix;
+ try0 = ATH_TXMAXTRY; /* XXX?too many? */
+ }
+
txrate = rt->info[rix].rateCode;
if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
txrate |= rt->info[rix].shortPreamble;
sc->sc_txrix = rix;
- try0 = params->ibp_try0;
ismrr = (params->ibp_try1 != 0);
txantenna = params->ibp_pri >> 2;
if (txantenna == 0) /* XXX? */
@@ -2261,8 +2260,7 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* Blank the legacy rate array */
bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
- bf->bf_state.bfs_rc[0].rix =
- ath_tx_findrix(sc, params->ibp_rate0);
+ bf->bf_state.bfs_rc[0].rix = rix;
bf->bf_state.bfs_rc[0].tries = try0;
bf->bf_state.bfs_rc[0].ratecode = txrate;
@@ -2354,11 +2352,16 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
"%s: sc_inreset_cnt > 0; bailing\n", __func__);
error = EIO;
ATH_PCU_UNLOCK(sc);
- goto bad0;
+ goto badbad;
}
sc->sc_txstart_cnt++;
ATH_PCU_UNLOCK(sc);
+ /* Wake the hardware up already */
+ ATH_LOCK(sc);
+ ath_power_set_power_state(sc, HAL_PM_AWAKE);
+ ATH_UNLOCK(sc);
+
ATH_TX_LOCK(sc);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
@@ -2437,7 +2440,14 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
+
+ /* Put the hardware back to sleep if required */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
return 0;
+
bad2:
ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
"bf=%p",
@@ -2447,14 +2457,20 @@ bad2:
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, bf);
ATH_TXBUF_UNLOCK(sc);
-bad:
+bad:
ATH_TX_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
-bad0:
+
+ /* Put the hardware back to sleep if required */
+ ATH_LOCK(sc);
+ ath_power_restore_power_state(sc);
+ ATH_UNLOCK(sc);
+
+badbad:
ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
m, params);
ifp->if_oerrors++;
diff --git a/sys/dev/ath/if_ath_tx_edma.c b/sys/dev/ath/if_ath_tx_edma.c
index 3405d4a34315e..7d149203d3ed0 100644
--- a/sys/dev/ath/if_ath_tx_edma.c
+++ b/sys/dev/ath/if_ath_tx_edma.c
@@ -866,12 +866,14 @@ ath_xmit_setup_edma(struct ath_softc *sc)
(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
- device_printf(sc->sc_dev, "TX descriptor length: %d\n",
- sc->sc_tx_desclen);
- device_printf(sc->sc_dev, "TX status length: %d\n",
- sc->sc_tx_statuslen);
- device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
- sc->sc_tx_nmaps);
+ if (bootverbose) {
+ device_printf(sc->sc_dev, "TX descriptor length: %d\n",
+ sc->sc_tx_desclen);
+ device_printf(sc->sc_dev, "TX status length: %d\n",
+ sc->sc_tx_statuslen);
+ device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
+ sc->sc_tx_nmaps);
+ }
sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 6b074d6c95c50..d1ae089f49b8f 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -510,6 +510,7 @@ struct ath_rx_edma {
int m_fifo_tail;
int m_fifo_depth;
struct mbuf *m_rxpending;
+ struct ath_buf *m_holdbf;
};
struct ath_tx_edma_fifo {
@@ -621,7 +622,8 @@ struct ath_softc {
sc_resetcal : 1,/* reset cal state next trip */
sc_rxslink : 1,/* do self-linked final descriptor */
sc_rxtsf32 : 1,/* RX dec TSF is 32 bits */
- sc_isedma : 1;/* supports EDMA */
+ sc_isedma : 1,/* supports EDMA */
+ sc_do_mybeacon : 1; /* supports mybeacon */
/*
* Second set of flags.
@@ -864,6 +866,22 @@ struct ath_softc {
void (*sc_bar_response)(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap,
int status);
+
+ /*
+ * Powersave state tracking.
+ *
+ * target/cur powerstate is the chip power state.
+ * target selfgen state is the self-generated frames
+ * state. The chip can be awake but transmitted frames
+ * can have the PWRMGT bit set to 1 so the destination
+ * thinks the node is asleep.
+ */
+ HAL_POWER_MODE sc_target_powerstate;
+ HAL_POWER_MODE sc_target_selfgen_state;
+
+ HAL_POWER_MODE sc_cur_powerstate;
+
+ int sc_powersave_refcnt;
};
#define ATH_LOCK_INIT(_sc) \
@@ -1038,6 +1056,8 @@ void ath_intr(void *);
((*(_ah)->ah_updateTxTrigLevel)((_ah), (_inc)))
#define ath_hal_setpower(_ah, _mode) \
((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_TRUE))
+#define ath_hal_setselfgenpower(_ah, _mode) \
+ ((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_FALSE))
#define ath_hal_keycachesize(_ah) \
((*(_ah)->ah_getKeyCacheSize)((_ah)))
#define ath_hal_keyreset(_ah, _ix) \
@@ -1266,6 +1286,8 @@ void ath_intr(void *);
#define ath_hal_setintmit(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_INTMIT, \
HAL_CAP_INTMIT_ENABLE, _v, NULL)
+#define ath_hal_hasmybeacon(_ah) \
+ (ath_hal_getcapability(_ah, HAL_CAP_DO_MYBEACON, 1, NULL) == HAL_OK)
#define ath_hal_hasenforcetxop(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 0, NULL) == HAL_OK)
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index a93b8afad5678..1605d24c50622 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2006-2010 Broadcom Corporation
- * David Christensen <davidch@broadcom.com>. All rights reserved.
+ * Copyright (c) 2006-2014 QLogic Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -11,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -154,13 +150,13 @@ static const struct bce_type bce_devs[] = {
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
"HP NC371i Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5706 1000Base-T" },
+ "QLogic NetXtreme II BCM5706 1000Base-T" },
/* BCM5706S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
"HP NC370F Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5706 1000Base-SX" },
+ "QLogic NetXtreme II BCM5706 1000Base-SX" },
/* BCM5708C controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
@@ -170,7 +166,7 @@ static const struct bce_type bce_devs[] = {
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
"HP NC374m PCIe Multifunction Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5708 1000Base-T" },
+ "QLogic NetXtreme II BCM5708 1000Base-T" },
/* BCM5708S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
@@ -180,7 +176,7 @@ static const struct bce_type bce_devs[] = {
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
"HP NC373F PCIe Multifunc Giga Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+ "QLogic NetXtreme II BCM5708 1000Base-SX" },
/* BCM5709C controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
@@ -188,7 +184,7 @@ static const struct bce_type bce_devs[] = {
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5709 1000Base-T" },
+ "QLogic NetXtreme II BCM5709 1000Base-T" },
/* BCM5709S controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
@@ -196,11 +192,11 @@ static const struct bce_type bce_devs[] = {
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
"HP NC382i DP Multifunction Gigabit Server Adapter" },
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+ "QLogic NetXtreme II BCM5709 1000Base-SX" },
/* BCM5716 controllers and OEM boards. */
{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM5716 1000Base-T" },
+ "QLogic NetXtreme II BCM5716 1000Base-T" },
{ 0, 0, 0, 0, NULL }
};
diff --git a/sys/dev/bce/if_bcefw.h b/sys/dev/bce/if_bcefw.h
index 8d97b31c0e6b9..fa0d528a126bc 100644
--- a/sys/dev/bce/if_bcefw.h
+++ b/sys/dev/bce/if_bcefw.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2006-2011 Broadcom Corporation
- * David Christensen <davidch@broadcom.com>. All rights reserved.
+ * Copyright (c) 2006-2014 QLogic Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,9 +9,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -31,7 +27,7 @@
/*
* This file contains firmware data derived from proprietary unpublished
- * source code, Copyright (c) 2004-2011 Broadcom Corporation.
+ * source code, Copyright (c) 2004-2014 QLogic Corporation.
*
* Permission is hereby granted for the distribution of this firmware data
* in hexadecimal or equivalent format, provided this copyright notice also
diff --git a/sys/dev/bce/if_bcereg.h b/sys/dev/bce/if_bcereg.h
index 88c40a5f82188..16f99ccdf55ea 100644
--- a/sys/dev/bce/if_bcereg.h
+++ b/sys/dev/bce/if_bcereg.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2006-2010 Broadcom Corporation
- * David Christensen <davidch@broadcom.com>. All rights reserved.
+ * Copyright (c) 2006-2014 QLogic Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,9 +9,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57710_init_values.c b/sys/dev/bxe/57710_init_values.c
index 90cf332b520c5..3122c336a50fc 100644
--- a/sys/dev/bxe/57710_init_values.c
+++ b/sys/dev/bxe/57710_init_values.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57710_int_offsets.h b/sys/dev/bxe/57710_int_offsets.h
index 543417953294e..c4c0faca7de05 100644
--- a/sys/dev/bxe/57710_int_offsets.h
+++ b/sys/dev/bxe/57710_int_offsets.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57711_init_values.c b/sys/dev/bxe/57711_init_values.c
index 20747192e1d3a..8b15d00b65542 100644
--- a/sys/dev/bxe/57711_init_values.c
+++ b/sys/dev/bxe/57711_init_values.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57711_int_offsets.h b/sys/dev/bxe/57711_int_offsets.h
index 449b960e0d268..a3ebbf36da0d5 100644
--- a/sys/dev/bxe/57711_int_offsets.h
+++ b/sys/dev/bxe/57711_int_offsets.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57712_init_values.c b/sys/dev/bxe/57712_init_values.c
index 3f98bf4923cdd..112a1122e9cba 100644
--- a/sys/dev/bxe/57712_init_values.c
+++ b/sys/dev/bxe/57712_init_values.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/57712_int_offsets.h b/sys/dev/bxe/57712_int_offsets.h
index 3f6e83b3b202b..437de0b21e9c0 100644
--- a/sys/dev/bxe/57712_int_offsets.h
+++ b/sys/dev/bxe/57712_int_offsets.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index eb812d7ef157f..b6583d9cdfa25 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -105,126 +98,126 @@ static struct bxe_device_type bxe_devs[] = {
BRCM_VENDORID,
CHIP_NUM_57710,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57710 10GbE"
+ "QLogic NetXtreme II BCM57710 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57711,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57711 10GbE"
+ "QLogic NetXtreme II BCM57711 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57711E,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57711E 10GbE"
+ "QLogic NetXtreme II BCM57711E 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57712,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57712 10GbE"
+ "QLogic NetXtreme II BCM57712 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57712_MF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57712 MF 10GbE"
+ "QLogic NetXtreme II BCM57712 MF 10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57712_VF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57712 VF 10GbE"
+ "QLogic NetXtreme II BCM57712 VF 10GbE"
},
#endif
{
BRCM_VENDORID,
CHIP_NUM_57800,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57800 10GbE"
+ "QLogic NetXtreme II BCM57800 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57800_MF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57800 MF 10GbE"
+ "QLogic NetXtreme II BCM57800 MF 10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57800_VF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57800 VF 10GbE"
+ "QLogic NetXtreme II BCM57800 VF 10GbE"
},
#endif
{
BRCM_VENDORID,
CHIP_NUM_57810,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57810 10GbE"
+ "QLogic NetXtreme II BCM57810 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57810_MF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57810 MF 10GbE"
+ "QLogic NetXtreme II BCM57810 MF 10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57810_VF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57810 VF 10GbE"
+ "QLogic NetXtreme II BCM57810 VF 10GbE"
},
#endif
{
BRCM_VENDORID,
CHIP_NUM_57811,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57811 10GbE"
+ "QLogic NetXtreme II BCM57811 10GbE"
},
{
BRCM_VENDORID,
CHIP_NUM_57811_MF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57811 MF 10GbE"
+ "QLogic NetXtreme II BCM57811 MF 10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57811_VF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57811 VF 10GbE"
+ "QLogic NetXtreme II BCM57811 VF 10GbE"
},
#endif
{
BRCM_VENDORID,
CHIP_NUM_57840_4_10,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57840 4x10GbE"
+ "QLogic NetXtreme II BCM57840 4x10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57840_2_20,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57840 2x20GbE"
+ "QLogic NetXtreme II BCM57840 2x20GbE"
},
#endif
{
BRCM_VENDORID,
CHIP_NUM_57840_MF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57840 MF 10GbE"
+ "QLogic NetXtreme II BCM57840 MF 10GbE"
},
#if 0
{
BRCM_VENDORID,
CHIP_NUM_57840_VF,
PCI_ANY_ID, PCI_ANY_ID,
- "Broadcom NetXtreme II BCM57840 VF 10GbE"
+ "QLogic NetXtreme II BCM57840 VF 10GbE"
},
#endif
{
diff --git a/sys/dev/bxe/bxe.h b/sys/dev/bxe/bxe.h
index 47a0b79e6ce66..a0709f630d355 100644
--- a/sys/dev/bxe/bxe.h
+++ b/sys/dev/bxe/bxe.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_dcb.h b/sys/dev/bxe/bxe_dcb.h
index 01beb8e02172a..79515e0010630 100644
--- a/sys/dev/bxe/bxe_dcb.h
+++ b/sys/dev/bxe/bxe_dcb.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_debug.c b/sys/dev/bxe/bxe_debug.c
index 328779f9c5a07..32db18aa3af24 100644
--- a/sys/dev/bxe/bxe_debug.c
+++ b/sys/dev/bxe/bxe_debug.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_elink.c b/sys/dev/bxe/bxe_elink.c
index f4e280151ab9e..64f6852574dad 100644
--- a/sys/dev/bxe/bxe_elink.c
+++ b/sys/dev/bxe/bxe_elink.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_elink.h b/sys/dev/bxe/bxe_elink.h
index dd5d406e18e06..a5be6a305dbf4 100644
--- a/sys/dev/bxe/bxe_elink.h
+++ b/sys/dev/bxe/bxe_elink.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_stats.c b/sys/dev/bxe/bxe_stats.c
index 977f6cfb25919..cfecc22ce8a29 100644
--- a/sys/dev/bxe/bxe_stats.c
+++ b/sys/dev/bxe/bxe_stats.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/bxe_stats.h b/sys/dev/bxe/bxe_stats.h
index 09a8a17592f80..cb98201abb131 100644
--- a/sys/dev/bxe/bxe_stats.h
+++ b/sys/dev/bxe/bxe_stats.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_fw_defs.h b/sys/dev/bxe/ecore_fw_defs.h
index c585cbffffb0f..30754f723eec3 100644
--- a/sys/dev/bxe/ecore_fw_defs.h
+++ b/sys/dev/bxe/ecore_fw_defs.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_hsi.h b/sys/dev/bxe/ecore_hsi.h
index bfffcec345845..005bb2e4dbce2 100644
--- a/sys/dev/bxe/ecore_hsi.h
+++ b/sys/dev/bxe/ecore_hsi.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_init.h b/sys/dev/bxe/ecore_init.h
index 21242acd6d6c8..7eab8112453bf 100644
--- a/sys/dev/bxe/ecore_init.h
+++ b/sys/dev/bxe/ecore_init.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_init_ops.h b/sys/dev/bxe/ecore_init_ops.h
index ff373a8948530..5d8dee41d36d3 100644
--- a/sys/dev/bxe/ecore_init_ops.h
+++ b/sys/dev/bxe/ecore_init_ops.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_mfw_req.h b/sys/dev/bxe/ecore_mfw_req.h
index 136d0f5ae715c..26884f8450b55 100644
--- a/sys/dev/bxe/ecore_mfw_req.h
+++ b/sys/dev/bxe/ecore_mfw_req.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_reg.h b/sys/dev/bxe/ecore_reg.h
index 3c8de57a1b9a3..916df8f339814 100644
--- a/sys/dev/bxe/ecore_reg.h
+++ b/sys/dev/bxe/ecore_reg.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_sp.c b/sys/dev/bxe/ecore_sp.c
index c33e42ed9294a..b716085222de3 100644
--- a/sys/dev/bxe/ecore_sp.c
+++ b/sys/dev/bxe/ecore_sp.c
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/bxe/ecore_sp.h b/sys/dev/bxe/ecore_sp.h
index ccb83eb1fead8..c3d9c3a85399a 100644
--- a/sys/dev/bxe/ecore_sp.h
+++ b/sys/dev/bxe/ecore_sp.h
@@ -1,9 +1,5 @@
/*-
- * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
+ * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,9 +10,6 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
diff --git a/sys/dev/drm2/i915/i915_gem.c b/sys/dev/drm2/i915/i915_gem.c
index 9af50d8f32dd5..99821e49ae1a3 100644
--- a/sys/dev/drm2/i915/i915_gem.c
+++ b/sys/dev/drm2/i915/i915_gem.c
@@ -1431,6 +1431,7 @@ retry:
m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
offset);
if (m == NULL) {
+ VM_OBJECT_WUNLOCK(vm_obj);
cause = 60;
ret = -EFAULT;
goto unlock;
@@ -1450,7 +1451,6 @@ retry:
DRM_UNLOCK(dev);
VM_OBJECT_WUNLOCK(vm_obj);
VM_WAIT;
- VM_OBJECT_WLOCK(vm_obj);
goto retry;
}
m->valid = VM_PAGE_BITS_ALL;
diff --git a/sys/dev/drm2/radeon/radeon_drv.c b/sys/dev/drm2/radeon/radeon_drv.c
index 0cf96d2aac5e7..37b4e265ffc32 100644
--- a/sys/dev/drm2/radeon/radeon_drv.c
+++ b/sys/dev/drm2/radeon/radeon_drv.c
@@ -85,6 +85,10 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
+#ifdef COMPAT_FREEBSD32
+extern struct drm_ioctl_desc radeon_compat_ioctls[];
+extern int radeon_num_compat_ioctls;
+#endif
#ifdef DUMBBELL_WIP
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
#endif /* DUMBBELL_WIP */
@@ -466,6 +470,10 @@ radeon_attach(device_t kdev)
if (radeon_modeset == 1) {
kms_driver.driver_features |= DRIVER_MODESET;
kms_driver.max_ioctl = radeon_max_kms_ioctl;
+#ifdef COMPAT_FREEBSD32
+ kms_driver.compat_ioctls = radeon_compat_ioctls;
+ kms_driver.compat_ioctls_nr = &radeon_num_compat_ioctls;
+#endif
radeon_register_atpx_handler();
}
dev->driver = &kms_driver;
diff --git a/sys/dev/drm2/radeon/radeon_ioc32.c b/sys/dev/drm2/radeon/radeon_ioc32.c
index 361d48cb3f48c..ee691be95e2a8 100644
--- a/sys/dev/drm2/radeon/radeon_ioc32.c
+++ b/sys/dev/drm2/radeon/radeon_ioc32.c
@@ -31,10 +31,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <linux/compat.h>
+#include "opt_compat.h"
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
+#ifdef COMPAT_FREEBSD32
+
+#include <dev/drm2/drmP.h>
+#include <dev/drm2/drm.h>
+#include <dev/drm2/radeon/radeon_drm.h>
#include "radeon_drv.h"
typedef struct drm_radeon_init32 {
@@ -60,42 +63,37 @@ typedef struct drm_radeon_init32 {
u32 gart_textures_offset;
} drm_radeon_init32_t;
-static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_init(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_init32_t init32;
- drm_radeon_init_t __user *init;
-
- if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
- return -EFAULT;
-
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cp_mode, &init->cp_mode)
- || __put_user(init32.gart_size, &init->gart_size)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.gart_textures_offset,
- &init->gart_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+ drm_radeon_init32_t *init32;
+ drm_radeon_init_t __user init;
+
+ init32 = arg;
+
+ init.func = init32->func;
+ init.sarea_priv_offset = (unsigned long)init32->sarea_priv_offset;
+ init.is_pci = init32->is_pci;
+ init.cp_mode = init32->cp_mode;
+ init.gart_size = init32->gart_size;
+ init.ring_size = init32->ring_size;
+ init.usec_timeout = init32->usec_timeout;
+ init.fb_bpp = init32->fb_bpp;
+ init.front_offset = init32->front_offset;
+ init.front_pitch = init32->front_pitch;
+ init.back_offset = init32->back_offset;
+ init.back_pitch = init32->back_pitch;
+ init.depth_bpp = init32->depth_bpp;
+ init.depth_offset = init32->depth_offset;
+ init.depth_pitch = init32->depth_pitch;
+ init.fb_offset = (unsigned long)init32->fb_offset;
+ init.mmio_offset = (unsigned long)init32->mmio_offset;
+ init.ring_offset = (unsigned long)init32->ring_offset;
+ init.ring_rptr_offset = (unsigned long)init32->ring_rptr_offset;
+ init.buffers_offset = (unsigned long)init32->buffers_offset;
+ init.gart_textures_offset = (unsigned long)init32->gart_textures_offset;
+
+ return radeon_cp_init(dev, &init, file_priv);
}
typedef struct drm_radeon_clear32 {
@@ -107,50 +105,37 @@ typedef struct drm_radeon_clear32 {
u32 depth_boxes;
} drm_radeon_clear32_t;
-static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_clear(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_clear32_t clr32;
- drm_radeon_clear_t __user *clr;
-
- if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
- return -EFAULT;
-
- clr = compat_alloc_user_space(sizeof(*clr));
- if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
- || __put_user(clr32.flags, &clr->flags)
- || __put_user(clr32.clear_color, &clr->clear_color)
- || __put_user(clr32.clear_depth, &clr->clear_depth)
- || __put_user(clr32.color_mask, &clr->color_mask)
- || __put_user(clr32.depth_mask, &clr->depth_mask)
- || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
- &clr->depth_boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+ drm_radeon_clear32_t *clr32;
+ drm_radeon_clear_t __user clr;
+
+ clr32 = arg;
+
+ clr.flags = clr32->flags;
+ clr.clear_color = clr32->clear_color;
+ clr.clear_depth = clr32->clear_depth;
+ clr.color_mask = clr32->color_mask;
+ clr.depth_mask = clr32->depth_mask;
+ clr.depth_boxes = (drm_radeon_clear_rect_t *)(unsigned long)clr32->depth_boxes;
+
+ return radeon_ioctls[DRM_IOCTL_RADEON_CLEAR].func(dev, &clr, file_priv);
}
typedef struct drm_radeon_stipple32 {
u32 mask;
} drm_radeon_stipple32_t;
-static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_stipple(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
drm_radeon_stipple32_t __user *argp = (void __user *)arg;
- drm_radeon_stipple_t __user *request;
- u32 mask;
-
- if (get_user(mask, &argp->mask))
- return -EFAULT;
+ drm_radeon_stipple_t __user request;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((unsigned int __user *)(unsigned long)mask,
- &request->mask))
- return -EFAULT;
+ request.mask = (unsigned int *)(unsigned long)argp->mask;
- return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+ return radeon_ioctls[DRM_IOCTL_RADEON_STIPPLE].func(dev, &request, file_priv);
}
typedef struct drm_radeon_tex_image32 {
@@ -168,43 +153,32 @@ typedef struct drm_radeon_texture32 {
u32 image;
} drm_radeon_texture32_t;
-static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_texture(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_texture32_t req32;
- drm_radeon_texture_t __user *request;
- drm_radeon_tex_image32_t img32;
- drm_radeon_tex_image_t __user *image;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
- if (req32.image == 0)
+ drm_radeon_texture32_t *req32;
+ drm_radeon_texture_t __user request;
+ drm_radeon_tex_image32_t *img32;
+ drm_radeon_tex_image_t __user image;
+
+ req32 = arg;
+ if (req32->image == 0)
return -EINVAL;
- if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
- sizeof(img32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
- if (!access_ok(VERIFY_WRITE, request,
- sizeof(*request) + sizeof(*image)))
- return -EFAULT;
- image = (drm_radeon_tex_image_t __user *) (request + 1);
-
- if (__put_user(req32.offset, &request->offset)
- || __put_user(req32.pitch, &request->pitch)
- || __put_user(req32.format, &request->format)
- || __put_user(req32.width, &request->width)
- || __put_user(req32.height, &request->height)
- || __put_user(image, &request->image)
- || __put_user(img32.x, &image->x)
- || __put_user(img32.y, &image->y)
- || __put_user(img32.width, &image->width)
- || __put_user(img32.height, &image->height)
- || __put_user((const void __user *)(unsigned long)img32.data,
- &image->data))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+ img32 = (drm_radeon_tex_image32_t *)(unsigned long)req32->image;
+
+ request.offset = req32->offset;
+ request.pitch = req32->pitch;
+ request.format = req32->format;
+ request.width = req32->width;
+ request.height = req32->height;
+ request.image = &image;
+ image.x = img32->x;
+ image.y = img32->y;
+ image.width = img32->width;
+ image.height = img32->height;
+ image.data = (void *)(unsigned long)img32->data;
+
+ return radeon_ioctls[DRM_IOCTL_RADEON_TEXTURE].func(dev, &request, file_priv);
}
typedef struct drm_radeon_vertex2_32 {
@@ -216,28 +190,22 @@ typedef struct drm_radeon_vertex2_32 {
u32 prim;
} drm_radeon_vertex2_32_t;
-static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_vertex2(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_vertex2_32_t req32;
- drm_radeon_vertex2_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.idx, &request->idx)
- || __put_user(req32.discard, &request->discard)
- || __put_user(req32.nr_states, &request->nr_states)
- || __put_user((void __user *)(unsigned long)req32.state,
- &request->state)
- || __put_user(req32.nr_prims, &request->nr_prims)
- || __put_user((void __user *)(unsigned long)req32.prim,
- &request->prim))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+ drm_radeon_vertex2_32_t *req32;
+ drm_radeon_vertex2_t __user request;
+
+ req32 = arg;
+
+ request.idx = req32->idx;
+ request.discard = req32->discard;
+ request.nr_states = req32->nr_states;
+ request.state = (drm_radeon_state_t *)(unsigned long)req32->state;
+ request.nr_prims = req32->nr_prims;
+ request.prim = (drm_radeon_prim_t *)(unsigned long)req32->prim;
+
+ return radeon_ioctls[DRM_IOCTL_RADEON_VERTEX2].func(dev, &request, file_priv);
}
typedef struct drm_radeon_cmd_buffer32 {
@@ -247,26 +215,20 @@ typedef struct drm_radeon_cmd_buffer32 {
u32 boxes;
} drm_radeon_cmd_buffer32_t;
-static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_cmdbuf(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_cmd_buffer32_t req32;
- drm_radeon_cmd_buffer_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.bufsz, &request->bufsz)
- || __put_user((void __user *)(unsigned long)req32.buf,
- &request->buf)
- || __put_user(req32.nbox, &request->nbox)
- || __put_user((void __user *)(unsigned long)req32.boxes,
- &request->boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+ drm_radeon_cmd_buffer32_t *req32;
+ drm_radeon_cmd_buffer_t __user request;
+
+ req32 = arg;
+
+ request.bufsz = req32->bufsz;
+ request.buf = (char *)(unsigned long)req32->buf;
+ request.nbox = req32->nbox;
+ request.boxes = (struct drm_clip_rect *)(unsigned long)req32->boxes;
+
+ return radeon_ioctls[DRM_IOCTL_RADEON_CMDBUF].func(dev, &request, file_priv);
}
typedef struct drm_radeon_getparam32 {
@@ -274,23 +236,18 @@ typedef struct drm_radeon_getparam32 {
u32 value;
} drm_radeon_getparam32_t;
-static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_getparam(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_getparam32_t req32;
- drm_radeon_getparam_t __user *request;
+ drm_radeon_getparam32_t *req32;
+ drm_radeon_getparam_t __user request;
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
+ req32 = arg;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
+ request.param = req32->param;
+ request.value = (void *)(unsigned long)req32->value;
- return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+ return radeon_ioctls[DRM_IOCTL_RADEON_GETPARAM].func(dev, &request, file_priv);
}
typedef struct drm_radeon_mem_alloc32 {
@@ -300,129 +257,71 @@ typedef struct drm_radeon_mem_alloc32 {
u32 region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc32_t;
-static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_mem_alloc(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_mem_alloc32_t req32;
- drm_radeon_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((int __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+ drm_radeon_mem_alloc32_t *req32;
+ drm_radeon_mem_alloc_t __user request;
+
+ req32 = arg;
+
+ request.region = req32->region;
+ request.alignment = req32->alignment;
+ request.size = req32->size;
+ request.region_offset = (int *)(unsigned long)req32->region_offset;
+
+ return radeon_mem_alloc(dev, &request, file_priv);
}
typedef struct drm_radeon_irq_emit32 {
u32 irq_seq;
} drm_radeon_irq_emit32_t;
-static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_irq_emit(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_irq_emit32_t req32;
- drm_radeon_irq_emit_t __user *request;
+ drm_radeon_irq_emit32_t *req32;
+ drm_radeon_irq_emit_t __user request;
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
+ req32 = arg;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
+ request.irq_seq = (int *)(unsigned long)req32->irq_seq;
- return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+ return radeon_irq_emit(dev, &request, file_priv);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
-#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
typedef struct drm_radeon_setparam32 {
int param;
u64 value;
} __attribute__((packed)) drm_radeon_setparam32_t;
-static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int compat_radeon_cp_setparam(struct drm_device *dev, void *arg,
+ struct drm_file *file_priv)
{
- drm_radeon_setparam32_t req32;
- drm_radeon_setparam_t __user *request;
+ drm_radeon_setparam32_t *req32;
+ drm_radeon_setparam_t __user request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
+ req32 = arg;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
+ request.param = req32->param;
+ request.value = req32->value;
- return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+ return radeon_ioctls[DRM_IOCTL_RADEON_SETPARAM].func(dev, &request, file_priv);
}
-#else
-#define compat_radeon_cp_setparam NULL
-#endif /* X86_64 || IA64 */
-
-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
- [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
- [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
- [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
- [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
- [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
- [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
- [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
- [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
- [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
- [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
-};
-
-/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- */
-long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
-
-long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- ret = drm_ioctl(filp, cmd, arg);
+struct drm_ioctl_desc radeon_compat_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, compat_radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, compat_radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, compat_radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, compat_radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, compat_radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, compat_radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, compat_radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, compat_radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, compat_radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, compat_radeon_irq_emit, DRM_AUTH)
+};
+int radeon_num_compat_ioctls = DRM_ARRAY_SIZE(radeon_compat_ioctls);
- return ret;
-}
+#endif
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 78383d3d46d85..4d6dfd17a5d75 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -31,6 +31,32 @@
INTERFACE gpio;
+CODE {
+ static gpio_map_gpios_t gpio_default_map_gpios;
+
+ int
+ gpio_default_map_gpios(device_t bus, phandle_t dev,
+ phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin,
+ uint32_t *flags)
+ {
+ /* Propagate up the bus hierarchy until someone handles it. */
+ if (device_get_parent(bus) != NULL)
+ return (GPIO_MAP_GPIOS(device_get_parent(bus), dev,
+ gparent, gcells, gpios, pin, flags));
+
+ /* If that fails, then assume the FreeBSD defaults. */
+ *pin = gpios[0];
+ if (gcells == 2 || gcells == 3)
+ *flags = gpios[gcells - 1];
+
+ return (0);
+ }
+};
+
+HEADER {
+ #include <dev/ofw/openfirm.h>
+};
+
#
# Get total number of pins
#
@@ -100,3 +126,16 @@ METHOD int pin_setflags {
uint32_t pin_num;
uint32_t flags;
};
+
+#
+# Allow the GPIO controller to map the gpio-specifier on its own.
+#
+METHOD int map_gpios {
+ device_t bus;
+ phandle_t dev;
+ phandle_t gparent;
+ int gcells;
+ pcell_t *gpios;
+ uint32_t *pin;
+ uint32_t *flags;
+} DEFAULT gpio_default_map_gpios;
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index d8eacc62e85e8..e09393c325377 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -29,21 +29,13 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
-#include <sys/kernel.h>
-#include <sys/queue.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <sys/bus.h>
-#include <machine/bus.h>
-#include <sys/rman.h>
-#include <machine/resource.h>
-
-#include <sys/gpio.h>
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
+
#include "gpiobus_if.h"
static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int);
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h
index 3d387107762ed..e2ee51bd6f0c1 100644
--- a/sys/dev/gpio/gpiobusvar.h
+++ b/sys/dev/gpio/gpiobusvar.h
@@ -32,7 +32,6 @@
#include "opt_platform.h"
-#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
@@ -40,6 +39,8 @@
#include <dev/ofw/ofw_bus_subr.h>
#endif
+#include "gpio_if.h"
+
#define GPIOBUS_IVAR(d) (struct gpiobus_ivar *) device_get_ivars(d)
#define GPIOBUS_SOFTC(d) (struct gpiobus_softc *) device_get_softc(d)
#define GPIOBUS_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
@@ -73,6 +74,13 @@ struct ofw_gpiobus_devinfo {
struct ofw_bus_devinfo opd_obdinfo;
};
+static __inline int
+gpio_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells,
+ pcell_t *gpios, uint32_t *pin, uint32_t *flags)
+{
+ return (GPIO_MAP_GPIOS(bus, dev, gparent, gcells, gpios, pin, flags));
+}
+
device_t ofw_gpiobus_add_fdt_child(device_t, phandle_t);
#endif
void gpiobus_print_pins(struct gpiobus_ivar *);
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index f4eb37df96ae5..6e182926695ba 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -30,22 +30,14 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/bus.h>
-#include <sys/gpio.h>
#include <sys/kernel.h>
-#include <sys/libkern.h>
-#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/module.h>
-#include <sys/mutex.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/openfirm.h>
-
-#include <machine/resource.h>
-
-#include "gpio_if.h"
-#include "gpiobus_if.h"
static int ofw_gpiobus_parse_gpios(struct gpiobus_softc *,
struct gpiobus_ivar *, phandle_t);
@@ -186,7 +178,7 @@ ofw_gpiobus_parse_gpios(struct gpiobus_softc *sc, struct gpiobus_ivar *dinfo,
}
/* Get the GPIO pin number and flags. */
- if (ofw_bus_map_gpios(sc->sc_dev, child, gpio, cells,
+ if (gpio_map_gpios(sc->sc_dev, child, gpio, cells,
&gpios[i + 1], &dinfo->pins[j], &dinfo->flags[j]) != 0) {
ofw_gpiobus_free_ivars(dinfo);
free(gpios, M_DEVBUF);
diff --git a/sys/dev/lindev/full.c b/sys/dev/lindev/full.c
deleted file mode 100644
index 294094c075d3e..0000000000000
--- a/sys/dev/lindev/full.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*-
- * Copyright (c) 2009 Ed Schouten <ed@FreeBSD.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/systm.h>
-#include <sys/uio.h>
-
-#include <dev/lindev/lindev.h>
-
-static struct cdev *full_dev;
-
-static d_read_t full_read;
-static d_write_t full_write;
-
-static struct cdevsw full_cdevsw = {
- .d_version = D_VERSION,
- .d_read = full_read,
- .d_write = full_write,
- .d_name = "full",
-};
-
-static void *zbuf;
-
-/* ARGSUSED */
-static int
-full_read(struct cdev *dev __unused, struct uio *uio, int flags __unused)
-{
- int error = 0;
-
- while (uio->uio_resid > 0 && error == 0)
- error = uiomove(zbuf, MIN(uio->uio_resid, PAGE_SIZE), uio);
-
- return (error);
-}
-
-/* ARGSUSED */
-static int
-full_write(struct cdev *dev __unused, struct uio *uio __unused,
- int flags __unused)
-{
-
- return (ENOSPC);
-}
-
-/* ARGSUSED */
-int
-lindev_modevent_full(module_t mod __unused, int type, void *data __unused)
-{
-
- switch(type) {
- case MOD_LOAD:
- zbuf = (void *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO);
- full_dev = make_dev(&full_cdevsw, 0, UID_ROOT, GID_WHEEL,
- 0666, "full");
- if (bootverbose)
- printf("full: <full device>\n");
- break;
-
- case MOD_UNLOAD:
- destroy_dev(full_dev);
- free(zbuf, M_TEMP);
- break;
-
- case MOD_SHUTDOWN:
- break;
-
- default:
- return (EOPNOTSUPP);
- }
-
- return (0);
-}
-
diff --git a/sys/dev/mpr/mpi/mpi2.h b/sys/dev/mpr/mpi/mpi2.h
new file mode 100644
index 0000000000000..10f17aba067ec
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2.h
@@ -0,0 +1,1257 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.33
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+
+/* major version for all MPI v2.x */
+#define MPI2_VERSION_MAJOR (0x02)
+
+/* minor version for MPI v2.0 compatible products */
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_VERSION_02_00 (0x0200)
+
+
+/* minor version for MPI v2.5 compatible products */
+#define MPI25_VERSION_MINOR (0x05)
+#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI25_VERSION_MINOR)
+#define MPI2_VERSION_02_05 (0x0205)
+
+
+/* Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x21)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
+/* Fault state range for product specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
+
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
+{
+ U32 Doorbell; /* 0x00 */
+ U32 WriteSequence; /* 0x04 */
+ U32 HostDiagnostic; /* 0x08 */
+ U32 Reserved1; /* 0x0C */
+ U32 DiagRWData; /* 0x10 */
+ U32 DiagRWAddressLow; /* 0x14 */
+ U32 DiagRWAddressHigh; /* 0x18 */
+ U32 Reserved2[5]; /* 0x1C */
+ U32 HostInterruptStatus; /* 0x30 */
+ U32 HostInterruptMask; /* 0x34 */
+ U32 DCRData; /* 0x38 */
+ U32 DCRAddress; /* 0x3C */
+ U32 Reserved3[2]; /* 0x40 */
+ U32 ReplyFreeHostIndex; /* 0x48 */
+ U32 Reserved4[8]; /* 0x4C */
+ U32 ReplyPostHostIndex; /* 0x6C */
+ U32 Reserved5; /* 0x70 */
+ U32 HCBSize; /* 0x74 */
+ U32 HCBAddressLow; /* 0x78 */
+ U32 HCBAddressHigh; /* 0x7C */
+ U32 Reserved6[16]; /* 0x80 */
+ U32 RequestDescriptorPostLow; /* 0xC0 */
+ U32 RequestDescriptorPostHigh; /* 0xC4 */
+ U32 Reserved7[14]; /* 0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS, MPI2_POINTER PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t, MPI2_POINTER pMpi2SystemInterfaceRegs_t;
+
+/*
+ * Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/* IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/* System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
+
+
+/*
+ * Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+/*
+ * Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
+
+/*
+ * Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ * Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ * Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
+
+/*
+ * Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ * Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ * Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
+#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */
+
+
+/*
+ * Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ * Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+
+/* Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/* Request Descriptors */
+
+/* Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 DescriptorTypeDependent; /* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
+/* defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+
+/* High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
+
+/* SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 DevHandle; /* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+
+
+/* SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+
+
+/* RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR
+{
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 LMID; /* 0x04 */
+ U16 Reserved; /* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+
+/* Fast Path SCSI IO Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi25FastPathSCSIIORequestDescriptor_t,
+ MPI2_POINTER pMpi25FastPathSCSIIORequestDescriptor_t;
+
+
+/* union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION
+{
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION, MPI2_POINTER PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t, MPI2_POINTER pMpi2RequestDescriptorUnion_t;
+
+
+/* Reply Descriptors */
+
+/* Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 DescriptorTypeDependent1; /* 0x02 */
+ U32 DescriptorTypeDependent2; /* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+
+/* defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/* values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/* Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U32 ReplyFrameAddress; /* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+
+/* SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U16 TaskTag; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+
+/* TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U8 SequenceNumber; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+
+/* Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U8 VP_ID; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U16 InitiatorDevHandle; /* 0x04 */
+ U16 IoIndex; /* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/* defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+
+/* RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+{
+ U8 ReplyFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+ U32 Reserved; /* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+
+/* Fast Path SCSI IO Success Reply Descriptor */
+typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+
+
+/* union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION
+{
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+
+
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) /* SCSI Task Management */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_FUNCTION_IOC_FACTS (0x03) /* IOC Facts */
+#define MPI2_FUNCTION_CONFIG (0x04) /* Configuration */
+#define MPI2_FUNCTION_PORT_FACTS (0x05) /* Port Facts */
+#define MPI2_FUNCTION_PORT_ENABLE (0x06) /* Port Enable */
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) /* Event Notification */
+#define MPI2_FUNCTION_EVENT_ACK (0x08) /* Event Acknowledge */
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) /* FW Download */
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) /* Target Assist */
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) /* Target Status Send */
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) /* Target Mode Abort */
+#define MPI2_FUNCTION_FW_UPLOAD (0x12) /* FW Upload */
+#define MPI2_FUNCTION_RAID_ACTION (0x15) /* RAID Action */
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) /* SCSI IO RAID Passthrough */
+#define MPI2_FUNCTION_TOOLBOX (0x17) /* Toolbox */
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) /* SCSI Enclosure Processor */
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) /* SMP Passthrough */
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) /* SAS IO Unit Control */
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) /* SATA Passthrough */
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) /* Diagnostic Buffer Post */
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) /* Diagnostic Release */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) /* Host Based Discovery Action */
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) /* Power Management Control */
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) /* Send Host Message */
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) /* beginning of product-specific range */
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* end of product-specific range */
+
+
+
+/* Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
+
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/* mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER
+{
+ U16 FunctionDependent1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 FunctionDependent2; /* 0x04 */
+ U8 FunctionDependent3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+} MPI2_REQUEST_HEADER, MPI2_POINTER PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, MPI2_POINTER pMPI2RequestHeader_t;
+
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY
+{
+ U16 FunctionDependent1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 FunctionDependent2; /* 0x04 */
+ U8 FunctionDependent3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U16 FunctionDependent5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_DEFAULT_REPLY, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, MPI2_POINTER pMPI2DefaultReply_t;
+
+
+/* common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT
+{
+ U8 Dev; /* 0x00 */
+ U8 Unit; /* 0x01 */
+ U8 Minor; /* 0x02 */
+ U8 Major; /* 0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION
+{
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+
+/* LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32
+{
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, MPI2_POINTER pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64
+{
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, MPI2_POINTER pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+ U32 FlagsLength;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
+
+
+/****************************************************************************
+* MPI Chain Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, MPI2_POINTER PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, MPI2_POINTER pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, MPI2_POINTER PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, MPI2_POINTER pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+
+/****************************************************************************
+* MPI Transaction Context Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t, MPI2_POINTER pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t, MPI2_POINTER pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, MPI2_POINTER pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, MPI2_POINTER pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union
+ {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t, MPI2_POINTER pMpi2SGETransactionUnion_t;
+
+
+/****************************************************************************
+* MPI SGE union for IO SGL's - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, MPI2_POINTER pMpi2MpiSGEIOUnion_t;
+
+
+/****************************************************************************
+* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t, MPI2_POINTER pMpi2SGETransSimpleUnion_t;
+
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION
+{
+ union
+ {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, MPI2_POINTER PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, MPI2_POINTER pMpi2MpiSgeUnion_t;
+
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/* Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) /* for MPI v2.0 products only */
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) /* for MPI v2.0 products only */
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/* Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/* Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/* Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/* Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/* SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f,l) (MPI2_SGE_SET_FLAGS(f) | MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_SGE_SET_FLAGS_LENGTH(f,l)
+
+/* CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_SGE_SET_FLAGS(f)
+#define MPI2_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_SGE_LENGTH(l)
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> MPI2_SGE_CHAIN_OFFSET_SHIFT)
+
+
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+/* MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_SIMPLE32
+{
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64
+{
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+/* MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+/* MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+/* MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+typedef struct _MPI25_IEEE_SGE_CHAIN64
+{
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 NextChainOffset;
+ U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
+
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+/* MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_UNION
+{
+ union
+ {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, MPI2_POINTER pMpi2IeeeSgeUnion_t;
+
+
+/****************************************************************************
+* IEEE SGE union for IO SGL's
+****************************************************************************/
+
+typedef union _MPI25_SGE_IO_UNION
+{
+ MPI2_IEEE_SGE_SIMPLE64 IeeeSimple;
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain;
+} MPI25_SGE_IO_UNION, MPI2_POINTER PTR_MPI25_SGE_IO_UNION,
+ Mpi25SGEIOUnion_t, MPI2_POINTER pMpi25SGEIOUnion_t;
+
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/* Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/* Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5, use in IEEE Simple or Chain element */
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) /* use in IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) /* for MPI v2.0, use in IEEE Simple Element only; for MPI v2.5, use in IEEE Simple or Chain element */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) /* use in MPI v2.0 IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/* SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) | MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f,l)
+
+/* CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI2_IEEE32_SGE_SET_FLAGS(f)
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI2_IEEE32_SGE_LENGTH(l)
+
+
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, MPI2_POINTER PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, MPI2_POINTER pMpi2SimpleSgeUntion_t;
+
+
+typedef union _MPI2_SGE_IO_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/* values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/* values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) /* MPI v2.0 products only */
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
+
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_cnfg.h b/sys/dev/mpr/mpi/mpi2_cnfg.h
new file mode 100644
index 0000000000000..d82750f80913f
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_cnfg.h
@@ -0,0 +1,3169 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.27
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+*   Configuration Page Header and defines
+*****************************************************************************/
+
+/* Config Page Header */
+/*
+ * 4-byte header that begins every non-extended configuration page.  The
+ * PageType byte carries both the attribute bits (upper nibble, see the
+ * MPI2_CONFIG_PAGEATTR_ defines) and the page-type code (lower nibble,
+ * see the MPI2_CONFIG_PAGETYPE_ defines).
+ */
+typedef struct _MPI2_CONFIG_PAGE_HEADER
+{
+    U8                 PageVersion;                /* 0x00 */
+    U8                 PageLength;                 /* 0x01 */
+    U8                 PageNumber;                 /* 0x02 */
+    U8                 PageType;                   /* 0x03 */
+} MPI2_CONFIG_PAGE_HEADER, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER,
+  Mpi2ConfigPageHeader_t, MPI2_POINTER pMpi2ConfigPageHeader_t;
+
+/* Overlay union allowing the 4-byte header to be accessed as raw bytes/words. */
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION
+{
+   MPI2_CONFIG_PAGE_HEADER  Struct;
+   U8                       Bytes[4];
+   U16                      Word16[2];
+   U32                      Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+  Mpi2ConfigPageHeaderUnion, MPI2_POINTER pMpi2ConfigPageHeaderUnion;
+
+/* Extended Config Page Header */
+/*
+ * 8-byte header used by extended pages (PageType low nibble ==
+ * MPI2_CONFIG_PAGETYPE_EXTENDED).  ExtPageLength/ExtPageType replace the
+ * single-byte PageLength of the plain header; the actual sub-type is in
+ * ExtPageType (see the MPI2_CONFIG_EXTPAGETYPE_ defines).
+ */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER
+{
+    U8                  PageVersion;                /* 0x00 */
+    U8                  Reserved1;                  /* 0x01 */
+    U8                  PageNumber;                 /* 0x02 */
+    U8                  PageType;                   /* 0x03 */
+    U16                 ExtPageLength;              /* 0x04 */
+    U8                  ExtPageType;                /* 0x06 */
+    U8                  Reserved2;                  /* 0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+  MPI2_POINTER PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+  Mpi2ConfigExtendedPageHeader_t, MPI2_POINTER pMpi2ConfigExtendedPageHeader_t;
+
+/* Overlay union covering both the plain and the extended header forms. */
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
+{
+   MPI2_CONFIG_PAGE_HEADER          Struct;
+   MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+   U8                               Bytes[8];
+   U16                              Word16[4];
+   U32                              Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, MPI2_POINTER PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+  Mpi2ConfigPageExtendedHeaderUnion, MPI2_POINTER pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/* PageType field values */
+/* upper nibble: page attribute (read-only / changeable / persistent) */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY              (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE             (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT             (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK                   (0xF0)
+
+/* lower nibble: page type code */
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT                (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC                    (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS                   (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME            (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING          (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK          (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED               (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK                   (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK                    (0x0FFF)
+
+
+/* ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT         (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER        (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE          (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY             (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG                 (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE           (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG         (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING      (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT            (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET            (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING   (0x1A)
+
+
+/*****************************************************************************
+*   PageAddress defines
+*****************************************************************************/
+
+/*
+ * These values are written into the 32-bit PageAddress field of the Config
+ * request (see MPI2_CONFIG_REQUEST below) to select which instance of a
+ * page is addressed.  For every page type the top nibble (the _FORM_ bits,
+ * mask 0xF0000000) selects how the remaining bits are interpreted, e.g.
+ * "get next handle" iteration versus lookup by a specific handle/number.
+ */
+
+/* RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK             (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE  (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE           (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK           (0x0000FFFF)
+
+
+/* RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK                    (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM    (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM             (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE               (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK             (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK               (0x0000FFFF)
+
+
+/* SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK              (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL     (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM      (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL              (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK            (0x0000FFFF)
+/* PHY number lives in bits 23:16 when the HNDL_PHY_NUM form is used */
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK            (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT           (16)
+
+
+/* SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK              (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE   (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE            (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK            (0x0000FFFF)
+
+
+/* SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK                 (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER           (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX        (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK           (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK        (0x0000FFFF)
+
+
+/* SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK                 (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT        (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM             (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK           (0x00000FFF)
+
+
+/* SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK              (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE   (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE            (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK            (0x0000FFFF)
+
+
+/* RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK                    (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM      (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM               (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG           (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK               (0x000000FF)
+
+
+/* Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK                     (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE              (0x00000000)
+
+/* entry count in bits 27:16, starting entry index in bits 15:0 */
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK              (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT             (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK              (0x0000FFFF)
+
+
+/* Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK                (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM              (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK           (0x000000FF)
+
+
+/****************************************************************************
+*   Configuration messages
+****************************************************************************/
+
+/* Configuration Request Message */
+/*
+ * Request the host posts to read or write a configuration page.  Action
+ * (see the MPI2_CONFIG_ACTION_ defines below) selects a header-only query
+ * or a read/write of the current, default, or NVRAM copy of the page;
+ * PageAddress selects the page instance (see the PGAD defines above) and
+ * the page data itself is transferred through PageBufferSGE.
+ */
+typedef struct _MPI2_CONFIG_REQUEST
+{
+    U8                      Action;                     /* 0x00 */
+    U8                      SGLFlags;                   /* 0x01 */
+    U8                      ChainOffset;                /* 0x02 */
+    U8                      Function;                   /* 0x03 */
+    U16                     ExtPageLength;              /* 0x04 */
+    U8                      ExtPageType;                /* 0x06 */
+    U8                      MsgFlags;                   /* 0x07 */
+    U8                      VP_ID;                      /* 0x08 */
+    U8                      VF_ID;                      /* 0x09 */
+    U16                     Reserved1;                  /* 0x0A */
+    U8                      Reserved2;                  /* 0x0C */
+    U8                      ProxyVF_ID;                 /* 0x0D */
+    U16                     Reserved4;                  /* 0x0E */
+    U32                     Reserved3;                  /* 0x10 */
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x14 */
+    U32                     PageAddress;                /* 0x18 */
+    MPI2_SGE_IO_UNION       PageBufferSGE;              /* 0x1C */
+} MPI2_CONFIG_REQUEST, MPI2_POINTER PTR_MPI2_CONFIG_REQUEST,
+  Mpi2ConfigRequest_t, MPI2_POINTER pMpi2ConfigRequest_t;
+
+/* values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER              (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT        (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT       (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT             (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM         (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT        (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM          (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE      (0x07)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* Config Reply Message */
+/*
+ * IOC's reply to a Config request.  Header echoes the page header (for a
+ * PAGE_HEADER action this is how the host learns the page version/length);
+ * IOCStatus/IOCLogInfo report the outcome of the request.
+ */
+typedef struct _MPI2_CONFIG_REPLY
+{
+    U8                      Action;                     /* 0x00 */
+    U8                      SGLFlags;                   /* 0x01 */
+    U8                      MsgLength;                  /* 0x02 */
+    U8                      Function;                   /* 0x03 */
+    U16                     ExtPageLength;              /* 0x04 */
+    U8                      ExtPageType;                /* 0x06 */
+    U8                      MsgFlags;                   /* 0x07 */
+    U8                      VP_ID;                      /* 0x08 */
+    U8                      VF_ID;                      /* 0x09 */
+    U16                     Reserved1;                  /* 0x0A */
+    U16                     Reserved2;                  /* 0x0C */
+    U16                     IOCStatus;                  /* 0x0E */
+    U32                     IOCLogInfo;                 /* 0x10 */
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x14 */
+} MPI2_CONFIG_REPLY, MPI2_POINTER PTR_MPI2_CONFIG_REPLY,
+  Mpi2ConfigReply_t, MPI2_POINTER pMpi2ConfigReply_t;
+
+
+
+/*****************************************************************************
+*
+*               C o n f i g u r a t i o n    P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+*   Manufacturing Config pages
+****************************************************************************/
+
+/* PCI vendor ID, and PCI device IDs of the supported controller chips. */
+#define MPI2_MFGPAGE_VENDORID_LSI                   (0x1000)
+
+/* MPI v2.0 SAS products */
+#define MPI2_MFGPAGE_DEVID_SAS2004                  (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008                  (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1                (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2                (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3                (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1                (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2                (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200                  (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1                (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2                (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3                (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4                (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5                (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6                (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1                (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2                (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3                (0x006E)
+
+/* MPI v2.5 SAS products */
+#define MPI25_MFGPAGE_DEVID_SAS3004                 (0x0096)
+#define MPI25_MFGPAGE_DEVID_SAS3008                 (0x0097)
+#define MPI25_MFGPAGE_DEVID_SAS3108_1               (0x0090)
+#define MPI25_MFGPAGE_DEVID_SAS3108_2               (0x0091)
+#define MPI25_MFGPAGE_DEVID_SAS3108_5               (0x0094)
+#define MPI25_MFGPAGE_DEVID_SAS3108_6               (0x0095)
+
+
+/* Manufacturing Page 0 */
+
+/* Fixed-length ASCII identification strings for the chip and board. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_0
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
+    U8                      ChipName[16];               /* 0x04 */
+    U8                      ChipRevision[8];            /* 0x14 */
+    U8                      BoardName[16];              /* 0x1C */
+    U8                      BoardAssembly[16];          /* 0x2C */
+    U8                      BoardTracerNumber[16];      /* 0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_0,
+  Mpi2ManufacturingPage0_t, MPI2_POINTER pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION                (0x00)
+
+
+/* Manufacturing Page 1 */
+
+/* Raw Vital Product Data block. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_1
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
+    U8                      VPD[256];                   /* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_1,
+  Mpi2ManufacturingPage1_t, MPI2_POINTER pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION                (0x00)
+
+
+/* PCI device/revision identity embedded in Manufacturing pages 2 and 3. */
+typedef struct _MPI2_CHIP_REVISION_ID
+{
+    U16 DeviceID;                                       /* 0x00 */
+    U8  PCIRevisionID;                                  /* 0x02 */
+    U8  Reserved;                                       /* 0x03 */
+} MPI2_CHIP_REVISION_ID, MPI2_POINTER PTR_MPI2_CHIP_REVISION_ID,
+  Mpi2ChipRevisionId_t, MPI2_POINTER pMpi2ChipRevisionId_t;
+
+
+/* Manufacturing Page 2 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS   (1)
+#endif
+
+/* Variable-length table of hardware settings words; real length comes
+ * from Header.PageLength. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_2
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
+    MPI2_CHIP_REVISION_ID   ChipId;                     /* 0x04 */
+    U32                     HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_2,
+  Mpi2ManufacturingPage2_t, MPI2_POINTER pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION                 (0x00)
+
+
+/* Manufacturing Page 3 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS          (1)
+#endif
+
+/* Variable-length table of info words; real length comes from
+ * Header.PageLength. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_3
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
+    MPI2_CHIP_REVISION_ID   ChipId;                     /* 0x04 */
+    U32                     Info[MPI2_MAN_PAGE_3_INFO_WORDS];/* 0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_3,
+  Mpi2ManufacturingPage3_t, MPI2_POINTER pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION                 (0x00)
+
+
+/* Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS
+{
+    U8                          PowerSaveFlags;                 /* 0x00 */
+    U8                          InternalOperationsSleepTime;    /* 0x01 */
+    U8                          InternalOperationsRunTime;      /* 0x02 */
+    U8                          HostIdleTime;                   /* 0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+  MPI2_POINTER PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+  Mpi2ManPage4PwrSaveSettings_t, MPI2_POINTER pMpi2ManPage4PwrSaveSettings_t;
+
+/* defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE               (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED           (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE             (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE               (0x02)
+
+/*
+ * Integrated RAID manufacturing settings: per-RAID-level volume settings
+ * words, inquiry data, power-save settings, and volume/disk limits.
+ * Bit meanings for Flags are given by the MPI2_MANPAGE4_ defines below.
+ */
+typedef struct _MPI2_CONFIG_PAGE_MAN_4
+{
+    MPI2_CONFIG_PAGE_HEADER         Header;                 /* 0x00 */
+    U32                             Reserved1;              /* 0x04 */
+    U32                             Flags;                  /* 0x08 */
+    U8                              InquirySize;            /* 0x0C */
+    U8                              Reserved2;              /* 0x0D */
+    U16                             Reserved3;              /* 0x0E */
+    U8                              InquiryData[56];        /* 0x10 */
+    U32                             RAID0VolumeSettings;    /* 0x48 */
+    U32                             RAID1EVolumeSettings;   /* 0x4C */
+    U32                             RAID1VolumeSettings;    /* 0x50 */
+    U32                             RAID10VolumeSettings;   /* 0x54 */
+    U32                             Reserved4;              /* 0x58 */
+    U32                             Reserved5;              /* 0x5C */
+    MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings;      /* 0x60 */
+    U8                              MaxOCEDisks;            /* 0x64 */
+    U8                              ResyncRate;             /* 0x65 */
+    U16                             DataScrubDuration;      /* 0x66 */
+    U8                              MaxHotSpares;           /* 0x68 */
+    U8                              MaxPhysDisksPerVol;     /* 0x69 */
+    U8                              MaxPhysDisks;           /* 0x6A */
+    U8                              MaxVolumes;             /* 0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_4,
+  Mpi2ManufacturingPage4_t, MPI2_POINTER pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION                 (0x0A)
+
+/* Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK                (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB                    (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA                  (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD               (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR              (0x00002000)
+
+/* physical-disk capacity coercion policy (2-bit field) */
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION            (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB             (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION           (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION        (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION            (0x00000C00)
+
+/* bad-block marking policy (2-bit field) */
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING            (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING         (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING           (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING      (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER            (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE                    (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE                    (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE                     (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE                     (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE              (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE            (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA                (0x00000001)
+
+
+/* Manufacturing Page 5 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES         (1)
+#endif
+
+/* Per-PHY entry: 64-bit SAS WWID and device name (16 bytes per entry). */
+typedef struct _MPI2_MANUFACTURING5_ENTRY
+{
+    U64                                WWID;            /* 0x00 */
+    U64                                DeviceName;      /* 0x08 */
+} MPI2_MANUFACTURING5_ENTRY, MPI2_POINTER PTR_MPI2_MANUFACTURING5_ENTRY,
+  Mpi2Manufacturing5Entry_t, MPI2_POINTER pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5
+{
+    MPI2_CONFIG_PAGE_HEADER            Header;          /* 0x00 */
+    U8                                 NumPhys;         /* 0x04 */
+    U8                                 Reserved1;       /* 0x05 */
+    U16                                Reserved2;       /* 0x06 */
+    U32                                Reserved3;       /* 0x08 */
+    U32                                Reserved4;       /* 0x0C */
+    /*
+     * NOTE(review): the original offset comment here said 0x08, but
+     * Reserved4 (U32) occupies 0x0C-0x0F, so the Phy array actually
+     * begins at offset 0x10.
+     */
+    MPI2_MANUFACTURING5_ENTRY          Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_MAN_5,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_5,
+  Mpi2ManufacturingPage5_t, MPI2_POINTER pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION                 (0x03)
+
+
+/* Manufacturing Page 6 */
+
+/* Single product-specific word; contents are product defined. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_6
+{
+    MPI2_CONFIG_PAGE_HEADER         Header;             /* 0x00 */
+    U32                             ProductSpecificInfo;/* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_6,
+  Mpi2ManufacturingPage6_t, MPI2_POINTER pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION                 (0x00)
+
+
+/* Manufacturing Page 7 */
+
+/* Describes one physical connector: pinout type/lane, a 16-byte ASCII
+ * connector label, location bits, receptacle ID, and slot number. */
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
+{
+    U32                         Pinout;                 /* 0x00 */
+    U8                          Connector[16];          /* 0x04 */
+    U8                          Location;               /* 0x14 */
+    U8                          ReceptacleID;           /* 0x15 */
+    U16                         Slot;                   /* 0x16 */
+    U32                         Reserved2;              /* 0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+  Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
+
+/* defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK                  (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT                 (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK                  (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN               (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE                (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482                   (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486                   (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484                   (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087                   (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I                (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I                (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470                   (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088                   (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X                (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X                (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X               (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436                   (0x0D)
+
+/* defines for the Location field (bit flags) */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN                  (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL                 (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL                 (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE               (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO                     (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT              (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED            (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX    (1)
+#endif
+
+/* Enclosure/connector description page; ConnectorInfo is a variable-length
+ * array sized by NumPhys. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_7
+{
+    MPI2_CONFIG_PAGE_HEADER         Header;                 /* 0x00 */
+    U32                             Reserved1;              /* 0x04 */
+    U32                             Reserved2;              /* 0x08 */
+    U32                             Flags;                  /* 0x0C */
+    U8                              EnclosureName[16];      /* 0x10 */
+    U8                              NumPhys;                /* 0x20 */
+    U8                              Reserved3;              /* 0x21 */
+    U16                             Reserved4;              /* 0x22 */
+    MPI2_MANPAGE7_CONNECTOR_INFO    ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /* 0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
+  Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION                 (0x01)
+
+/* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL         (0x00000008)
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
+
+
+/*
+ * Generic structure to use for product-specific manufacturing pages
+ * (currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+/* Only the first word is declared here; the real page length comes from
+ * Header.PageLength. */
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS
+{
+    MPI2_CONFIG_PAGE_HEADER         Header;             /* 0x00 */
+    U32                             ProductSpecificInfo;/* 0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_PS,
+  Mpi2ManufacturingPagePS_t, MPI2_POINTER pMpi2ManufacturingPagePS_t;
+
+/* page versions for the product-specific manufacturing pages 8-31 */
+#define MPI2_MANUFACTURING8_PAGEVERSION                 (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION                 (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION                (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION                (0x00)
+
+
+/****************************************************************************
+*   IO Unit Config Pages
+****************************************************************************/
+
+/* IO Unit Page 0 */
+
+/*
+ * NOTE(review): with UniqueValue declared U64 at 0x04, the two version
+ * fields would land at 0x0C/0x10 rather than the 0x08/0x0A annotated
+ * below; MPI2_VERSION_UNION is defined elsewhere (mpi2.h), so the offset
+ * comments here should be verified against it.
+ */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                  /* 0x00 */
+    U64                     UniqueValue;             /* 0x04 */
+    MPI2_VERSION_UNION      NvdataVersionDefault;    /* 0x08 */
+    MPI2_VERSION_UNION      NvdataVersionPersistent; /* 0x0A */
+} MPI2_CONFIG_PAGE_IO_UNIT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+  Mpi2IOUnitPage0_t, MPI2_POINTER pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION                    (0x02)
+
+
+/* IO Unit Page 1 */
+
+/* Global IO unit behavior flags; bit meanings are the defines below. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                  /* 0x00 */
+    U32                     Flags;                   /* 0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+  Mpi2IOUnitPage1_t, MPI2_POINTER pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
+
+/* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE  (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH             (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
+/* SATA write-cache policy (2-bit field at bits 10:9) */
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT         (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE        (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE     (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE       (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR                     (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID        (0x00000004)
+
+
+/* IO Unit Page 3 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX    (1)
+#endif
+
+/* GPIO pin table; one U16 per pin, count in GPIOCount. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                   /* 0x00 */
+    U8                      GPIOCount;                                /* 0x04 */
+    U8                      Reserved1;                                /* 0x05 */
+    U16                     Reserved2;                                /* 0x06 */
+    U16                     GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/* 0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+  Mpi2IOUnitPage3_t, MPI2_POINTER pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION                    (0x01)
+
+/* defines for IO Unit Page 3 GPIOVal field */
+/* bits 15:2 are the pin function; bit 0 is the on/off setting */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK             (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT            (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF               (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON                (0x0001)
+
+
+/* IO Unit Page 5 */
+
+/*
+ * Upper layer code (drivers, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES      (1)
+#endif
+
+/* RAID accelerator buffer/control layout plus one capability word per
+ * DMA engine (count in NumDmaEngines). */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U64                     RaidAcceleratorBufferBaseAddress;       /* 0x04 */
+    U64                     RaidAcceleratorBufferSize;              /* 0x0C */
+    U64                     RaidAcceleratorControlBaseAddress;      /* 0x14 */
+    U8                      RAControlSize;                          /* 0x1C */
+    U8                      NumDmaEngines;                          /* 0x1D */
+    U8                      RAMinControlSize;                       /* 0x1E */
+    U8                      RAMaxControlSize;                       /* 0x1F */
+    U32                     Reserved1;                              /* 0x20 */
+    U32                     Reserved2;                              /* 0x24 */
+    U32                     Reserved3;                              /* 0x28 */
+    U32                     DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /* 0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+  Mpi2IOUnitPage5_t, MPI2_POINTER pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION                    (0x00)
+
+/* defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS      (0xFFFF0000)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS     (16)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP                   (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION      (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING                (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION             (0x0001)
+
+
+/* IO Unit Page 6 */
+
+/* RAID accelerator host control region; enabled via the Flags bit below. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U16                     Flags;                                  /* 0x04 */
+    U8                      RAHostControlSize;                      /* 0x06 */
+    U8                      Reserved0;                              /* 0x07 */
+    U64                     RaidAcceleratorHostControlBaseAddress;  /* 0x08 */
+    U32                     Reserved1;                              /* 0x10 */
+    U32                     Reserved2;                              /* 0x14 */
+    U32                     Reserved3;                              /* 0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+  Mpi2IOUnitPage6_t, MPI2_POINTER pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION                    (0x00)
+
+/* defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR  (0x0001)
+
+
+/* IO Unit Page 7 */
+
+/*
+ * Power mode, PCIe link width/speed, processor state, power management
+ * capability bits, and IOC/board temperature fields.  Bit/value meanings
+ * for each field are given by the defines that follow the structure.
+ */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U8                      CurrentPowerMode;                       /* 0x04 */ /* reserved in MPI 2.0 */
+    U8                      PreviousPowerMode;                      /* 0x05 */ /* reserved in MPI 2.0 */
+    U8                      PCIeWidth;                              /* 0x06 */
+    U8                      PCIeSpeed;                              /* 0x07 */
+    U32                     ProcessorState;                         /* 0x08 */
+    U32                     PowerManagementCapabilities;            /* 0x0C */
+    U16                     IOCTemperature;                         /* 0x10 */
+    U8                      IOCTemperatureUnits;                    /* 0x12 */
+    U8                      IOCSpeed;                               /* 0x13 */
+    U16                     BoardTemperature;                       /* 0x14 */
+    U8                      BoardTemperatureUnits;                  /* 0x16 */
+    U8                      Reserved3;                              /* 0x17 */
+    U32                     Reserved4;                              /* 0x18 */
+    U32                     Reserved5;                              /* 0x1C */
+    U32                     Reserved6;                              /* 0x20 */
+    U32                     Reserved7;                              /* 0x24 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+  Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x04)
+
+/* defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
+/* bits 7:6 identify who initiated the power mode */
+#define MPI25_IOUNITPAGE7_PM_INIT_MASK                  (0xC0)
+#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE           (0x00)
+#define MPI25_IOUNITPAGE7_PM_INIT_HOST                  (0x40)
+#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT               (0x80)
+#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA              (0xC0)
+
+/* bits 2:0 give the power mode itself */
+#define MPI25_IOUNITPAGE7_PM_MODE_MASK                  (0x07)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE           (0x00)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN               (0x01)
+#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER            (0x04)
+#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER         (0x05)
+#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY               (0x06)
+
+
+/* defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1                  (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2                  (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4                  (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8                  (0x08)
+
+/* defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS            (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS            (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS            (0x02)
+
+/* defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND             (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND            (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT             (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED                (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED                 (0x02)
+
+/* defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE       (0x00400000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE    (0x00200000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE        (0x00100000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE      (0x00040000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE   (0x00020000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE       (0x00010000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE        (0x00004000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE     (0x00002000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE         (0x00001000)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED   (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED   (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED   (0x00000100)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED    (0x00000040)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED    (0x00000020)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED    (0x00000010)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE   (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE   (0x00000004) /* obsolete */
+#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE    (0x00000002) /* obsolete */
+#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE    (0x00000001) /* obsolete */
+
+/* obsolete names for the PowerManagementCapabilities bits (above) */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED        (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED        (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED        (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE        (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE        (0x00000004) /* obsolete */
+
+
+/* defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT           (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT            (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS               (0x02)
+
+/* defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL                 (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF                 (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER              (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH               (0x08)
+
+/* defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT         (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT          (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS             (0x02)
+
+
+/* IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS     (4)
+
+/* One temperature sensor's configuration: four thresholds, individually
+ * enabled via the T0..T3 flag bits below. */
+typedef struct _MPI2_IOUNIT8_SENSOR
+{
+    U16                     Flags;                                  /* 0x00 */
+    U16                     Reserved1;                              /* 0x02 */
+    U16                     Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /* 0x04 */
+    U32                     Reserved2;                              /* 0x0C */
+    U32                     Reserved3;                              /* 0x10 */
+    U32                     Reserved4;                              /* 0x14 */
+} MPI2_IOUNIT8_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT8_SENSOR,
+  Mpi2IOUnit8Sensor_t, MPI2_POINTER pMpi2IOUnit8Sensor_t;
+
+/* defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE             (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE             (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE             (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE             (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES     (1)
+#endif
+
+/* Sensor configuration page: per-sensor threshold setup and the polling
+ * interval; sensor count in NumSensors. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U32                     Reserved1;                              /* 0x04 */
+    U32                     Reserved2;                              /* 0x08 */
+    U8                      NumSensors;                             /* 0x0C */
+    U8                      PollingInterval;                        /* 0x0D */
+    U16                     Reserved3;                              /* 0x0E */
+    MPI2_IOUNIT8_SENSOR     Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+  Mpi2IOUnitPage8_t, MPI2_POINTER pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 9 */
+
+/* One sensor's current reading; valid only when the TEMP_VALID flag bit
+ * below is set. */
+typedef struct _MPI2_IOUNIT9_SENSOR
+{
+    U16                     CurrentTemperature;     /* 0x00 */
+    U16                     Reserved1;              /* 0x02 */
+    U8                      Flags;                  /* 0x04 */
+    U8                      Reserved2;              /* 0x05 */
+    U16                     Reserved3;              /* 0x06 */
+    U32                     Reserved4;              /* 0x08 */
+    U32                     Reserved5;              /* 0x0C */
+} MPI2_IOUNIT9_SENSOR, MPI2_POINTER PTR_MPI2_IOUNIT9_SENSOR,
+  Mpi2IOUnit9Sensor_t, MPI2_POINTER pMpi2IOUnit9Sensor_t;
+
+/* defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID            (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES     (1)
+#endif
+
+/* Sensor readings page; companion to the page 8 sensor configuration. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 0x00 */
+    U32                     Reserved1;                              /* 0x04 */
+    U32                     Reserved2;                              /* 0x08 */
+    U8                      NumSensors;                             /* 0x0C */
+    U8                      Reserved4;                              /* 0x0D */
+    U16                     Reserved3;                              /* 0x0E */
+    MPI2_IOUNIT9_SENSOR     Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+  Mpi2IOUnitPage9_t, MPI2_POINTER pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION                    (0x00)
+
+
+/* IO Unit Page 10 */
+
+/* Credit percentage assigned to one function. */
+typedef struct _MPI2_IOUNIT10_FUNCTION
+{
+    U8                      CreditPercent;      /* 0x00 */
+    U8                      Reserved1;          /* 0x01 */
+    U16                     Reserved2;          /* 0x02 */
+} MPI2_IOUNIT10_FUNCTION, MPI2_POINTER PTR_MPI2_IOUNIT10_FUNCTION,
+  Mpi2IOUnit10Function_t, MPI2_POINTER pMpi2IOUnit10Function_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES      (1)
+#endif
+
+/* Per-function credit table; entry count in NumFunctions. */
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10
+{
+    MPI2_CONFIG_PAGE_HEADER Header;                                      /* 0x00 */
+    U8                      NumFunctions;                                /* 0x04 */
+    U8                      Reserved1;                                   /* 0x05 */
+    U16                     Reserved2;                                   /* 0x06 */
+    U32                     Reserved3;                                   /* 0x08 */
+    U32                     Reserved4;                                   /* 0x0C */
+    MPI2_IOUNIT10_FUNCTION  Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/* 0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+  Mpi2IOUnitPage10_t, MPI2_POINTER pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION                   (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/* IOC Page 0 */
+
+/* IOC Page 0: PCI identity of the IOC (vendor/device/revision/subsystem). */
+typedef struct _MPI2_CONFIG_PAGE_IOC_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U16 VendorID; /* 0x0C */
+ U16 DeviceID; /* 0x0E */
+ U8 RevisionID; /* 0x10 */
+ U8 Reserved3; /* 0x11 */
+ U16 Reserved4; /* 0x12 */
+ U32 ClassCode; /* 0x14 */
+ U16 SubsystemVendorID; /* 0x18 */
+ U16 SubsystemID; /* 0x1A */
+} MPI2_CONFIG_PAGE_IOC_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, MPI2_POINTER pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/* IOC Page 1 */
+
+/*
+ * IOC Page 1: reply coalescing controls (Flags/CoalescingTimeout/Depth) and
+ * the IOC's PCI location (0xFF in the slot/bus/domain fields means unknown;
+ * see the _UNKNOWN defines below).
+ */
+typedef struct _MPI2_CONFIG_PAGE_IOC_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Flags; /* 0x04 */
+ U32 CoalescingTimeout; /* 0x08 */
+ U8 CoalescingDepth; /* 0x0C */
+ U8 PCISlotNum; /* 0x0D */
+ U8 PCIBusNum; /* 0x0E */
+ U8 PCIDomainSegment; /* 0x0F */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_CONFIG_PAGE_IOC_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, MPI2_POINTER pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/* defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/* IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 CapabilitiesFlags; /* 0x04 */
+ U8 MaxDrivesRAID0; /* 0x08 */
+ U8 MaxDrivesRAID1; /* 0x09 */
+ U8 MaxDrivesRAID1E; /* 0x0A */
+ U8 MaxDrivesRAID10; /* 0x0B */
+ U8 MinDrivesRAID0; /* 0x0C */
+ U8 MinDrivesRAID1; /* 0x0D */
+ U8 MinDrivesRAID1E; /* 0x0E */
+ U8 MinDrivesRAID10; /* 0x0F */
+ U32 Reserved1; /* 0x10 */
+ U8 MaxGlobalHotSpares; /* 0x14 */
+ U8 MaxPhysDisks; /* 0x15 */
+ U8 MaxVolumes; /* 0x16 */
+ U8 MaxConfigs; /* 0x17 */
+ U8 MaxOCEDisks; /* 0x18 */
+ U8 Reserved2; /* 0x19 */
+ U16 Reserved3; /* 0x1A */
+ U32 SupportedStripeSizeMapRAID0; /* 0x1C */
+ U32 SupportedStripeSizeMapRAID1E; /* 0x20 */
+ U32 SupportedStripeSizeMapRAID10; /* 0x24 */
+ U32 Reserved4; /* 0x28 */
+ U32 Reserved5; /* 0x2C */
+ U16 DefaultMetadataSize; /* 0x30 */
+ U16 Reserved6; /* 0x32 */
+ U16 MaxBadBlockTableEntries; /* 0x34 */
+ U16 Reserved7; /* 0x36 */
+ U32 IRNvsramVersion; /* 0x38 */
+} MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/* defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/* IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+/*
+ * IOC Page 7: event notification masks. A set bit in EventMasks[] (indexed
+ * by event-code / 32) suppresses that event; the two U16 fields mask SAS
+ * broadcast and notify primitives.
+ */
+typedef struct _MPI2_CONFIG_PAGE_IOC_7
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/* 0x08 */
+ U16 SASBroadcastPrimitiveMasks; /* 0x18 */
+ U16 SASNotifyPrimitiveMasks; /* 0x1A */
+ U32 Reserved3; /* 0x1C */
+} MPI2_CONFIG_PAGE_IOC_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, MPI2_POINTER pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/* IOC Page 8 */
+
+/*
+ * IOC Page 8: persistent device-to-target-ID mapping configuration
+ * (mapping mode and limits; see the Flags / IRVolumeMappingFlags defines
+ * following this structure).
+ */
+typedef struct _MPI2_CONFIG_PAGE_IOC_8
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumDevsPerEnclosure; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U16 MaxPersistentEntries; /* 0x08 */
+ U16 MaxNumPhysicalMappedIDs; /* 0x0A */
+ U16 Flags; /* 0x0C */
+ U16 Reserved3; /* 0x0E */
+ U16 IRVolumeMappingFlags; /* 0x10 */
+ U16 Reserved4; /* 0x12 */
+ U32 Reserved5; /* 0x14 */
+} MPI2_CONFIG_PAGE_IOC_8, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, MPI2_POINTER pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/* defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/* defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/* BIOS Page 1 */
+
+/*
+ * BIOS Page 1: BIOS/UEFI option bits and per-device-class I/O timeouts
+ * (values in the IOTimeout* fields; field semantics given by the
+ * BiosOptions / IOCSettings / DeviceSettings defines below).
+ */
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 BiosOptions; /* 0x04 */
+ U32 IOCSettings; /* 0x08 */
+ U32 Reserved1; /* 0x0C */
+ U32 DeviceSettings; /* 0x10 */
+ U16 NumberOfDevices; /* 0x14 */
+ U16 UEFIVersion; /* 0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /* 0x18 */
+ U16 IOTimeoutSequential; /* 0x1A */
+ U16 IOTimeoutOther; /* 0x1C */
+ U16 IOTimeoutBlockDevicesRM; /* 0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+
+/* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
+
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/* values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/* values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/* defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
+
+
+
+/* BIOS Page 2 */
+
+/*
+ * Boot-device selector, "adapter order" form (all fields reserved): sizes
+ * the 24-byte MPI2_BIOSPAGE2_BOOT_DEVICE union without naming a device.
+ */
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER
+{
+ U32 Reserved1; /* 0x00 */
+ U32 Reserved2; /* 0x04 */
+ U32 Reserved3; /* 0x08 */
+ U32 Reserved4; /* 0x0C */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t, MPI2_POINTER pMpi2BootDeviceAdapterOrder_t;
+
+/* Boot-device selector: by SAS address plus 8-byte LUN. */
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID
+{
+ U64 SASAddress; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t, MPI2_POINTER pMpi2BootDeviceSasWwid_t;
+
+/* Boot-device selector: by enclosure logical ID plus slot number. */
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT
+{
+ U64 EnclosureLogicalID; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U16 SlotNumber; /* 0x10 */
+ U16 Reserved3; /* 0x12 */
+ U32 Reserved4; /* 0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ MPI2_POINTER PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t, MPI2_POINTER pMpi2BootDeviceEnclosureSlot_t;
+
+/* Boot-device selector: by 64-bit device name plus 8-byte LUN. */
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME
+{
+ U64 DeviceName; /* 0x00 */
+ U8 LUN[8]; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME, MPI2_POINTER PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t, MPI2_POINTER pMpi2BootDeviceDeviceName_t;
+
+/*
+ * Union of the four boot-device selector forms; the active member is chosen
+ * by the matching *BootDeviceForm field in BIOS Page 2 (MPI2_BIOSPAGE2_FORM_*).
+ * NOTE(review): the doubled "MPI2_MPI2_" struct tag matches the vendor
+ * header; kept as-is for source compatibility.
+ */
+typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE
+{
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE, MPI2_POINTER PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t, MPI2_POINTER pMpi2BiosPage2BootDevice_t;
+
+/*
+ * BIOS Page 2: requested, alternate-requested, and current boot devices.
+ * Each *Form byte selects the active member of the following
+ * MPI2_BIOSPAGE2_BOOT_DEVICE union (MPI2_BIOSPAGE2_FORM_* values below).
+ */
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+ U32 Reserved5; /* 0x14 */
+ U32 Reserved6; /* 0x18 */
+ U8 ReqBootDeviceForm; /* 0x1C */
+ U8 Reserved7; /* 0x1D */
+ U16 Reserved8; /* 0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /* 0x20 */
+ U8 ReqAltBootDeviceForm; /* 0x38 */
+ U8 Reserved9; /* 0x39 */
+ U16 Reserved10; /* 0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /* 0x3C */
+ U8 CurrentBootDeviceForm; /* 0x58 */
+ U8 Reserved11; /* 0x59 */
+ U16 Reserved12; /* 0x5A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /* 0x5C */
+} MPI2_CONFIG_PAGE_BIOS_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, MPI2_POINTER pMpi2BiosPage2_t;
+
+/*
+ * NOTE(review): the original offset comment on CurrentBootDevice read 0x58,
+ * which collides with CurrentBootDeviceForm at 0x58; the preceding
+ * U8 + U8 + U16 run (0x58-0x5B) places the union at 0x5C, matching the
+ * +4 pattern of the two earlier Form/union pairs (0x1C->0x20, 0x38->0x3C).
+ */
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/* values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/* BIOS Page 3 */
+
+/* BIOS Page 3 element: PCI location and flags of one adapter in boot order. */
+typedef struct _MPI2_ADAPTER_INFO
+{
+ U8 PciBusNumber; /* 0x00 */
+ U8 PciDeviceAndFunctionNumber; /* 0x01 */
+ U16 AdapterFlags; /* 0x02 */
+} MPI2_ADAPTER_INFO, MPI2_POINTER PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, MPI2_POINTER pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+/*
+ * BIOS Page 3: global BIOS behavior flags, BIOS version, and a fixed
+ * four-entry adapter boot-order table.
+ */
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 GlobalFlags; /* 0x04 */
+ U32 BiosVersion; /* 0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /* 0x0C */
+ U32 Reserved1; /* 0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, MPI2_POINTER pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/* values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/* BIOS Page 4 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+/*
+ * BIOS Page 4 element: WWID / device-name reassignment pair for one phy.
+ * NOTE(review): the "Mpi2MBios4Entry_t" typedef spelling (stray 'M') matches
+ * the vendor header and is kept for source compatibility.
+ */
+typedef struct _MPI2_BIOS4_ENTRY
+{
+ U64 ReassignmentWWID; /* 0x00 */
+ U64 ReassignmentDeviceName; /* 0x08 */
+} MPI2_BIOS4_ENTRY, MPI2_POINTER PTR_MPI2_BIOS4_ENTRY,
+ Mpi2MBios4Entry_t, MPI2_POINTER pMpi2Bios4Entry_t;
+
+/*
+ * BIOS Page 4: per-phy reassignment table. NumPhys gives the real Phy[]
+ * count at runtime (array declared with a single entry).
+ */
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ MPI2_BIOS4_ENTRY Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /* 0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, MPI2_POINTER pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/* RAID Volume Page 0 */
+
+/*
+ * RAID Volume Page 0 element: one member disk (RAID set, primary/secondary
+ * role per the PhysDiskMap defines below, and physical disk number).
+ */
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK
+{
+ U8 RAIDSetNum; /* 0x00 */
+ U8 PhysDiskMap; /* 0x01 */
+ U8 PhysDiskNum; /* 0x02 */
+ U8 Reserved; /* 0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, MPI2_POINTER PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, MPI2_POINTER pMpi2RaidVol0PhysDisk_t;
+
+/* defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+/*
+ * RAID Volume Page 0 settings: Settings bit field (write-cache mode etc.,
+ * MPI2_RAIDVOL0_SETTING_*) and hot-spare pool membership bitmap
+ * (MPI2_RAID_HOT_SPARE_POOL_*).
+ */
+typedef struct _MPI2_RAIDVOL0_SETTINGS
+{
+ U16 Settings; /* 0x00 */
+ U8 HotSparePool; /* 0x01 */
+ U8 Reserved; /* 0x02 */
+} MPI2_RAIDVOL0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t, MPI2_POINTER pMpi2RaidVol0Settings_t;
+
+/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/* RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+/*
+ * RAID Volume Page 0: state, type, geometry, and member list of one volume.
+ * NumPhysDisks gives the real PhysDisk[] count at runtime (array declared
+ * with a single entry). Field value sets are the defines that follow
+ * (VolumeState, VolumeType, VolumeStatusFlags, SupportedPhysDisks,
+ * InactiveStatus).
+ */
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U8 VolumeState; /* 0x06 */
+ U8 VolumeType; /* 0x07 */
+ U32 VolumeStatusFlags; /* 0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /* 0x0C */
+ U64 MaxLBA; /* 0x10 */
+ U32 StripeSize; /* 0x18 */
+ U16 BlockSize; /* 0x1C */
+ U16 Reserved1; /* 0x1E */
+ U8 SupportedPhysDisks; /* 0x20 */
+ U8 ResyncRate; /* 0x21 */
+ U16 DataScrubDuration; /* 0x22 */
+ U8 NumPhysDisks; /* 0x24 */
+ U8 Reserved2; /* 0x25 */
+ U8 Reserved3; /* 0x26 */
+ U8 InactiveStatus; /* 0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /* 0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, MPI2_POINTER pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/* values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/* values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/* values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/* values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/* values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
+
+
+/* RAID Volume Page 1 */
+
+/* RAID Volume Page 1: volume identity (GUID, name, WWID) by device handle. */
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U16 Reserved0; /* 0x06 */
+ U8 GUID[24]; /* 0x08 */
+ U8 Name[16]; /* 0x20 */
+ U64 WWID; /* 0x30 */
+ U32 Reserved1; /* 0x38 */
+ U32 Reserved2; /* 0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, MPI2_POINTER pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/* RAID Physical Disk Page 0 */
+
+/*
+ * RAID Physical Disk Page 0 settings: hot-spare pool membership bitmap
+ * (MPI2_RAID_HOT_SPARE_POOL_* values, per the note below).
+ */
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS
+{
+ U16 Reserved1; /* 0x00 */
+ U8 HotSparePool; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t, MPI2_POINTER pMpi2RaidPhysDisk0Settings_t;
+
+/* use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+/* SCSI INQUIRY-style identity strings of a RAID member disk (fixed-width,
+ * not NUL-terminated). */
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA
+{
+ U8 VendorID[8]; /* 0x00 */
+ U8 ProductID[16]; /* 0x08 */
+ U8 ProductRevLevel[4]; /* 0x18 */
+ U8 SerialNum[32]; /* 0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ MPI2_POINTER PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t, MPI2_POINTER pMpi2RaidPhysDisk0InquiryData_t;
+
+/*
+ * RAID Physical Disk Page 0: full state of one member disk — settings,
+ * identity, state/offline/incompatibility codes, attributes, status flags,
+ * and the device/host/coerced capacity limits. Value sets for the state,
+ * reason, attribute, and status fields are the defines that follow.
+ */
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 DevHandle; /* 0x04 */
+ U8 Reserved1; /* 0x06 */
+ U8 PhysDiskNum; /* 0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /* 0x10 */
+ U32 Reserved3; /* 0x4C */
+ U8 PhysDiskState; /* 0x50 */
+ U8 OfflineReason; /* 0x51 */
+ U8 IncompatibleReason; /* 0x52 */
+ U8 PhysDiskAttributes; /* 0x53 */
+ U32 PhysDiskStatusFlags; /* 0x54 */
+ U64 DeviceMaxLBA; /* 0x58 */
+ U64 HostMaxLBA; /* 0x60 */
+ U64 CoercedMaxLBA; /* 0x68 */
+ U16 BlockSize; /* 0x70 */
+ U16 Reserved5; /* 0x72 */
+ U32 Reserved6; /* 0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t, MPI2_POINTER pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/* PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/* OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/* IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/* PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/* PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
+
+
+/* RAID Physical Disk Page 1 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+/*
+ * RAID Physical Disk Page 1 element: one I/O path to a member disk
+ * (device handle, path and owner WWIDs, path flags per the defines below).
+ */
+typedef struct _MPI2_RAIDPHYSDISK1_PATH
+{
+ U16 DevHandle; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U64 WWID; /* 0x04 */
+ U64 OwnerWWID; /* 0x0C */
+ U8 OwnerIdentifier; /* 0x14 */
+ U8 Reserved2; /* 0x15 */
+ U16 Flags; /* 0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, MPI2_POINTER PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t, MPI2_POINTER pMpi2RaidPhysDisk1Path_t;
+
+/* RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+/*
+ * RAID Physical Disk Page 1: path list for one member disk.
+ * NumPhysDiskPaths gives the real PhysicalDiskPath[] count at runtime
+ * (array declared with a single entry).
+ */
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
+{
+ MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhysDiskPaths; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 Reserved1; /* 0x06 */
+ U32 Reserved2; /* 0x08 */
+ MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/* 0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t, MPI2_POINTER pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/* values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/* link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
+
+
+/* values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/* values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/* values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+
+
+/* values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/* SAS IO Unit Page 0 */
+
+/*
+ * SAS IO Unit Page 0 element: discovered state of one controller phy —
+ * port assignment, flags, negotiated rate (MPI2_SAS_NEG_LINK_RATE_*),
+ * attached/controller device handles, and discovery status bits
+ * (MPI2_SASIOUNIT0_DS_* below).
+ */
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA
+{
+ U8 Port; /* 0x00 */
+ U8 PortFlags; /* 0x01 */
+ U8 PhyFlags; /* 0x02 */
+ U8 NegotiatedLinkRate; /* 0x03 */
+ U32 ControllerPhyDeviceInfo;/* 0x04 */
+ U16 AttachedDevHandle; /* 0x08 */
+ U16 ControllerDevHandle; /* 0x0A */
+ U32 DiscoveryStatus; /* 0x0C */
+ U32 Reserved; /* 0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t, MPI2_POINTER pMpi2SasIOUnit0PhyData_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+/*
+ * SAS IO Unit Page 0 (extended page): current per-phy state. NumPhys gives
+ * the real PhyData[] count at runtime (array declared with a single entry).
+ */
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhys; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, MPI2_POINTER pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/* values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/* see mpi2_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/* values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
+
+
+/* SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA
+{
+ U8 Port; /* 0x00 */
+ U8 PortFlags; /* 0x01 */
+ U8 PhyFlags; /* 0x02 */
+ U8 MaxMinLinkRate; /* 0x03 */
+ U32 ControllerPhyDeviceInfo; /* 0x04 */
+ U16 MaxTargetPortConnectTime; /* 0x08 */
+ U16 Reserved1; /* 0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA, MPI2_POINTER PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t, MPI2_POINTER pMpi2SasIOUnit1PhyData_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 ControlFlags; /* 0x08 */
+ U16 SASNarrowMaxQueueDepth; /* 0x0A */
+ U16 AdditionalControlFlags; /* 0x0C */
+ U16 SASWideMaxQueueDepth; /* 0x0E */
+ U8 NumPhys; /* 0x10 */
+ U8 SATAMaxQDepth; /* 0x11 */
+ U8 ReportDeviceMissingDelay; /* 0x12 */
+ U8 IODeviceMissingDelay; /* 0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /* 0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, MPI2_POINTER pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/* values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. */
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) /* MPI v2.0 only. Obsolete in MPI v2.5 and later. */
+
+/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
+
+/* values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+
+/* see mpi2_sas.h for values for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/* SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
+{
+ U8 MaxTargetSpinup; /* 0x00 */
+ U8 SpinupDelay; /* 0x01 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP SpinupGroupParameters[4]; /* 0x08 */
+ U32 Reserved1; /* 0x18 */
+ U32 Reserved2; /* 0x1C */
+ U32 Reserved3; /* 0x20 */
+ U8 BootDeviceWaitTime; /* 0x24 */
+ U8 Reserved4; /* 0x25 */
+ U16 Reserved5; /* 0x26 */
+ U8 NumPhys; /* 0x28 */
+ U8 PEInitialSpinupDelay; /* 0x29 */
+ U8 PEReplyDelay; /* 0x2A */
+ U8 Flags; /* 0x2B */
+ U8 PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /* 0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, MPI2_POINTER pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/* defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/* SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+{
+ U8 ControlFlags; /* 0x00 */
+ U8 PortWidthModGroup; /* 0x01 */
+ U16 InactivityTimerExponent; /* 0x02 */
+ U8 SATAPartialTimeout; /* 0x04 */
+ U8 Reserved2; /* 0x05 */
+ U8 SATASlumberTimeout; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+ U8 SASPartialTimeout; /* 0x08 */
+ U8 Reserved4; /* 0x09 */
+ U8 SASSlumberTimeout; /* 0x0A */
+ U8 Reserved5; /* 0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t, MPI2_POINTER pMpi2SasIOUnit5PhyPmSettings_t;
+
+/* defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/* defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/* defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumPhys; /* 0x08 */
+ U8 Reserved1; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, MPI2_POINTER pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/* SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+{
+ U8 CurrentStatus; /* 0x00 */
+ U8 CurrentModulation; /* 0x01 */
+ U8 CurrentUtilization; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ MPI2_POINTER pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/* defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/* defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U8 NumGroups; /* 0x10 */
+ U8 Reserved3; /* 0x11 */
+ U16 Reserved4; /* 0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /* 0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, MPI2_POINTER pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/* SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+{
+ U8 Flags; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U8 Threshold75Pct; /* 0x04 */
+ U8 Threshold50Pct; /* 0x05 */
+ U8 Threshold25Pct; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ MPI2_POINTER PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ MPI2_POINTER pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/* defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 SamplingInterval; /* 0x08 */
+ U8 WindowLength; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U8 NumGroups; /* 0x14 */
+ U8 Reserved4; /* 0x15 */
+ U16 Reserved5; /* 0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX]; /* 0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, MPI2_POINTER pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/* SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 PowerManagementCapabilities; /* 0x0C */
+ U8 TxRxSleepStatus; /* 0x10 */ /* reserved in MPI 2.0 */
+ U8 Reserved2; /* 0x11 */
+ U16 Reserved3; /* 0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, MPI2_POINTER pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/* defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+/* defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
+
+
+
+/* SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U64 TimeStamp; /* 0x08 */
+ U32 Reserved1; /* 0x10 */
+ U32 Reserved2; /* 0x14 */
+ U32 FastPathPendedRequests; /* 0x18 */
+ U32 FastPathUnPendedRequests; /* 0x1C */
+ U32 FastPathHostRequestStarts; /* 0x20 */
+ U32 FastPathFirmwareRequestStarts; /* 0x24 */
+ U32 FastPathHostCompletions; /* 0x28 */
+ U32 FastPathFirmwareCompletions; /* 0x2C */
+ U32 NonFastPathRequestStarts; /* 0x30 */
+    U32                                 NonFastPathHostCompletions;         /* 0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+ Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/* SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PhysicalPort; /* 0x08 */
+ U8 ReportGenLength; /* 0x09 */
+ U16 EnclosureHandle; /* 0x0A */
+ U64 SASAddress; /* 0x0C */
+ U32 DiscoveryStatus; /* 0x14 */
+ U16 DevHandle; /* 0x18 */
+ U16 ParentDevHandle; /* 0x1A */
+ U16 ExpanderChangeCount; /* 0x1C */
+ U16 ExpanderRouteIndexes; /* 0x1E */
+ U8 NumPhys; /* 0x20 */
+ U8 SASLevel; /* 0x21 */
+ U16 Flags; /* 0x22 */
+ U16 STPBusInactivityTimeLimit; /* 0x24 */
+ U16 STPMaxConnectTimeLimit; /* 0x26 */
+ U16 STP_SMP_NexusLossTime; /* 0x28 */
+ U16 MaxNumRoutedSasAddresses; /* 0x2A */
+ U64 ActiveZoneManagerSASAddress;/* 0x2C */
+ U16 ZoneLockInactivityLimit; /* 0x34 */
+ U16 Reserved1; /* 0x36 */
+ U8 TimeToReducedFunc; /* 0x38 */
+ U8 InitialTimeToReducedFunc; /* 0x39 */
+ U8 MaxReducedFuncTime; /* 0x3A */
+ U8 Reserved2; /* 0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, MPI2_POINTER pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/* values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/* values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/* SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PhysicalPort; /* 0x08 */
+ U8 Reserved1; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U8 NumPhys; /* 0x0C */
+ U8 Phy; /* 0x0D */
+ U16 NumTableEntriesProgrammed; /* 0x0E */
+ U8 ProgrammedLinkRate; /* 0x10 */
+ U8 HwLinkRate; /* 0x11 */
+ U16 AttachedDevHandle; /* 0x12 */
+ U32 PhyInfo; /* 0x14 */
+ U32 AttachedDeviceInfo; /* 0x18 */
+ U16 ExpanderDevHandle; /* 0x1C */
+ U8 ChangeCount; /* 0x1E */
+ U8 NegotiatedLinkRate; /* 0x1F */
+ U8 PhyIdentifier; /* 0x20 */
+ U8 AttachedPhyIdentifier; /* 0x21 */
+ U8 Reserved3; /* 0x22 */
+ U8 DiscoveryInfo; /* 0x23 */
+ U32 AttachedPhyInfo; /* 0x24 */
+ U8 ZoneGroup; /* 0x28 */
+ U8 SelfConfigStatus; /* 0x29 */
+ U16 Reserved4; /* 0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, MPI2_POINTER pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/* see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines used for the AttachedDeviceInfo field */
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/* values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/* SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 Slot; /* 0x08 */
+ U16 EnclosureHandle; /* 0x0A */
+ U64 SASAddress; /* 0x0C */
+ U16 ParentDevHandle; /* 0x14 */
+ U8 PhyNum; /* 0x16 */
+ U8 AccessStatus; /* 0x17 */
+ U16 DevHandle; /* 0x18 */
+ U8 AttachedPhyIdentifier; /* 0x1A */
+ U8 ZoneGroup; /* 0x1B */
+ U32 DeviceInfo; /* 0x1C */
+ U16 Flags; /* 0x20 */
+ U8 PhysicalPort; /* 0x22 */
+ U8 MaxPortConnections; /* 0x23 */
+ U64 DeviceName; /* 0x24 */
+ U8 PortGroups; /* 0x2C */
+ U8 DmaGroup; /* 0x2D */
+ U8 ControlGroup; /* 0x2E */
+ U8 EnclosureLevel; /* 0x2F */
+ U8 ConnectorName[4]; /* 0x30 */
+ U32 Reserved3; /* 0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x09)
+
+/* values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/* specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/* see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/* values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+
+
+/* SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U64 SASAddress; /* 0x0C */
+ U32 Reserved2; /* 0x14 */
+ U16 DevHandle; /* 0x18 */
+ U16 Reserved3; /* 0x1A */
+ U8 InitialRegDeviceFIS[20];/* 0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t, MPI2_POINTER pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/* SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 OwnerDevHandle; /* 0x08 */
+ U16 Reserved1; /* 0x0A */
+ U16 AttachedDevHandle; /* 0x0C */
+ U8 AttachedPhyIdentifier; /* 0x0E */
+ U8 Reserved2; /* 0x0F */
+ U32 AttachedPhyInfo; /* 0x10 */
+ U8 ProgrammedLinkRate; /* 0x14 */
+ U8 HwLinkRate; /* 0x15 */
+ U8 ChangeCount; /* 0x16 */
+ U8 Flags; /* 0x17 */
+ U32 PhyInfo; /* 0x18 */
+ U8 NegotiatedLinkRate; /* 0x1C */
+ U8 Reserved3; /* 0x1D */
+ U16 Reserved4; /* 0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, MPI2_POINTER pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/* values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/* SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 InvalidDwordCount; /* 0x0C */
+ U32 RunningDisparityErrorCount; /* 0x10 */
+ U32 LossDwordSynchCount; /* 0x14 */
+ U32 PhyResetProblemCount; /* 0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, MPI2_POINTER pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/* SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT
+{
+ U8 PhyEventCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 PhyEventInfo; /* 0x04 */
+} MPI2_SASPHY2_PHY_EVENT, MPI2_POINTER PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, MPI2_POINTER pMpi2SasPhy2PhyEvent_t;
+
+/* use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhyEvents; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SASPHY2_PHY_EVENT PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t, MPI2_POINTER pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/* SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG
+{
+ U8 PhyEventCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U8 CounterType; /* 0x04 */
+ U8 ThresholdWindow; /* 0x05 */
+ U8 TimeUnits; /* 0x06 */
+ U8 Reserved3; /* 0x07 */
+ U32 EventThreshold; /* 0x08 */
+ U16 ThresholdFlags; /* 0x0C */
+ U16 Reserved4; /* 0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG, MPI2_POINTER PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t, MPI2_POINTER pMpi2SasPhy3PhyEventConfig_t;
+
+/* values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/* values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/* values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/* values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 NumPhyEvents; /* 0x0C */
+ U8 Reserved2; /* 0x0D */
+ U16 Reserved3; /* 0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /* 0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, MPI2_POINTER pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/* SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 Reserved1; /* 0x08 */
+ U8 Reserved2; /* 0x0A */
+ U8 Flags; /* 0x0B */
+ U8 InitialFrame[28]; /* 0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/* values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/* SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 PortNumber; /* 0x08 */
+ U8 PhysicalPort; /* 0x09 */
+ U8 PortWidth; /* 0x0A */
+ U8 PhysicalPortWidth; /* 0x0B */
+ U8 ZoneGroup; /* 0x0C */
+ U8 Reserved1; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
+ U64 SASAddress; /* 0x10 */
+ U32 DeviceInfo; /* 0x18 */
+ U32 Reserved3; /* 0x1C */
+ U32 Reserved4; /* 0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, MPI2_POINTER pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/* see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/* SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U64 EnclosureLogicalID; /* 0x0C */
+ U16 Flags; /* 0x14 */
+ U16 EnclosureHandle; /* 0x16 */
+ U16 NumSlots; /* 0x18 */
+ U16 StartSlot; /* 0x1A */
+ U8 Reserved2; /* 0x1C */
+ U8 EnclosureLevel; /* 0x1D */
+ U16 SEPDevHandle; /* 0x1E */
+ U32 Reserved3; /* 0x20 */
+ U32 Reserved4; /* 0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
+
+/* values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/* Log Page 0 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY
+{
+ U64 TimeStamp; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U16 LogSequence; /* 0x0C */
+ U16 LogEntryQualifier; /* 0x0E */
+ U8 VP_ID; /* 0x10 */
+ U8 VF_ID; /* 0x11 */
+ U16 Reserved2; /* 0x12 */
+ U8 LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/* 0x14 */
+} MPI2_LOG_0_ENTRY, MPI2_POINTER PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, MPI2_POINTER pMpi2Log0Entry_t;
+
+/* values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+ U16 NumLogEntries; /* 0x10 */
+ U16 Reserved3; /* 0x12 */
+ MPI2_LOG_0_ENTRY LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /* 0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, MPI2_POINTER pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/* RAID Page 0 */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+{
+ U16 ElementFlags; /* 0x00 */
+ U16 VolDevHandle; /* 0x02 */
+ U8 HotSparePool; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 PhysDiskDevHandle; /* 0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ MPI2_POINTER PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t, MPI2_POINTER pMpi2RaidConfig0ConfigElement_t;
+
+/* values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumHotSpares; /* 0x08 */
+ U8 NumPhysDisks; /* 0x09 */
+ U8 NumVolumes; /* 0x0A */
+ U8 ConfigNum; /* 0x0B */
+ U32 Flags; /* 0x0C */
+ U8 ConfigGUID[24]; /* 0x10 */
+ U32 Reserved1; /* 0x28 */
+ U8 NumElements; /* 0x2C */
+ U8 Reserved2; /* 0x2D */
+ U16 Reserved3; /* 0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /* 0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t, MPI2_POINTER pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/* values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/* Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY
+{
+ U64 PhysicalIdentifier; /* 0x00 */
+ U16 MappingInformation; /* 0x08 */
+ U16 DeviceIndex; /* 0x0A */
+ U32 PhysicalBitsMapping; /* 0x0C */
+ U32 Reserved1; /* 0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, MPI2_POINTER pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /* 0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, MPI2_POINTER pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/* values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/* Ethernet Page 0 */
+
+/* IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR
+{
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, MPI2_POINTER PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, MPI2_POINTER pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U8 NumInterfaces; /* 0x08 */
+ U8 Reserved0; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Status; /* 0x0C */
+ U8 MediaState; /* 0x10 */
+ U8 Reserved2; /* 0x11 */
+ U16 Reserved3; /* 0x12 */
+ U8 MacAddress[6]; /* 0x14 */
+ U8 Reserved4; /* 0x1A */
+ U8 Reserved5; /* 0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /* 0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /* 0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress; /* 0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /* 0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /* 0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /* 0x6C */
+ U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, MPI2_POINTER pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/* values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/* values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/* Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved0; /* 0x08 */
+ U32 Flags; /* 0x0C */
+ U8 MediaState; /* 0x10 */
+ U8 Reserved1; /* 0x11 */
+ U16 Reserved2; /* 0x12 */
+ U8 MacAddress[6]; /* 0x14 */
+ U8 Reserved3; /* 0x1A */
+ U8 Reserved4; /* 0x1B */
+ MPI2_ETHERNET_IP_ADDR StaticIpAddress; /* 0x1C */
+ MPI2_ETHERNET_IP_ADDR StaticSubnetMask; /* 0x2C */
+ MPI2_ETHERNET_IP_ADDR StaticGatewayIpAddress; /* 0x3C */
+ MPI2_ETHERNET_IP_ADDR StaticDNS1IpAddress; /* 0x4C */
+ MPI2_ETHERNET_IP_ADDR StaticDNS2IpAddress; /* 0x5C */
+ U32 Reserved5; /* 0x6C */
+ U32 Reserved6; /* 0x70 */
+ U32 Reserved7; /* 0x74 */
+ U32 Reserved8; /* 0x78 */
+ U8 HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/* 0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, MPI2_POINTER pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/* values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/* values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ * Generic structure to use for product-specific extended manufacturing pages
+ * (currently Extended Manufacturing Page 40 through Extended Manufacturing
+ * Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo; /* 0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t, MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
+
+/* PageVersion should be provided by product-specific code */
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_hbd.h b/sys/dev/mpr/mpi/mpi2_hbd.h
new file mode 100644
index 0000000000000..d0cc09f905a0f
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_hbd.h
@@ -0,0 +1,152 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2009-2011 LSI Corporation.
+ *
+ *
+ * Name: mpi2_hbd.h
+ * Title: MPI Host Based Discovery messages and structures
+ * Creation Date: October 21, 2009
+ *
+ * mpi2_hbd.h Version: 02.00.02
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 10-28-09 02.00.00 Initial version.
+ * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from
+ * HBD Action request, replaced by AdditionalInfo field.
+ * 11-18-11 02.00.02 Incorporating additions for MPI v2.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_HBD_H
+#define MPI2_HBD_H
+
+/****************************************************************************
+* Host Based Discovery Action messages
+****************************************************************************/
+
+/* Host Based Discovery Action Request Message */
+typedef struct _MPI2_HBD_ACTION_REQUEST
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ U64 SASAddress; /* 0x10 */
+ U32 Reserved5; /* 0x18 */
+ U32 HbdDeviceInfo; /* 0x1C */
+ U16 ParentDevHandle; /* 0x20 */
+ U16 MaxQDepth; /* 0x22 */
+ U8 FirstPhyIdentifier; /* 0x24 */
+ U8 Port; /* 0x25 */
+ U8 MaxConnections; /* 0x26 */
+ U8 MaxRate; /* 0x27 */
+ U32 AdditionalInfo; /* 0x28 */
+ U16 InitialAWT; /* 0x2C */
+ U16 Reserved7; /* 0x2E */
+ U32 Reserved8; /* 0x30 */
+} MPI2_HBD_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_HBD_ACTION_REQUEST,
+ Mpi2HbdActionRequest_t, MPI2_POINTER pMpi2HbdActionRequest_t;
+
+/* values for the Operation field */
+#define MPI2_HBD_OP_ADD_DEVICE (0x01)
+#define MPI2_HBD_OP_REMOVE_DEVICE (0x02)
+#define MPI2_HBD_OP_UPDATE_DEVICE (0x03)
+
+/* values for the HbdDeviceInfo field */
+#define MPI2_HBD_DEVICE_INFO_VIRTUAL_DEVICE (0x00004000)
+#define MPI2_HBD_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_HBD_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_HBD_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_HBD_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_HBD_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_HBD_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_HBD_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_HBD_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_HBD_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_HBD_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_HBD_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_HBD_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_HBD_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_HBD_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_HBD_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+/* values for the MaxRate field */
+#define MPI2_HBD_MAX_RATE_MASK (0x0F)
+#define MPI2_HBD_MAX_RATE_1_5 (0x08)
+#define MPI2_HBD_MAX_RATE_3_0 (0x09)
+#define MPI2_HBD_MAX_RATE_6_0 (0x0A)
+#define MPI25_HBD_MAX_RATE_12_0 (0x0B)
+
+
+/* Host Based Discovery Action Reply Message */
+typedef struct _MPI2_HBD_ACTION_REPLY
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_HBD_ACTION_REPLY, MPI2_POINTER PTR_MPI2_HBD_ACTION_REPLY,
+ Mpi2HbdActionReply_t, MPI2_POINTER pMpi2HbdActionReply_t;
+
+
+#endif
+
+
diff --git a/sys/dev/mpr/mpi/mpi2_history.txt b/sys/dev/mpr/mpi/mpi2_history.txt
new file mode 100644
index 0000000000000..296b75b878591
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_history.txt
@@ -0,0 +1,619 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+ ==============================
+ Fusion-MPT MPI 2.0 / 2.5 Header File Change History
+ ==============================
+
+ Copyright (c) 2000-2013 LSI Corporation.
+
+ ---------------------------------------
+ Header Set Release Version: 02.00.33
+ Header Set Release Date: 12-05-13
+ ---------------------------------------
+
+ Filename Current version Prior version
+ ---------- --------------- -------------
+ mpi2.h 02.00.33 02.00.32
+ mpi2_cnfg.h 02.00.27 02.00.26
+ mpi2_init.h 02.00.15 02.00.15
+ mpi2_ioc.h 02.00.24 02.00.23
+ mpi2_raid.h 02.00.10 02.00.10
+ mpi2_sas.h 02.00.08 02.00.08
+ mpi2_targ.h 02.00.06 02.00.06
+ mpi2_tool.h 02.00.11 02.00.11
+ mpi2_type.h 02.00.00 02.00.00
+ mpi2_ra.h 02.00.00 02.00.00
+ mpi2_hbd.h 02.00.02 02.00.02
+ mpi2_history.txt 02.00.33 02.00.32
+
+
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+
+mpi2.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+
+mpi2_cnfg.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ * obsolete for MPI v2.5 and later.
+ * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
+ * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for
+ * MPI2_CONFIG_PAGE_MAN_7.
+ * Added EnclosureLevel and ConnectorName fields to
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_DEV_0.
+ * Added EnclosureLevel field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * --------------------------------------------------------------------------
+
+mpi2_init.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * --------------------------------------------------------------------------
+
+mpi2_ioc.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocalFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ *                      Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * --------------------------------------------------------------------------
+
+mpi2_raid.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * --------------------------------------------------------------------------
+
+mpi2_sas.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ * for anything newer than MPI v2.0.
+ * --------------------------------------------------------------------------
+
+mpi2_targ.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
+ * BufferPostFlags field of CommandBufferPostBase Request.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
+ * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
+ * Target Status Send Request only takes a single SGE for
+ * response data.
+ * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure.
+ * 11-18-11 02.00.05 Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT
+ * request message structure.
+ * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and
+ * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS.
+ * --------------------------------------------------------------------------
+
+mpi2_tool.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
+ * it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * --------------------------------------------------------------------------
+
+mpi2_type.h
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+
+mpi2_ra.h
+ * 05-06-09 02.00.00 Initial version.
+ * --------------------------------------------------------------------------
+
+mpi2_hbd.h
+ * 10-28-09 02.00.00 Initial version.
+ * 08-11-10 02.00.01 Removed PortGroups, DmaGroup, and ControlGroup from
+ * HBD Action request, replaced by AdditionalInfo field.
+ * 11-18-11 02.00.02 Incorporating additions for MPI v2.5.
+ * --------------------------------------------------------------------------
+
+
+mpi2_history.txt Parts list history
+
+Filename 02.00.33 02.00.32 02.00.31 02.00.30
+---------- -------- -------- -------- --------
+mpi2.h 02.00.33 02.00.32 02.00.31 02.00.30
+mpi2_cnfg.h 02.00.27 02.00.26 02.00.25 02.00.25
+mpi2_init.h 02.00.15 02.00.15 02.00.15 02.00.15
+mpi2_ioc.h 02.00.24 02.00.23 02.00.22 02.00.22
+mpi2_raid.h 02.00.10 02.00.10 02.00.10 02.00.09
+mpi2_sas.h 02.00.08 02.00.08 02.00.07 02.00.07
+mpi2_targ.h 02.00.06 02.00.06 02.00.06 02.00.06
+mpi2_tool.h 02.00.11 02.00.11 02.00.10 02.00.10
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02
+
+Filename 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.29 02.00.28 02.00.27 02.00.26 02.00.25 02.00.24
+mpi2_cnfg.h 02.00.24 02.00.23 02.00.22 02.00.22 02.00.22 02.00.22
+mpi2_init.h 02.00.14 02.00.14 02.00.14 02.00.14 02.00.13 02.00.13
+mpi2_ioc.h 02.00.22 02.00.22 02.00.22 02.00.21 02.00.21 02.00.20
+mpi2_raid.h 02.00.09 02.00.09 02.00.09 02.00.08 02.00.08 02.00.08
+mpi2_sas.h 02.00.07 02.00.07 02.00.07 02.00.07 02.00.06 02.00.06
+mpi2_targ.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_tool.h 02.00.10 02.00.10 02.00.10 02.00.09 02.00.08 02.00.08
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
+
+Filename 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.23 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18
+mpi2_cnfg.h 02.00.22 02.00.21 02.00.20 02.00.19 02.00.18 02.00.17
+mpi2_init.h 02.00.12 02.00.11 02.00.11 02.00.11 02.00.11 02.00.11
+mpi2_ioc.h 02.00.20 02.00.19 02.00.18 02.00.17 02.00.17 02.00.16
+mpi2_raid.h 02.00.07 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_sas.h 02.00.06 02.00.05 02.00.05 02.00.05 02.00.05 02.00.05
+mpi2_targ.h 02.00.05 02.00.04 02.00.04 02.00.04 02.00.04 02.00.04
+mpi2_tool.h 02.00.08 02.00.07 02.00.07 02.00.06 02.00.06 02.00.06
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 02.00.01
+
+Filename 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.17 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12
+mpi2_cnfg.h 02.00.16 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_init.h 02.00.10 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07
+mpi2_ioc.h 02.00.15 02.00.15 02.00.14 02.00.13 02.00.12 02.00.11
+mpi2_raid.h 02.00.05 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_sas.h 02.00.05 02.00.04 02.00.03 02.00.03 02.00.02 02.00.02
+mpi2_targ.h 02.00.04 02.00.04 02.00.04 02.00.03 02.00.03 02.00.03
+mpi2_tool.h 02.00.06 02.00.05 02.00.04 02.00.04 02.00.04 02.00.03
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.01 02.00.00 02.00.00 02.00.00
+
+Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
+mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
+mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
+mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
+mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
+mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
+mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
+mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+
+Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+---------- -------- -------- -------- -------- -------- --------
+mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
+mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
+mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
+mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
+mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
+
diff --git a/sys/dev/mpr/mpi/mpi2_init.h b/sys/dev/mpr/mpi/mpi2_init.h
new file mode 100644
index 0000000000000..62b0cfed120a8
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_init.h
@@ -0,0 +1,614 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.15
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ *                      Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct _MPI2_SCSI_IO_CDB_EEDP32
+{
+ U8 CDB[20]; /* 0x00 */
+ U32 PrimaryReferenceTag; /* 0x14 */
+ U16 PrimaryApplicationTag; /* 0x18 */
+ U16 PrimaryApplicationTagMask; /* 0x1A */
+ U32 TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+/* MPI v2.0 CDB field */
+typedef union _MPI2_SCSI_IO_CDB_UNION
+{
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+
+/* MPI v2.0 SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 SenseBufferLowAddress; /* 0x0C */
+ U16 SGLFlags; /* 0x10 */
+ U8 SenseBufferLength; /* 0x12 */
+ U8 Reserved4; /* 0x13 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 SGLOffset1; /* 0x15 */
+ U8 SGLOffset2; /* 0x16 */
+ U8 SGLOffset3; /* 0x17 */
+ U32 SkipCount; /* 0x18 */
+ U32 DataLength; /* 0x1C */
+ U32 BidirectionalDataLength; /* 0x20 */
+ U16 IoFlags; /* 0x24 */
+ U16 EEDPFlags; /* 0x26 */
+ U32 EEDPBlockSize; /* 0x28 */
+ U32 SecondaryReferenceTag; /* 0x2C */
+ U16 SecondaryApplicationTag; /* 0x30 */
+ U16 ApplicationTagTranslationMask; /* 0x32 */
+ U8 LUN[8]; /* 0x34 */
+ U32 Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /* 0x60 */
+
+} MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
+
+/* SCSI IO MsgFlags bits */
+
+/* MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/* SCSI IO SGLFlags bits */
+
+/* base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/* base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/* shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/* number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
+
+/* SCSI IO IoFlags bits */
+
+/* Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/* SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+/* SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/* SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/* alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
+
+
+/* MPI v2.5 CDB field */
+typedef union _MPI25_SCSI_IO_CDB_UNION
+{
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_IEEE_SGE_SIMPLE64 SGE;
+} MPI25_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI25_SCSI_IO_CDB_UNION,
+ Mpi25ScsiIoCdb_t, MPI2_POINTER pMpi25ScsiIoCdb_t;
+
+/* MPI v2.5 SCSI IO Request Message */
+typedef struct _MPI25_SCSI_IO_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 SenseBufferLowAddress; /* 0x0C */
+ U8 DMAFlags; /* 0x10 */
+ U8 Reserved5; /* 0x11 */
+ U8 SenseBufferLength; /* 0x12 */
+ U8 Reserved4; /* 0x13 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 SGLOffset1; /* 0x15 */
+ U8 SGLOffset2; /* 0x16 */
+ U8 SGLOffset3; /* 0x17 */
+ U32 SkipCount; /* 0x18 */
+ U32 DataLength; /* 0x1C */
+ U32 BidirectionalDataLength; /* 0x20 */
+ U16 IoFlags; /* 0x24 */
+ U16 EEDPFlags; /* 0x26 */
+ U16 EEDPBlockSize; /* 0x28 */
+ U16 Reserved6; /* 0x2A */
+ U32 SecondaryReferenceTag; /* 0x2C */
+ U16 SecondaryApplicationTag; /* 0x30 */
+ U16 ApplicationTagTranslationMask; /* 0x32 */
+ U8 LUN[8]; /* 0x34 */
+ U32 Control; /* 0x3C */
+ MPI25_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+
+#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
+ MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI25_SGE_IO_UNION SGL; /* 0x60 */
+
+} MPI25_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI25_SCSI_IO_REQUEST,
+ Mpi25SCSIIORequest_t, MPI2_POINTER pMpi25SCSIIORequest_t;
+
+/* use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */
+
+/* Defines for the DMAFlags field
+ * Each setting affects 4 SGLs, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/* number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/* defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/* MPI v2.5 defines for the EEDPFlags bits */
+/* use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/* use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/* use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+
+/* NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
+
+/* SCSI IO Error Reply Message (decode SCSIStatus/SCSIState with the defines below) */
+typedef struct _MPI2_SCSI_IO_REPLY
+{
+    U16                     DevHandle;                      /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved1;                      /* 0x04 */
+    U8                      Reserved2;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved3;                      /* 0x0A */
+    U8                      SCSIStatus;                     /* 0x0C */ /* MPI2_SCSI_STATUS_ (SAM-4) */
+    U8                      SCSIState;                      /* 0x0D */ /* MPI2_SCSI_STATE_ flags */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U32                     TransferCount;                  /* 0x14 */
+    U32                     SenseCount;                     /* 0x18 */
+    U32                     ResponseInfo;                   /* 0x1C */ /* reason code in low byte; MPI2_SCSI_RI_ masks */
+    U16                     TaskTag;                        /* 0x20 */
+    U16                     SCSIStatusQualifier;            /* 0x22 */
+    U32                     BidirectionalTransferCount;     /* 0x24 */
+    U32                     EEDPErrorOffset;                /* 0x28 */ /* MPI 2.5 only; Reserved in MPI 2.0 */
+    U32                     Reserved6;                      /* 0x2C */
+} MPI2_SCSI_IO_REPLY, MPI2_POINTER PTR_MPI2_SCSI_IO_REPLY,
+  Mpi2SCSIIOReply_t, MPI2_POINTER pMpi2SCSIIOReply_t;
+
+/* SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD                   (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION        (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET          (0x04)
+#define MPI2_SCSI_STATUS_BUSY                   (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE           (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET   (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT   (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED     (0x22) /* obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL          (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE             (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED           (0x40)
+
+/* SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID     (0x10)
+#define MPI2_SCSI_STATE_TERMINATED              (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS          (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED        (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID         (0x01)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE           (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN               (0xFFFF)
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/* SCSI Task Management Request Message; TaskType selects the TM function (defines below) */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST
+{
+    U16                     DevHandle;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      Reserved1;                      /* 0x04 */
+    U8                      TaskType;                       /* 0x05 */ /* MPI2_SCSITASKMGMT_TASKTYPE_ */
+    U8                      Reserved2;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved3;                      /* 0x0A */
+    U8                      LUN[8];                         /* 0x0C */ /* 8-byte LUN */
+    U32                     Reserved4[7];                   /* 0x14 */
+    U16                     TaskMID;                        /* 0x30 */
+    U16                     Reserved5;                      /* 0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+  MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+  Mpi2SCSITaskManagementRequest_t,
+  MPI2_POINTER pMpi2SCSITaskManagementRequest_t;
+
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK           (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET        (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET         (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET   (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET       (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK           (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA              (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET         (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT      (0x0A)
+
+/* obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION   (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/* MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET    (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET           (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST     (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET  (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU  (0x01)
+
+
+
+/* SCSI Task Management Reply Message (ResponseCode decoded with the defines below) */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
+{
+    U16                     DevHandle;                      /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      ResponseCode;                   /* 0x04 */ /* MPI2_SCSITASKMGMT_RSP_ */
+    U8                      TaskType;                       /* 0x05 */
+    U8                      Reserved1;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved2;                      /* 0x0A */
+    U16                     Reserved3;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U32                     TerminationCount;               /* 0x14 */
+    U32                     ResponseInfo;                   /* 0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+  MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+  Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t; /* NOTE(review): pointer typedef omits "Task"; public name, keep as-is */
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE               (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME             (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED          (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED                 (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED              (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN            (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG         (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC          (0x80)
+
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE            (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE           (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2                  (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2                 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1                  (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1                 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0                  (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0                 (24)
+
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/* SCSI Enclosure Processor Request Message (read or write a slot's status; see Action defines) */
+typedef struct _MPI2_SEP_REQUEST
+{
+    U16                     DevHandle;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      Action;                         /* 0x04 */ /* MPI2_SEP_REQ_ACTION_ */
+    U8                      Flags;                          /* 0x05 */ /* addressing mode; MPI2_SEP_REQ_FLAGS_ */
+    U8                      Reserved1;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved2;                      /* 0x0A */
+    U32                     SlotStatus;                     /* 0x0C */ /* MPI2_SEP_REQ_SLOTSTATUS_ bits */
+    U32                     Reserved3;                      /* 0x10 */
+    U32                     Reserved4;                      /* 0x14 */
+    U32                     Reserved5;                      /* 0x18 */
+    U16                     Slot;                           /* 0x1C */
+    U16                     EnclosureHandle;                /* 0x1E */
+} MPI2_SEP_REQUEST, MPI2_POINTER PTR_MPI2_SEP_REQUEST,
+  Mpi2SepRequest_t, MPI2_POINTER pMpi2SepRequest_t;
+
+/* Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS                (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS                 (0x01)
+
+/* Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS            (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS       (0x01)
+
+/* SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE          (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST        (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED         (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE               (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED            (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT         (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY       (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY         (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING          (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY              (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR                (0x00000001)
+
+
+/* SCSI Enclosure Processor Reply Message (SlotStatus decoded with the defines below) */
+typedef struct _MPI2_SEP_REPLY
+{
+    U16                     DevHandle;                      /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      Action;                         /* 0x04 */
+    U8                      Flags;                          /* 0x05 */
+    U8                      Reserved1;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved2;                      /* 0x0A */
+    U16                     Reserved3;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U32                     SlotStatus;                     /* 0x14 */ /* MPI2_SEP_REPLY_SLOTSTATUS_ bits */
+    U32                     Reserved4;                      /* 0x18 */
+    U16                     Slot;                           /* 0x1C */
+    U16                     EnclosureHandle;                /* 0x1E */
+} MPI2_SEP_REPLY, MPI2_POINTER PTR_MPI2_SEP_REPLY,
+  Mpi2SepReply_t, MPI2_POINTER pMpi2SepReply_t;
+
+/* SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY          (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST      (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED       (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE             (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED          (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT       (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY     (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY       (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING        (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY            (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR              (0x00000001)
+
+
+#endif
+
+
diff --git a/sys/dev/mpr/mpi/mpi2_ioc.h b/sys/dev/mpr/mpi/mpi2_ioc.h
new file mode 100644
index 0000000000000..38a46f5280bc5
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_ioc.h
@@ -0,0 +1,1856 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.24
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ * Added ProtocalFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ *                      Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
+ * Added ElapsedSeconds field to
+ * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
+ * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/* IOCInit Request message (carries request/reply queue base addresses and depths) */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+    U8                      WhoInit;                        /* 0x00 */ /* MPI2_WHOINIT_ values below */
+    U8                      Reserved1;                      /* 0x01 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+    U16                     MsgVersion;                     /* 0x0C */
+    U16                     HeaderVersion;                  /* 0x0E */
+    U32                     Reserved5;                      /* 0x10 */
+    U16                     Reserved6;                      /* 0x14 */
+    U8                      Reserved7;                      /* 0x16 */
+    U8                      HostMSIxVectors;                /* 0x17 */
+    U16                     Reserved8;                      /* 0x18 */
+    U16                     SystemRequestFrameSize;         /* 0x1A */
+    U16                     ReplyDescriptorPostQueueDepth;  /* 0x1C */
+    U16                     ReplyFreeQueueDepth;            /* 0x1E */
+    U32                     SenseBufferAddressHigh;         /* 0x20 */
+    U32                     SystemReplyAddressHigh;         /* 0x24 */
+    U64                     SystemRequestFrameBaseAddress;  /* 0x28 */
+    U64                     ReplyDescriptorPostQueueAddress;/* 0x30 */
+    U64                     ReplyFreeQueueAddress;          /* 0x38 */
+    U64                     TimeStamp;                      /* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+  Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+
+/* WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED            (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS                (0x01)
+#define MPI2_WHOINIT_ROM_BIOS                   (0x02)
+#define MPI2_WHOINIT_PCI_PEER                   (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER                (0x04)
+#define MPI2_WHOINIT_MANUFACTURER               (0x05)
+
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE                 (0x01)
+
+/* MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK      (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT     (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK      (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT     (0)
+
+/* HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK       (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT      (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK        (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT       (0)
+
+/* minimum depth for a Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN                     (16)
+
+/* Reply Descriptor Post Queue Array Entry (used with MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE) */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
+{
+    U64                 RDPQBaseAddress;                    /* 0x00 */
+    U32                 Reserved1;                          /* 0x08 */
+    U32                 Reserved2;                          /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+  MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+  Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry;
+
+/* IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY
+{
+    U8                      WhoInit;                        /* 0x00 */
+    U8                      Reserved1;                      /* 0x01 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+    U16                     Reserved5;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+} MPI2_IOC_INIT_REPLY, MPI2_POINTER PTR_MPI2_IOC_INIT_REPLY,
+  Mpi2IOCInitReply_t, MPI2_POINTER pMpi2IOCInitReply_t;
+
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/* IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+} MPI2_IOC_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_IOC_FACTS_REQUEST,
+  Mpi2IOCFactsRequest_t, MPI2_POINTER pMpi2IOCFactsRequest_t;
+
+
+/* IOCFacts Reply message (reports the IOC's capabilities and resource limits) */
+typedef struct _MPI2_IOC_FACTS_REPLY
+{
+    U16                     MsgVersion;                     /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     HeaderVersion;                  /* 0x04 */
+    U8                      IOCNumber;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved1;                      /* 0x0A */
+    U16                     IOCExceptions;                  /* 0x0C */ /* MPI2_IOCFACTS_EXCEPT_ bits */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U8                      MaxChainDepth;                  /* 0x14 */
+    U8                      WhoInit;                        /* 0x15 */
+    U8                      NumberOfPorts;                  /* 0x16 */
+    U8                      MaxMSIxVectors;                 /* 0x17 */
+    U16                     RequestCredit;                  /* 0x18 */
+    U16                     ProductID;                      /* 0x1A */
+    U32                     IOCCapabilities;                /* 0x1C */ /* MPI2_IOCFACTS_CAPABILITY_ bits */
+    MPI2_VERSION_UNION      FWVersion;                      /* 0x20 */
+    U16                     IOCRequestFrameSize;            /* 0x24 */
+    U16                     IOCMaxChainSegmentSize;         /* 0x26 */ /* MPI 2.5 only; Reserved in MPI 2.0 */
+    U16                     MaxInitiators;                  /* 0x28 */
+    U16                     MaxTargets;                     /* 0x2A */
+    U16                     MaxSasExpanders;                /* 0x2C */
+    U16                     MaxEnclosures;                  /* 0x2E */
+    U16                     ProtocolFlags;                  /* 0x30 */
+    U16                     HighPriorityCredit;             /* 0x32 */
+    U16                     MaxReplyDescriptorPostQueueDepth; /* 0x34 */
+    U8                      ReplyFrameSize;                 /* 0x36 */
+    U8                      MaxVolumes;                     /* 0x37 */
+    U16                     MaxDevHandle;                   /* 0x38 */
+    U16                     MaxPersistentEntries;           /* 0x3A */
+    U16                     MinDevHandle;                   /* 0x3C */
+    U16                     Reserved4;                      /* 0x3E */
+} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
+  Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
+
+/* MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK             (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT            (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK             (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT            (0)
+
+/* HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK              (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT             (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK               (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT              (0)
+
+/* IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE     (0x0200)
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX      (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK              (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD              (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP            (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED          (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP    (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED       (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL     (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL           (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID        (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL       (0x0001)
+
+/* defines for WhoInit field are after the IOCInit Request */
+
+/* ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE     (0x00040000)
+#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE     (0x00020000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY   (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX            (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR       (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY           (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID        (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR                    (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST              (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET   (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP                   (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER        (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER        (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER      (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/* ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET              (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR           (0x0002)
+
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/* PortFacts Request message (PortNumber selects which port to query) */
+typedef struct _MPI2_PORT_FACTS_REQUEST
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      PortNumber;                     /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved3;                      /* 0x0A */
+} MPI2_PORT_FACTS_REQUEST, MPI2_POINTER PTR_MPI2_PORT_FACTS_REQUEST,
+  Mpi2PortFactsRequest_t, MPI2_POINTER pMpi2PortFactsRequest_t;
+
+/* PortFacts Reply message (reports PortType and posted command buffer limit) */
+typedef struct _MPI2_PORT_FACTS_REPLY
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      PortNumber;                     /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved3;                      /* 0x0A */
+    U16                     Reserved4;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U8                      Reserved5;                      /* 0x14 */
+    U8                      PortType;                       /* 0x15 */ /* MPI2_PORTFACTS_PORTTYPE_ */
+    U16                     Reserved6;                      /* 0x16 */
+    U16                     MaxPostedCmdBuffers;            /* 0x18 */
+    U16                     Reserved7;                      /* 0x1A */
+} MPI2_PORT_FACTS_REPLY, MPI2_POINTER PTR_MPI2_PORT_FACTS_REPLY,
+  Mpi2PortFactsReply_t, MPI2_POINTER pMpi2PortFactsReply_t;
+
+/* PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE            (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC                  (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI               (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL        (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL         (0x31)
+
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/* PortEnable Request message (only PortFlags is meaningful; remaining fields reserved) */
+typedef struct _MPI2_PORT_ENABLE_REQUEST
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      Reserved2;                      /* 0x04 */
+    U8                      PortFlags;                      /* 0x05 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+} MPI2_PORT_ENABLE_REQUEST, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REQUEST,
+  Mpi2PortEnableRequest_t, MPI2_POINTER pMpi2PortEnableRequest_t;
+
+
+/* PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U8                      Reserved2;                      /* 0x04 */
+    U8                      PortFlags;                      /* 0x05 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+    U16                     Reserved5;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+} MPI2_PORT_ENABLE_REPLY, MPI2_POINTER PTR_MPI2_PORT_ENABLE_REPLY,
+  Mpi2PortEnableReply_t, MPI2_POINTER pMpi2PortEnableReply_t;
+
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/* EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS           (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST
+{
+    U16                     Reserved1;                      /* 0x00 */
+    U8                      ChainOffset;                    /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved2;                      /* 0x04 */
+    U8                      Reserved3;                      /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved4;                      /* 0x0A */
+    U32                     Reserved5;                      /* 0x0C */
+    U32                     Reserved6;                      /* 0x10 */
+    U32                     EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];/* 0x14 */ /* masks, not enables (see 02.00.05 history note) */
+    U16                     SASBroadcastPrimitiveMasks;     /* 0x24 */
+    U16                     SASNotifyPrimitiveMasks;        /* 0x26 */
+    U32                     Reserved8;                      /* 0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+  MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+  Mpi2EventNotificationRequest_t, MPI2_POINTER pMpi2EventNotificationRequest_t;
+
+
+/* EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
+{
+    U16                     EventDataLength;                /* 0x00 */
+    U8                      MsgLength;                      /* 0x02 */
+    U8                      Function;                       /* 0x03 */
+    U16                     Reserved1;                      /* 0x04 */
+    U8                      AckRequired;                    /* 0x06 */
+    U8                      MsgFlags;                       /* 0x07 */
+    U8                      VP_ID;                          /* 0x08 */
+    U8                      VF_ID;                          /* 0x09 */
+    U16                     Reserved2;                      /* 0x0A */
+    U16                     Reserved3;                      /* 0x0C */
+    U16                     IOCStatus;                      /* 0x0E */
+    U32                     IOCLogInfo;                     /* 0x10 */
+    U16                     Event;                          /* 0x14 */ /* MPI2_EVENT_ codes below */
+    U16                     Reserved4;                      /* 0x16 */
+    U32                     EventContext;                   /* 0x18 */
+    U32                     EventData[1];                   /* 0x1C */ /* variable length; presumably sized by EventDataLength */
+} MPI2_EVENT_NOTIFICATION_REPLY, MPI2_POINTER PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+  Mpi2EventNotificationReply_t, MPI2_POINTER pMpi2EventNotificationReply_t;
+
+/* AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED    (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED        (0x01)
+
+/* Event */
+#define MPI2_EVENT_LOG_DATA                         (0x0001)
+#define MPI2_EVENT_STATE_CHANGE                     (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED              (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE                     (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL                    (0x000E) /* obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE         (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS              (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY                    (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE          (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE    (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW          (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST         (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE    (0x001D)
+#define MPI2_EVENT_IR_VOLUME                        (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK                 (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST     (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED                  (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER                  (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT                   (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY         (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE                      (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE             (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD                   (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE                     (0x0028)
+#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE         (0x0029)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC             (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC             (0x007F)
+
+
+/* Log Entry Added Event data (payload matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h) */
+
+/* the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH             (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED
+{
+    U64         TimeStamp;                          /* 0x00 */
+    U32         Reserved1;                          /* 0x08 */
+    U16         LogSequence;                        /* 0x0C */
+    U16         LogEntryQualifier;                  /* 0x0E */
+    U8          VP_ID;                              /* 0x10 */
+    U8          VF_ID;                              /* 0x11 */
+    U16         Reserved2;                          /* 0x12 */
+    U8          LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH];/* 0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+  Mpi2EventDataLogEntryAdded_t, MPI2_POINTER pMpi2EventDataLogEntryAdded_t;
+
+
+/* GPIO Interrupt Event data (GPIONum identifies the GPIO line) */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT
+{
+    U8          GPIONum;                            /* 0x00 */
+    U8          Reserved1;                          /* 0x01 */
+    U16         Reserved2;                          /* 0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+  Mpi2EventDataGpioInterrupt_t, MPI2_POINTER pMpi2EventDataGpioInterrupt_t;
+
+
+/* Temperature Threshold Event data (Status bits below flag which thresholds were exceeded) */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE
+{
+    U16         Status;                             /* 0x00 */ /* MPI2_EVENT_TEMPERATUREn_EXCEEDED bits */
+    U8          SensorNum;                          /* 0x02 */
+    U8          Reserved1;                          /* 0x03 */
+    U16         CurrentTemperature;                 /* 0x04 */
+    U16         Reserved2;                          /* 0x06 */
+    U32         Reserved3;                          /* 0x08 */
+    U32         Reserved4;                          /* 0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_TEMPERATURE,
+  Mpi2EventDataTemperature_t, MPI2_POINTER pMpi2EventDataTemperature_t;
+
+/* Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED            (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED            (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED            (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED            (0x0001)
+
+
+/* Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE
+{
+    U8          SourceVF_ID;                        /* 0x00 */
+    U8          Reserved1;                          /* 0x01 */
+    U16         Reserved2;                          /* 0x02 */
+    U32         Reserved3;                          /* 0x04 */
+    U32         HostData[1];                        /* 0x08 */ /* trailing [1] array: variable-length payload — presumably bounded by message length */
+} MPI2_EVENT_DATA_HOST_MESSAGE, MPI2_POINTER PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+  Mpi2EventDataHostMessage_t, MPI2_POINTER pMpi2EventDataHostMessage_t;
+
+
+/* Power Performance Change Event (modes encoded with the MPI2_EVENT_PM_ defines below) */
+
+typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE
+{
+    U8          CurrentPowerMode;                   /* 0x00 */
+    U8          PreviousPowerMode;                  /* 0x01 */
+    U16         Reserved1;                          /* 0x02 */
+} MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+  Mpi2EventDataPowerPerfChange_t, MPI2_POINTER pMpi2EventDataPowerPerfChange_t;
+
+/* defines for CurrentPowerMode and PreviousPowerMode fields */
+#define MPI2_EVENT_PM_INIT_MASK                     (0xC0)
+#define MPI2_EVENT_PM_INIT_UNAVAILABLE              (0x00)
+#define MPI2_EVENT_PM_INIT_HOST                     (0x40)
+#define MPI2_EVENT_PM_INIT_IO_UNIT                  (0x80)
+#define MPI2_EVENT_PM_INIT_PCIE_DPA                 (0xC0)
+
+#define MPI2_EVENT_PM_MODE_MASK                     (0x07)
+#define MPI2_EVENT_PM_MODE_UNAVAILABLE              (0x00)
+#define MPI2_EVENT_PM_MODE_UNKNOWN                  (0x01)
+#define MPI2_EVENT_PM_MODE_FULL_POWER               (0x04)
+#define MPI2_EVENT_PM_MODE_REDUCED_POWER            (0x05)
+#define MPI2_EVENT_PM_MODE_STANDBY                  (0x06)
+
+
+/* Hard Reset Received Event data (Port identifies the affected port) */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED
+{
+    U8          Reserved1;                          /* 0x00 */
+    U8          Port;                               /* 0x01 */
+    U16         Reserved2;                          /* 0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+  MPI2_POINTER PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+  Mpi2EventDataHardResetReceived_t,
+  MPI2_POINTER pMpi2EventDataHardResetReceived_t;
+
+
+/* Task Set Full Event data (marked obsolete as of MPI 02.00.15) */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
+{
+    U16         DevHandle;                          /* 0x00 */
+    U16         CurrentDepth;                       /* 0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, MPI2_POINTER PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+  Mpi2EventDataTaskSetFull_t, MPI2_POINTER pMpi2EventDataTaskSetFull_t;
+
+
+/* SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
+{
+ U16 TaskTag; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U8 ASC; /* 0x04 */
+ U8 ASCQ; /* 0x05 */
+ U16 DevHandle; /* 0x06 */
+ U32 Reserved2; /* 0x08 */
+ U64 SASAddress; /* 0x0C */
+ U8 LUN[8]; /* 0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasDeviceStatusChange_t;
+
+/* SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+
+/* Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS
+{
+ U16 VolDevHandle; /* 0x00 */
+ U16 Reserved1; /* 0x02 */
+ U8 RAIDOperation; /* 0x04 */
+ U8 PercentComplete; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U32 ElapsedSeconds; /* 0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ MPI2_POINTER pMpi2EventDataIrOperationStatus_t;
+
+/* Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+
+/* Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME
+{
+ U16 VolDevHandle; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 NewValue; /* 0x04 */
+ U32 PreviousValue; /* 0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, MPI2_POINTER pMpi2EventDataIrVolume_t;
+
+/* Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+
+/* Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysDiskNum; /* 0x03 */
+ U16 PhysDiskDevHandle; /* 0x04 */
+ U16 Reserved2; /* 0x06 */
+ U16 Slot; /* 0x08 */
+ U16 EnclosureHandle; /* 0x0A */
+ U32 NewValue; /* 0x0C */
+ U32 PreviousValue; /* 0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t, MPI2_POINTER pMpi2EventDataIrPhysicalDisk_t;
+
+/* Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+
+/* Integrated RAID Configuration Change List Event data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT
+{
+ U16 ElementFlags; /* 0x00 */
+ U16 VolDevHandle; /* 0x02 */
+ U8 ReasonCode; /* 0x04 */
+ U8 PhysDiskNum; /* 0x05 */
+ U16 PhysDiskDevHandle; /* 0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, MPI2_POINTER PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, MPI2_POINTER pMpi2EventIrConfigElement_t;
+
+/* IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/* IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST
+{
+ U8 NumElements; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 Reserved2; /* 0x02 */
+ U8 ConfigNum; /* 0x03 */
+ U32 Flags; /* 0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT]; /* 0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ MPI2_POINTER pMpi2EventDataIrConfigChangeList_t;
+
+/* IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
+
+
+/* SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY
+{
+ U8 Flags; /* 0x00 */
+ U8 ReasonCode; /* 0x01 */
+ U8 PhysicalPort; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+ U32 DiscoveryStatus; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, MPI2_POINTER pMpi2EventDataSasDiscovery_t;
+
+/* SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/* SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/* SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+
+/* SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE
+{
+ U8 PhyNum; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U8 PortWidth; /* 0x02 */
+ U8 Primitive; /* 0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ MPI2_POINTER pMpi2EventDataSasBroadcastPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+
+/* SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE
+{
+ U8 PhyNum; /* 0x00 */
+ U8 Port; /* 0x01 */
+ U8 Reserved1; /* 0x02 */
+ U8 Primitive; /* 0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ Mpi2EventDataSasNotifyPrimitive_t,
+ MPI2_POINTER pMpi2EventDataSasNotifyPrimitive_t;
+
+/* defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+
+/* SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
+{
+ U8 ReasonCode; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U16 DevHandle; /* 0x02 */
+ U64 SASAddress; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasInitDevStatusChange_t;
+
+/* SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+
+/* SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
+{
+ U16 MaxInit; /* 0x00 */
+ U16 CurrentInit; /* 0x02 */
+ U64 SASAddress; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ MPI2_POINTER pMpi2EventDataSasInitTableOverflow_t;
+
+
+/* SAS Topology Change List Event data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+{
+ U16 AttachedDevHandle; /* 0x00 */
+ U8 LinkRate; /* 0x02 */
+ U8 PhyStatus; /* 0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, MPI2_POINTER PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, MPI2_POINTER pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
+{
+ U16 EnclosureHandle; /* 0x00 */
+ U16 ExpanderDevHandle; /* 0x02 */
+ U8 NumPhys; /* 0x04 */
+ U8 Reserved1; /* 0x05 */
+ U16 Reserved2; /* 0x06 */
+ U8 NumEntries; /* 0x08 */
+ U8 StartPhyNum; /* 0x09 */
+ U8 ExpStatus; /* 0x0A */
+ U8 PhysicalPort; /* 0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /* 0x0C*/
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
+
+/* values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/* defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+
+/* values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/* values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
+
+
+/* SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE
+{
+ U16 EnclosureHandle; /* 0x00 */
+ U8 ReasonCode; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U64 EnclosureLogicalID; /* 0x04 */
+ U16 NumSlots; /* 0x0C */
+ U16 StartSlot; /* 0x0E */
+ U32 PhyBits; /* 0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ MPI2_POINTER pMpi2EventDataSasEnclDevStatusChange_t;
+
+/* SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+
+/* SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER
+{
+ U64 TimeStamp; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U8 PhyEventCode; /* 0x0C */
+ U8 PhyNum; /* 0x0D */
+ U16 Reserved2; /* 0x0E */
+ U32 PhyEventInfo; /* 0x10 */
+ U8 CounterType; /* 0x14 */
+ U8 ThresholdWindow; /* 0x15 */
+ U8 TimeUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
+ U32 EventThreshold; /* 0x18 */
+ U16 ThresholdFlags; /* 0x1C */
+ U16 Reserved4; /* 0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t, MPI2_POINTER pMpi2EventDataSasPhyCounter_t;
+
+/* use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h for the PhyEventCode field */
+
+/* use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType field */
+
+/* use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits field */
+
+/* use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags field */
+
+
+/* SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE
+{
+ U8 ReasonCode; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ MPI2_POINTER PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, MPI2_POINTER pMpi2EventDataSasQuiesce_t;
+
+/* SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+
+/* Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS
+{
+ U8 Flags; /* 0x00 */
+ U8 NegotiatedLinkRate; /* 0x01 */
+ U8 PhyNum; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U32 Reserved1; /* 0x04 */
+ U8 InitialFrame[28]; /* 0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
+
+/* values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR
+{
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY
+{
+ U8 DescriptorType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataMpi2EventDataHbdPhy_t;
+
+/* values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/* EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST
+{
+ U16 Reserved1; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Event; /* 0x0C */
+ U16 Reserved5; /* 0x0E */
+ U32 EventContext; /* 0x10 */
+} MPI2_EVENT_ACK_REQUEST, MPI2_POINTER PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, MPI2_POINTER pMpi2EventAckRequest_t;
+
+
+/* EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY
+{
+ U16 Reserved1; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_EVENT_ACK_REPLY, MPI2_POINTER PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, MPI2_POINTER pMpi2EventAckReply_t;
+
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/* SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST
+{
+ U16 HostDataLength; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U8 Reserved4; /* 0x0C */
+ U8 DestVF_ID; /* 0x0D */
+ U16 Reserved5; /* 0x0E */
+ U32 Reserved6; /* 0x10 */
+ U32 Reserved7; /* 0x14 */
+ U32 Reserved8; /* 0x18 */
+ U32 Reserved9; /* 0x1C */
+ U32 Reserved10; /* 0x20 */
+ U32 HostData[1]; /* 0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+ MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+ Mpi2SendHostMessageRequest_t, MPI2_POINTER pMpi2SendHostMessageRequest_t;
+
+
+/* SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY
+{
+ U16 HostDataLength; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, MPI2_POINTER PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+ Mpi2SendHostMessageReply_t, MPI2_POINTER pMpi2SendHostMessageReply_t;
+
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/* MPI v2.0 FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 TotalImageSize; /* 0x0C */
+ U32 Reserved5; /* 0x10 */
+ MPI2_MPI_SGE_UNION SGL; /* 0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, MPI2_POINTER pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) /* MPI v2.5 and newer */
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/* MPI v2.0 FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE
+{
+ U8 Reserved1; /* 0x00 */
+ U8 ContextSize; /* 0x01 */
+ U8 DetailsLength; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 ImageOffset; /* 0x08 */
+ U32 ImageSize; /* 0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, MPI2_POINTER pMpi2FWDownloadTCSGE_t;
+
+
+/* MPI v2.5 FWDownload Request message */
+typedef struct _MPI25_FW_DOWNLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 TotalImageSize; /* 0x0C */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+ U32 ImageOffset; /* 0x18 */
+ U32 ImageSize; /* 0x1C */
+ MPI25_SGE_IO_UNION SGL; /* 0x20 */
+} MPI25_FW_DOWNLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_DOWNLOAD_REQUEST,
+ Mpi25FWDownloadRequest, MPI2_POINTER pMpi25FWDownloadRequest;
+
+
+/* FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, MPI2_POINTER pMpi2FWDownloadReply_t;
+
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/* MPI v2.0 FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ MPI2_MPI_SGE_UNION SGL; /* 0x14 */
+} MPI2_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, MPI2_POINTER pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+/* MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE
+{
+ U8 Reserved1; /* 0x00 */
+ U8 ContextSize; /* 0x01 */
+ U8 DetailsLength; /* 0x02 */
+ U8 Flags; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 ImageOffset; /* 0x08 */
+ U32 ImageSize; /* 0x0C */
+} MPI2_FW_UPLOAD_TCSGE, MPI2_POINTER PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, MPI2_POINTER pMpi2FWUploadTCSGE_t;
+
+
+/* MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ U32 Reserved7; /* 0x14 */
+ U32 ImageOffset; /* 0x18 */
+ U32 ImageSize; /* 0x1C */
+ MPI25_SGE_IO_UNION SGL; /* 0x20 */
+} MPI25_FW_UPLOAD_REQUEST, MPI2_POINTER PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, MPI2_POINTER pMpi25FWUploadRequest_t;
+
+
+/* FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ActualImageSize; /* 0x14 */
+} MPI2_FW_UPLOAD_REPLY, MPI2_POINTER PTR_MPI2_FW_UPLOAD_REPLY,
+ Mpi2FWUploadReply_t, MPI2_POINTER pMPi2FWUploadReply_t;
+
+
+/* FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER
+{
+ U32 Signature; /* 0x00 */
+ U32 Signature0; /* 0x04 */
+ U32 Signature1; /* 0x08 */
+ U32 Signature2; /* 0x0C */
+ MPI2_VERSION_UNION MPIVersion; /* 0x10 */
+ MPI2_VERSION_UNION FWVersion; /* 0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /* 0x18 */
+ MPI2_VERSION_UNION PackageVersion; /* 0x1C */
+ U16 VendorID; /* 0x20 */
+ U16 ProductID; /* 0x22 */
+ U16 ProtocolFlags; /* 0x24 */
+ U16 Reserved26; /* 0x26 */
+ U32 IOCCapabilities; /* 0x28 */
+ U32 ImageSize; /* 0x2C */
+ U32 NextImageHeaderOffset; /* 0x30 */
+ U32 Checksum; /* 0x34 */
+ U32 Reserved38; /* 0x38 */
+ U32 Reserved3C; /* 0x3C */
+ U32 Reserved40; /* 0x40 */
+ U32 Reserved44; /* 0x44 */
+ U32 Reserved48; /* 0x48 */
+ U32 Reserved4C; /* 0x4C */
+ U32 Reserved50; /* 0x50 */
+ U32 Reserved54; /* 0x54 */
+ U32 Reserved58; /* 0x58 */
+ U32 Reserved5C; /* 0x5C */
+ U32 Reserved60; /* 0x60 */
+ U32 FirmwareVersionNameWhat; /* 0x64 */
+ U8 FirmwareVersionName[32]; /* 0x68 */
+ U32 VendorNameWhat; /* 0x88 */
+ U8 VendorName[32]; /* 0x8C */
+ U32 PackageNameWhat; /* 0xAC */
+ U8 PackageName[32]; /* 0xB0 */
+ U32 ReservedD0; /* 0xD0 */
+ U32 ReservedD4; /* 0xD4 */
+ U32 ReservedD8; /* 0xD8 */
+ U32 ReservedDC; /* 0xDC */
+ U32 ReservedE0; /* 0xE0 */
+ U32 ReservedE4; /* 0xE4 */
+ U32 ReservedE8; /* 0xE8 */
+ U32 ReservedEC; /* 0xEC */
+ U32 ReservedF0; /* 0xF0 */
+ U32 ReservedF4; /* 0xF4 */
+ U32 ReservedF8; /* 0xF8 */
+ U32 ReservedFC; /* 0xFC */
+} MPI2_FW_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, MPI2_POINTER pMpi2FWImageHeader_t;
+
+/* Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/* Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/* Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/* Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+
+/* defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/* SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+
+/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/* use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+
+/* Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER
+
+{
+ U8 ImageType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Checksum; /* 0x04 */
+ U32 ImageSize; /* 0x08 */
+ U32 NextImageHeaderOffset; /* 0x0C */
+ U32 PackageVersion; /* 0x10 */
+ U32 Reserved3; /* 0x14 */
+ U32 Reserved4; /* 0x18 */
+ U32 Reserved5; /* 0x1C */
+ U8 IdentifyString[32]; /* 0x20 */
+} MPI2_EXT_IMAGE_HEADER, MPI2_POINTER PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, MPI2_POINTER pMpi2ExtImageHeader_t;
+
+/* useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/* defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) /* MPI v2.5 and newer */
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
+
+
+
+/* FLASH Layout Extended Image Data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION
+{
+ U8 RegionType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 RegionOffset; /* 0x04 */
+ U32 RegionSize; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+} MPI2_FLASH_REGION, MPI2_POINTER PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, MPI2_POINTER pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT
+{
+ U32 FlashSize; /* 0x00 */
+ U32 Reserved1; /* 0x04 */
+ U32 Reserved2; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS];/* 0x10 */
+} MPI2_FLASH_LAYOUT, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, MPI2_POINTER pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA
+{
+ U8 ImageRevision; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 SizeOfRegion; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U16 NumberOfLayouts; /* 0x04 */
+ U16 RegionsPerLayout; /* 0x06 */
+ U16 MinimumSectorAlignment; /* 0x08 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS];/* 0x10 */
+} MPI2_FLASH_LAYOUT_DATA, MPI2_POINTER PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, MPI2_POINTER pMpi2FlashLayoutData_t;
+
+/* defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/* ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
+
+
+
+/* Supported Devices Extended Image Data */
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) /* build-time array bound only; real count is NumberOfDevices */
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE /* one supported PCI device ID / revision range */
+{
+ U16 DeviceID; /* 0x00 */
+ U16 VendorID; /* 0x02 */
+ U16 DeviceIDMask; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+ U8 LowPCIRev; /* 0x08 */
+ U8 HighPCIRev; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+} MPI2_SUPPORTED_DEVICE, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, MPI2_POINTER pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA /* header plus NumberOfDevices entries */
+{
+ U8 ImageRevision; /* 0x00 */ /* MPI2_SUPPORTED_DEVICES_IMAGE_REVISION */
+ U8 Reserved1; /* 0x01 */
+ U8 NumberOfDevices; /* 0x02 */
+ U8 Reserved2; /* 0x03 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_SUPPORTED_DEVICE SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES]; /* 0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, MPI2_POINTER PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, MPI2_POINTER pMpi2SupportedDevicesData_t;
+
+/* ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
+
+
+/* Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER /* boot flags, image size, signatures, reset vector */
+
+{
+ U32 BootFlags; /* 0x00 */
+ U32 ImageSize; /* 0x04 */
+ U32 Signature0; /* 0x08 */
+ U32 Signature1; /* 0x0C */
+ U32 Signature2; /* 0x10 */
+ U32 ResetVector; /* 0x14 */
+} MPI2_INIT_IMAGE_FOOTER, MPI2_POINTER PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, MPI2_POINTER pMpi2InitImageFooter_t;
+
+/* defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/* defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/* defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/* defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/* defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/* Signature fields as individual bytes (little-endian byte order of the U32 values above) */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/* defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
+
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY /* one encrypted hash; overall length is variable */
+{
+ U8 HashImageType; /* 0x00 */ /* MPI25_HASH_IMAGE_TYPE_* */
+ U8 HashAlgorithm; /* 0x01 */ /* MPI25_HASH_ALGORITHM_* */
+ U8 EncryptionAlgorithm; /* 0x02 */ /* MPI25_ENCRYPTION_ALG_* */
+ U8 Reserved1; /* 0x03 */
+ U32 Reserved2; /* 0x04 */
+ U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA /* header followed by NumHash variable-length entries */
+{
+ U8 ImageVersion; /* 0x00 */
+ U8 NumHash; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 Reserved2; /* 0x04 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ /* variable number of entries */
+} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
+ Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/* PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST
+{
+ U8 Feature; /* 0x00 */ /* MPI2_PM_CONTROL_FEATURE_* */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Parameter1; /* 0x0C */ /* meaning of Parameter1-4 depends on Feature; see below */
+ U8 Parameter2; /* 0x0D */
+ U8 Parameter3; /* 0x0E */
+ U8 Parameter4; /* 0x0F */
+ U32 Reserved5; /* 0x10 */
+ U32 Reserved6; /* 0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, MPI2_POINTER pMpi2PwrMgmtControlRequest_t;
+
+/* defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) /* reserved in MPI 2.0 */
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/* Parameter1 contains a PHY number */
+/* Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION Feature */
+/* Parameter1 contains SAS port width modulation group number */
+/* Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/* Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/* Parameter4 is reserved */
+
+/* this next set (_PCIE_LINK) is obsolete */
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/* Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
+/* Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
+/* Parameter3 and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/* Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/* Parameter2, Parameter3, and Parameter4 are reserved */
+
+/* parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature */
+/* Parameter1 indicates host action regarding global power management mode */
+#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01)
+#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02)
+#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03)
+/* Parameter2 indicates the requested global power management mode */
+#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01)
+#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08)
+#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40)
+/* Parameter3 and Parameter4 are reserved */
+
+
+/* PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY
+{
+ U8 Feature; /* 0x00 */ /* echoes MPI2_PM_CONTROL_FEATURE_* from the request */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, MPI2_POINTER PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, MPI2_POINTER pMpi2PwrMgmtControlReply_t;
+
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_ra.h b/sys/dev/mpr/mpi/mpi2_ra.h
new file mode 100644
index 0000000000000..76db06b71b98f
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_ra.h
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2009 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ra.h
+ * Title: MPI RAID Accelerator messages and structures
+ * Creation Date: April 13, 2009
+ *
+ * mpi2_ra.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-06-09 02.00.00 Initial version.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RA_H
+#define MPI2_RA_H
+
+/* generic structure for RAID Accelerator Control Block */
+typedef struct _MPI2_RAID_ACCELERATOR_CONTROL_BLOCK
+{
+ U32 Reserved[8]; /* 0x00 */
+ U32 RaidAcceleratorCDB[1]; /* 0x20 */ /* NOTE(review): trailing [1] suggests a variable-length CDB -- confirm against spec */
+} MPI2_RAID_ACCELERATOR_CONTROL_BLOCK,
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_CONTROL_BLOCK,
+ Mpi2RAIDAcceleratorControlBlock_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorControlBlock_t;
+
+
+/******************************************************************************
+*
+* RAID Accelerator Messages
+*
+*******************************************************************************/
+
+/* RAID Accelerator Request Message */
+typedef struct _MPI2_RAID_ACCELERATOR_REQUEST
+{
+ U16 Reserved0; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U64 RaidAcceleratorControlBlockAddress; /* 0x0C */ /* address of a MPI2_RAID_ACCELERATOR_CONTROL_BLOCK; 64-bit field at a 4-byte-aligned offset */
+ U8 DmaEngineNumber; /* 0x14 */
+ U8 Reserved4; /* 0x15 */
+ U16 Reserved5; /* 0x16 */
+ U32 Reserved6; /* 0x18 */
+ U32 Reserved7; /* 0x1C */
+ U32 Reserved8; /* 0x20 */
+} MPI2_RAID_ACCELERATOR_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REQUEST,
+ Mpi2RAIDAcceleratorRequest_t, MPI2_POINTER pMpi2RAIDAcceleratorRequest_t;
+
+
+/* RAID Accelerator Error Reply Message */
+typedef struct _MPI2_RAID_ACCELERATOR_REPLY
+{
+ U16 Reserved0; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */ /* NOTE(review): presumably an MPI2_IOCSTATUS_* value from mpi2.h -- confirm */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ProductSpecificData[3]; /* 0x14 */
+} MPI2_RAID_ACCELERATOR_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_REPLY,
+ Mpi2RAIDAcceleratorReply_t, MPI2_POINTER pMpi2RAIDAcceleratorReply_t;
+
+
+#endif
+
+
diff --git a/sys/dev/mpr/mpi/mpi2_raid.h b/sys/dev/mpr/mpi/mpi2_raid.h
new file mode 100644
index 0000000000000..c6bb59864a3c9
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_raid.h
@@ -0,0 +1,406 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.10
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
+ * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) /* MPI25 prefix: MPI v2.5 products only */
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/* ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA
+{
+ U8 RateToChange; /* 0x00 */ /* MPI2_RAID_ACTION_SET_RATE_* */
+ U8 RateOrMode; /* 0x01 */
+ U16 DataScrubDuration; /* 0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, MPI2_POINTER pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/* ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION
+{
+ U8 RAIDFunction; /* 0x00 */ /* MPI2_RAID_ACTION_START_* function selector */
+ U8 Flags; /* 0x01 */ /* MPI2_RAID_ACTION_START_NEW or _RESUME */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ MPI2_POINTER pMpi2RaidActionStartRaidFunction_t;
+
+/* defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/* defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/* ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION
+{
+ U8 RAIDFunction; /* 0x00 */ /* MPI2_RAID_ACTION_STOP_* function selector */
+ U8 Flags; /* 0x01 */ /* MPI2_RAID_ACTION_STOP_ABORT or _PAUSE */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ MPI2_POINTER pMpi2RaidActionStopRaidFunction_t;
+
+/* defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/* defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/* ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE
+{
+ U8 HotSparePool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 DevHandle; /* 0x02 */ /* device handle of the hot-spare disk */
+} MPI2_RAID_ACTION_HOT_SPARE, MPI2_POINTER PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, MPI2_POINTER pMpi2RaidActionHotSpare_t;
+
+/* ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE
+{
+ U8 Flags; /* 0x00 */ /* MPI2_RAID_ACTION_ADATA_*_FW_UPDATE */
+ U8 DeviceFirmwareUpdateModeTimeout; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ MPI2_POINTER PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t, MPI2_POINTER pMpi2RaidActionFwUpdateMode_t;
+
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA /* which member applies depends on the request's Action */
+{
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, MPI2_POINTER pMpi2RaidActionData_t;
+
+
+/* RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST
+{
+ U8 Action; /* 0x00 */ /* MPI2_RAID_ACTION_* value from the list below */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 VolDevHandle; /* 0x04 */
+ U8 PhysDiskNum; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /* 0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /* 0x14 */
+} MPI2_RAID_ACTION_REQUEST, MPI2_POINTER PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, MPI2_POINTER pMpi2RaidActionRequest_t;
+
+/* RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+
+/* RAID Volume Creation Structure */
+
+/*
+ * The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) /* build-time array bound; actual count is NumPhysDisks */
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK /* maps one physical disk into a RAID set */
+{
+ U8 RAIDSetNum; /* 0x00 */
+ U8 PhysDiskMap; /* 0x01 */ /* MPI2_RAIDACTION_PHYSDISK_* */
+ U16 PhysDiskDevHandle; /* 0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, MPI2_POINTER PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, MPI2_POINTER pMpi2RaidVolumePhysDisk_t;
+
+/* defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT
+{
+ U8 NumPhysDisks; /* 0x00 */
+ U8 VolumeType; /* 0x01 */
+ U16 Reserved1; /* 0x02 */
+ U32 VolumeCreationFlags; /* 0x04 */ /* MPI2_RAID_VOL_CREATION_* */
+ U32 VolumeSettings; /* 0x08 */
+ U8 Reserved2; /* 0x0C */
+ U8 ResyncRate; /* 0x0D */
+ U16 DataScrubDuration; /* 0x0E */
+ U64 VolumeMaxLBA; /* 0x10 */
+ U32 StripeSize; /* 0x18 */
+ U8 Name[16]; /* 0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS];/* 0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ MPI2_POINTER PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t, MPI2_POINTER pMpi2RaidVolumeCreationStruct_t;
+
+/* use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/* defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) /* MPI 2.0 only */
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/* The following is an obsolete define.
+ * It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
+
+
+/* RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
+{
+ U32 Flags; /* 0x00 */
+ U16 DevHandle0; /* 0x04 */
+ U16 Reserved1; /* 0x06 */
+ U16 DevHandle1; /* 0x08 */
+ U16 Reserved2; /* 0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ MPI2_POINTER PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
+
+
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT /* input for MPI2_RAID_ACTION_COMPATIBILITY_CHECK */
+{
+ U16 SourceDevHandle; /* 0x00 */
+ U16 CandidateDevHandle; /* 0x02 */
+ U32 Flags; /* 0x04 */ /* MPI2_RAID_COMPAT_*_FLAG */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ Mpi2RaidCompatibilityInputStruct_t,
+ MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+
+/* RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR /* progress indicator returned in RAID Action reply data */
+{
+ U64 TotalBlocks; /* 0x00 */
+ U64 BlocksRemaining; /* 0x08 */
+ U32 Flags; /* 0x10 */ /* MPI2_RAID_VOL_FLAGS_* */
+ U32 ElapsedSeconds; /* 0x14 */ /* see ELAPSED_SECONDS_VALID flag */
+} MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t;
+
+/* defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000)
+
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+
+
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT
+{
+ U8 State; /* 0x00 */ /* MPI2_RAID_COMPAT_STATE_* */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 GenericAttributes; /* 0x04 */ /* MPI2_RAID_COMPAT_GENATTRIB_* */
+ U32 OEMSpecificAttributes; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ Mpi2RaidCompatibilityResultStruct_t,
+ MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+
+/* RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA /* which member applies depends on the Action being replied to */
+{
+ U32 Word[6];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
+
+/* use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+
+/* RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY
+{
+ U8 Action; /* 0x00 */ /* MPI2_RAID_ACTION_* this reply corresponds to */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 VolDevHandle; /* 0x04 */
+ U8 PhysDiskNum; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U16 Reserved3; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /* 0x14 */
+} MPI2_RAID_ACTION_REPLY, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, MPI2_POINTER pMpi2RaidActionReply_t;
+
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_sas.h b/sys/dev/mpr/mpi/mpi2_sas.h
new file mode 100644
index 0000000000000..2a107f1e16170
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_sas.h
@@ -0,0 +1,346 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.08
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ * for anything newer than MPI v2.0.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ * Values for SASStatus, reported in the SASStatus field of the SMP and
+ * SATA Passthrough Reply messages defined below.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+
+/*
+ * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ * data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/* SMP Passthrough Request Message; destination given by SASAddress, request length by RequestDataLength */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST
+{
+ U8 PassthroughFlags; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 RequestDataLength; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U64 SASAddress; /* 0x10 */
+ U32 Reserved3; /* 0x18 */
+ U32 Reserved4; /* 0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */ /* MPI v2.5: IEEE Simple 64 elements only */
+} MPI2_SMP_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, MPI2_POINTER pMpi2SmpPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* SMP Passthrough Reply Message; ResponseDataLength presumably gives the valid length of ResponseData[] -- confirm */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY
+{
+ U8 PassthroughFlags; /* 0x00 */
+ U8 PhysicalPort; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 ResponseDataLength; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U8 Reserved2; /* 0x0C */
+ U8 SASStatus; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 Reserved3; /* 0x14 */
+ U8 ResponseData[4]; /* 0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, MPI2_POINTER pMpi2SmpPassthroughReply_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/* values for SASStatus field are at the top of this file */
+
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+typedef union _MPI2_SATA_PT_SGE_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple; /* MPI v2.0 only */
+ MPI2_SGE_CHAIN_UNION MpiChain; /* MPI v2.0 only */
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /* MPI v2.0 only */
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /* MPI v2.5 only */
+} MPI2_SATA_PT_SGE_UNION, MPI2_POINTER PTR_MPI2_SATA_PT_SGE_UNION,
+ Mpi2SataPTSGEUnion_t, MPI2_POINTER pMpi2SataPTSGEUnion_t;
+
+
+/* SATA Passthrough Request Message; CommandFIS holds the 20-byte ATA FIS to issue */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
+{
+ U16 DevHandle; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 PassthroughFlags; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */ /* MPI v2.0 only. Reserved on MPI v2.5. */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U32 Reserved2; /* 0x0C */
+ U32 Reserved3; /* 0x10 */
+ U32 Reserved4; /* 0x14 */
+ U32 DataLength; /* 0x18 */
+ U8 CommandFIS[20]; /* 0x1C */
+ MPI2_SATA_PT_SGE_UNION SGL; /* 0x30 */ /* MPI v2.5: IEEE 64 elements only */
+} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/* MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* SATA Passthrough Reply Message; StatusFIS and TransferCount report the completion state */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY
+{
+ U16 DevHandle; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 PassthroughFlags; /* 0x04 */
+ U8 SGLFlags; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved1; /* 0x0A */
+ U8 Reserved2; /* 0x0C */
+ U8 SASStatus; /* 0x0D */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 StatusFIS[20]; /* 0x14 */
+ U32 StatusControlRegisters; /* 0x28 */
+ U32 TransferCount; /* 0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, MPI2_POINTER pMpi2SataPassthroughReply_t;
+
+/* values for SASStatus field are at the top of this file */
+
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/* SAS IO Unit Control Request Message; fields beyond IOCParameter are presumably used only by the matching Operation (values below) -- confirm per-op in the MPI2 spec */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U8 PhyNum; /* 0x0E */
+ U8 PrimFlags; /* 0x0F */
+ U32 Primitive; /* 0x10 */
+ U8 LookupMethod; /* 0x14 */
+ U8 Reserved5; /* 0x15 */
+ U16 SlotNumber; /* 0x16 */
+ U64 LookupAddress; /* 0x18 */
+ U32 IOCParameterValue; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+ U32 Reserved8; /* 0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t, MPI2_POINTER pMpi2SasIoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12)
+#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/* values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/* values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+
+/* SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY
+{
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ MPI2_POINTER PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, MPI2_POINTER pMpi2SasIoUnitControlReply_t;
+
+
+#endif
+
+
diff --git a/sys/dev/mpr/mpi/mpi2_targ.h b/sys/dev/mpr/mpi/mpi2_targ.h
new file mode 100644
index 0000000000000..506e64f6370c6
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_targ.h
@@ -0,0 +1,600 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_targ.h
+ * Title: MPI Target mode messages and structures
+ * Creation Date: September 8, 2006
+ *
+ * mpi2_targ.h Version: 02.00.06
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
+ * BufferPostFlags field of CommandBufferPostBase Request.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
+ * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
+ * Target Status Send Request only takes a single SGE for
+ * response data.
+ * 02-10-10 02.00.04 Added comment to MPI2_TARGET_SSP_RSP_IU structure.
+ * 11-18-11 02.00.05 Incorporating additions for MPI v2.5.
+ * 11-27-12 02.00.06 Added InitiatorDevHandle field to MPI2_TARGET_MODE_ABORT
+ * request message structure.
+ * Added AbortType MPI2_TARGET_MODE_ABORT_DEVHANDLE and
+ * MPI2_TARGET_MODE_ABORT_ALL_COMMANDS.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TARG_H
+#define MPI2_TARG_H
+
+
+/******************************************************************************
+*
+* SCSI Target Messages
+*
+*******************************************************************************/
+
+/****************************************************************************
+* Target Command Buffer Post Base Request
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST
+{
+ U8 BufferPostFlags; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 TotalCmdBuffers; /* 0x04 */
+ U8 Reserved; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ U16 CmdBufferLength; /* 0x10 */
+ U16 Reserved4; /* 0x12 */
+ U32 BaseAddressLow; /* 0x14 */
+ U32 BaseAddressHigh; /* 0x18 */
+} MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ Mpi2TargetCmdBufferPostBaseRequest_t,
+ MPI2_POINTER pMpi2TargetCmdBufferPostBaseRequest_t;
+
+/* values for the BufferPostFlags field */
+#define MPI2_CMD_BUF_POST_BASE_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_CMD_BUF_POST_BASE_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_CMD_BUF_POST_BASE_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_CMD_BUF_POST_BASE_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_CMD_BUF_POST_BASE_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+
+#define MPI2_CMD_BUF_POST_BASE_FLAGS_AUTO_POST_ALL (0x01)
+
+
+/****************************************************************************
+* Target Command Buffer Post List Request
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST
+{
+ U16 Reserved; /* 0x00 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 CmdBufferCount; /* 0x04 */
+ U8 Reserved1; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved2; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ U16 IoIndex[2]; /* 0x10 */
+} MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST,
+ MPI2_POINTER PTR_MPI2_TARGET_CMD_BUF_POST_LIST_REQUEST,
+ Mpi2TargetCmdBufferPostListRequest_t,
+ MPI2_POINTER pMpi2TargetCmdBufferPostListRequest_t;
+
+/****************************************************************************
+* Target Command Buffer Post Base List Reply (IoIndex is valid only when Flags has MPI2_CMD_BUF_POST_REPLY_IOINDEX_VALID set)
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_BUF_POST_BASE_LIST_REPLY
+{
+ U8 Flags; /* 0x00 */
+ U8 Reserved; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U16 IoIndex; /* 0x14 */
+ U16 Reserved5; /* 0x16 */
+ U32 Reserved6; /* 0x18 */
+} MPI2_TARGET_BUF_POST_BASE_LIST_REPLY,
+ MPI2_POINTER PTR_MPI2_TARGET_BUF_POST_BASE_LIST_REPLY,
+ Mpi2TargetCmdBufferPostBaseListReply_t,
+ MPI2_POINTER pMpi2TargetCmdBufferPostBaseListReply_t;
+
+/* Flags defines */
+#define MPI2_CMD_BUF_POST_REPLY_IOINDEX_VALID (0x01)
+
+
+/****************************************************************************
+* Command Buffer Formats (with 16 byte CDB). NOTE(review): the pointer typedef pMp2iTargetSspCmdBuffer below looks misspelled (expected pMpi2...); kept as-is since renaming would break source compatibility.
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_SSP_CMD_BUFFER
+{
+ U8 FrameType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 InitiatorConnectionTag; /* 0x02 */
+ U32 HashedSourceSASAddress; /* 0x04 */
+ U16 Reserved2; /* 0x08 */
+ U16 Flags; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ U16 Tag; /* 0x10 */
+ U16 TargetPortTransferTag; /* 0x12 */
+ U32 DataOffset; /* 0x14 */
+ /* COMMAND information unit starts here */
+ U8 LogicalUnitNumber[8]; /* 0x18 */
+ U8 Reserved4; /* 0x20 */
+ U8 TaskAttribute; /* lower 3 bits */ /* 0x21 */
+ U8 Reserved5; /* 0x22 */
+ U8 AdditionalCDBLength; /* upper 5 bits */ /* 0x23 */
+ U8 CDB[16]; /* 0x24 */
+ /* Additional CDB bytes extend past the CDB field */
+} MPI2_TARGET_SSP_CMD_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_CMD_BUFFER,
+ Mpi2TargetSspCmdBuffer, MPI2_POINTER pMp2iTargetSspCmdBuffer;
+
+typedef struct _MPI2_TARGET_SSP_TASK_BUFFER
+{
+ U8 FrameType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 InitiatorConnectionTag; /* 0x02 */
+ U32 HashedSourceSASAddress; /* 0x04 */
+ U16 Reserved2; /* 0x08 */
+ U16 Flags; /* 0x0A */
+ U32 Reserved3; /* 0x0C */
+ U16 Tag; /* 0x10 */
+ U16 TargetPortTransferTag; /* 0x12 */
+ U32 DataOffset; /* 0x14 */
+ /* TASK information unit starts here */
+ U8 LogicalUnitNumber[8]; /* 0x18 */
+ U16 Reserved4; /* 0x20 */
+ U8 TaskManagementFunction; /* 0x22 */
+ U8 Reserved5; /* 0x23 */
+ U16 ManagedTaskTag; /* 0x24 */
+ U16 Reserved6; /* 0x26 */
+ U32 Reserved7; /* 0x28 */
+ U32 Reserved8; /* 0x2C */
+ U32 Reserved9; /* 0x30 */
+} MPI2_TARGET_SSP_TASK_BUFFER, MPI2_POINTER PTR_MPI2_TARGET_SSP_TASK_BUFFER,
+ Mpi2TargetSspTaskBuffer, MPI2_POINTER pMpi2TargetSspTaskBuffer;
+
+/* mask and shift for HashedSourceSASAddress field */
+#define MPI2_TARGET_HASHED_SAS_ADDRESS_MASK (0xFFFFFF00)
+#define MPI2_TARGET_HASHED_SAS_ADDRESS_SHIFT (8)
+
+
+/****************************************************************************
+* MPI v2.0 Target Assist Request -- presumably transfers data for the command buffer identified by IoIndex; confirm against the MPI2 spec
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_ASSIST_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 TargetAssistFlags; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 QueueTag; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 IoIndex; /* 0x0C */
+ U16 InitiatorConnectionTag; /* 0x0E */
+ U16 SGLFlags; /* 0x10 */
+ U8 SequenceNumber; /* 0x12 */
+ U8 Reserved4; /* 0x13 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 SGLOffset1; /* 0x15 */
+ U8 SGLOffset2; /* 0x16 */
+ U8 SGLOffset3; /* 0x17 */
+ U32 SkipCount; /* 0x18 */
+ U32 DataLength; /* 0x1C */
+ U32 BidirectionalDataLength; /* 0x20 */
+ U16 IoFlags; /* 0x24 */
+ U16 EEDPFlags; /* 0x26 */
+ U32 EEDPBlockSize; /* 0x28 */
+ U32 SecondaryReferenceTag; /* 0x2C */
+ U16 SecondaryApplicationTag; /* 0x30 */
+ U16 ApplicationTagTranslationMask; /* 0x32 */
+ U32 PrimaryReferenceTag; /* 0x34 */
+ U16 PrimaryApplicationTag; /* 0x38 */
+ U16 PrimaryApplicationTagMask; /* 0x3A */
+ U32 RelativeOffset; /* 0x3C */
+ U32 Reserved5; /* 0x40 */
+ U32 Reserved6; /* 0x44 */
+ U32 Reserved7; /* 0x48 */
+ U32 Reserved8; /* 0x4C */
+ MPI2_SGE_IO_UNION SGL[1]; /* 0x50 */
+} MPI2_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI2_TARGET_ASSIST_REQUEST,
+ Mpi2TargetAssistRequest_t, MPI2_POINTER pMpi2TargetAssistRequest_t;
+
+/* Target Assist TargetAssistFlags bits */
+
+#define MPI2_TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80)
+#define MPI2_TARGET_ASSIST_FLAGS_TLR (0x10)
+#define MPI2_TARGET_ASSIST_FLAGS_RETRANSMIT (0x04)
+#define MPI2_TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02)
+#define MPI2_TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01)
+
+/* Target Assist SGLFlags bits */
+
+/* base values for Data Location Address Space */
+#define MPI2_TARGET_ASSIST_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_PLBNTA_ADDR (0x0C)
+
+/* base values for Type */
+#define MPI2_TARGET_ASSIST_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_MPI_TYPE (0x00)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_32IEEE_TYPE (0x01)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_64IEEE_TYPE (0x02)
+
+/* shift values for each sub-field */
+#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_TARGET_ASSIST_SGLFLAGS_SGL0_SHIFT (0)
+
+/* Target Assist IoFlags bits */
+
+#define MPI2_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_TARGET_ASSIST_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200)
+
+/* Target Assist EEDPFlags bits */
+
+#define MPI2_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_TA_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_TA_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_TA_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_TA_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_TA_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_TA_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_TA_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_TA_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_TA_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+
+/****************************************************************************
+* MPI v2.5 Target Assist Request (vs v2.0: DMAFlags replaces SGLFlags, EEDPBlockSize narrows to U16, SGL is a single MPI25_SGE_IO_UNION)
+****************************************************************************/
+
+typedef struct _MPI25_TARGET_ASSIST_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 TargetAssistFlags; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 QueueTag; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 IoIndex; /* 0x0C */
+ U16 InitiatorConnectionTag; /* 0x0E */
+ U8 DMAFlags; /* 0x10 */
+ U8 Reserved9; /* 0x11 */
+ U8 SequenceNumber; /* 0x12 */
+ U8 Reserved4; /* 0x13 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 SGLOffset1; /* 0x15 */
+ U8 SGLOffset2; /* 0x16 */
+ U8 SGLOffset3; /* 0x17 */
+ U32 SkipCount; /* 0x18 */
+ U32 DataLength; /* 0x1C */
+ U32 BidirectionalDataLength; /* 0x20 */
+ U16 IoFlags; /* 0x24 */
+ U16 EEDPFlags; /* 0x26 */
+ U16 EEDPBlockSize; /* 0x28 */
+ U16 Reserved10; /* 0x2A */
+ U32 SecondaryReferenceTag; /* 0x2C */
+ U16 SecondaryApplicationTag; /* 0x30 */
+ U16 ApplicationTagTranslationMask; /* 0x32 */
+ U32 PrimaryReferenceTag; /* 0x34 */
+ U16 PrimaryApplicationTag; /* 0x38 */
+ U16 PrimaryApplicationTagMask; /* 0x3A */
+ U32 RelativeOffset; /* 0x3C */
+ U32 Reserved5; /* 0x40 */
+ U32 Reserved6; /* 0x44 */
+ U32 Reserved7; /* 0x48 */
+ U32 Reserved8; /* 0x4C */
+ MPI25_SGE_IO_UNION SGL; /* 0x50 */
+} MPI25_TARGET_ASSIST_REQUEST, MPI2_POINTER PTR_MPI25_TARGET_ASSIST_REQUEST,
+ Mpi25TargetAssistRequest_t, MPI2_POINTER pMpi25TargetAssistRequest_t;
+
+/* use MPI2_TARGET_ASSIST_FLAGS_ defines for the Flags field */
+
+/* Defines for the DMAFlags field
+ * Each setting affects 4 SGLS, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_TA_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_TA_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_TA_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_TA_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_TA_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_TA_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_TA_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_TA_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_TA_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_TA_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_TA_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_TA_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_TA_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_TA_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_TA_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_TA_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_TA_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/* defines for the IoFlags field */
+#define MPI25_TARGET_ASSIST_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_TARGET_ASSIST_IOFLAGS_RECEIVE_FIRST (0x0200)
+
+/* defines for the EEDPFlags field */
+#define MPI25_TA_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI25_TA_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI25_TA_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI25_TA_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI25_TA_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI25_TA_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI25_TA_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI25_TA_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_TA_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_TA_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_TA_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_TA_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_TA_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_TA_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_TA_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+#define MPI25_TA_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI25_TA_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI25_TA_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI25_TA_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI25_TA_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI25_TA_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI25_TA_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI25_TA_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI25_TA_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+
+/****************************************************************************
+* Target Status Send Request; status data is supplied through StatusDataSGE
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_STATUS_SEND_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 StatusFlags; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 QueueTag; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 IoIndex; /* 0x0C */
+ U16 InitiatorConnectionTag; /* 0x0E */
+ U16 SGLFlags; /* 0x10 */ /* MPI v2.0 only. Reserved on MPI v2.5. */
+ U16 Reserved4; /* 0x12 */
+ U8 SGLOffset0; /* 0x14 */
+ U8 Reserved5; /* 0x15 */
+ U16 Reserved6; /* 0x16 */
+ U32 Reserved7; /* 0x18 */
+ U32 Reserved8; /* 0x1C */
+ MPI2_SIMPLE_SGE_UNION StatusDataSGE; /* 0x20 */ /* MPI v2.5: This must be an IEEE Simple Element 64. */
+} MPI2_TARGET_STATUS_SEND_REQUEST,
+ MPI2_POINTER PTR_MPI2_TARGET_STATUS_SEND_REQUEST,
+ Mpi2TargetStatusSendRequest_t, MPI2_POINTER pMpi2TargetStatusSendRequest_t;
+
+/* Target Status Send StatusFlags bits */
+
+#define MPI2_TSS_FLAGS_REPOST_CMD_BUFFER (0x80)
+#define MPI2_TSS_FLAGS_RETRANSMIT (0x04)
+#define MPI2_TSS_FLAGS_AUTO_GOOD_STATUS (0x01)
+
+/* Target Status Send SGLFlags bits - MPI v2.0 only */
+/* Data Location Address Space */
+#define MPI2_TSS_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_TSS_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_TSS_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_TSS_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_TSS_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+/* Type */
+#define MPI2_TSS_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_TSS_SGLFLAGS_MPI_TYPE (0x00)
+#define MPI2_TSS_SGLFLAGS_IEEE32_TYPE (0x01)
+#define MPI2_TSS_SGLFLAGS_IEEE64_TYPE (0x02)
+
+
+
+/*
+ * NOTE: The SSP status IU is big-endian. When used on a little-endian system,
+ * this structure properly orders the bytes.
+ */
+typedef struct _MPI2_TARGET_SSP_RSP_IU
+{
+ U32 Reserved0[6]; /* reserved for SSP header */ /* 0x00 */
+
+ /* start of RESPONSE information unit */
+ U32 Reserved1; /* 0x18 */
+ U32 Reserved2; /* 0x1C */
+ U16 Reserved3; /* 0x20 */
+ U8 DataPres; /* lower 2 bits */ /* 0x22 */
+ U8 Status; /* 0x23 */
+ U32 Reserved4; /* 0x24 */
+ U32 SenseDataLength; /* 0x28 */
+ U32 ResponseDataLength; /* 0x2C */
+
+ /* start of Response or Sense Data (size may vary dynamically; valid lengths are given by SenseDataLength/ResponseDataLength above) */
+ U8 ResponseSenseData[4]; /* 0x30 */
+} MPI2_TARGET_SSP_RSP_IU, MPI2_POINTER PTR_MPI2_TARGET_SSP_RSP_IU,
+ Mpi2TargetSspRspIu_t, MPI2_POINTER pMpi2TargetSspRspIu_t;
+
+
+/****************************************************************************
+* Target Standard Reply - used with Target Assist or Target Status Send (note: typedef'd as Mpi2TargetErrorReply_t)
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_STANDARD_REPLY
+{
+ U16 Reserved; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U16 IoIndex; /* 0x14 */
+ U16 Reserved5; /* 0x16 */
+ U32 TransferCount; /* 0x18 */
+ U32 BidirectionalTransferCount; /* 0x1C */
+} MPI2_TARGET_STANDARD_REPLY, MPI2_POINTER PTR_MPI2_TARGET_STANDARD_REPLY,
+ Mpi2TargetErrorReply_t, MPI2_POINTER pMpi2TargetErrorReply_t;
+
+
+/****************************************************************************
+* Target Mode Abort Request (note: typedef'd MPI2_TARGET_MODE_ABORT, without _REQUEST; scope selected by the AbortType values below)
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_MODE_ABORT_REQUEST
+{
+ U8 AbortType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 IoIndexToAbort; /* 0x0C */
+ U16 InitiatorDevHandle; /* 0x0E */
+ U32 MidToAbort; /* 0x10 */
+} MPI2_TARGET_MODE_ABORT, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT,
+ Mpi2TargetModeAbort_t, MPI2_POINTER pMpi2TargetModeAbort_t;
+
+/* Target Mode Abort AbortType values */
+
+#define MPI2_TARGET_MODE_ABORT_ALL_CMD_BUFFERS (0x00)
+#define MPI2_TARGET_MODE_ABORT_ALL_IO (0x01)
+#define MPI2_TARGET_MODE_ABORT_EXACT_IO (0x02)
+#define MPI2_TARGET_MODE_ABORT_EXACT_IO_REQUEST (0x03)
+#define MPI2_TARGET_MODE_ABORT_IO_REQUEST_AND_IO (0x04)
+#define MPI2_TARGET_MODE_ABORT_DEVHANDLE (0x05)
+#define MPI2_TARGET_MODE_ABORT_ALL_COMMANDS (0x06)
+
+
+/****************************************************************************
+* Target Mode Abort Reply; AbortCount presumably reports how many IOs were aborted -- confirm against the MPI2 spec
+****************************************************************************/
+
+typedef struct _MPI2_TARGET_MODE_ABORT_REPLY
+{
+ U16 Reserved; /* 0x00 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved1; /* 0x04 */
+ U8 Reserved2; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 AbortCount; /* 0x14 */
+} MPI2_TARGET_MODE_ABORT_REPLY, MPI2_POINTER PTR_MPI2_TARGET_MODE_ABORT_REPLY,
+ Mpi2TargetModeAbortReply_t, MPI2_POINTER pMpi2TargetModeAbortReply_t;
+
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_tool.h b/sys/dev/mpr/mpi/mpi2_tool.h
new file mode 100644
index 0000000000000..94542cc7959b8
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_tool.h
@@ -0,0 +1,546 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2013 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.11
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
+ * it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/* defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
+
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_TOOLBOX_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, MPI2_POINTER pMpi2ToolboxReply_t;
+
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Flags; /* 0x0C */
+ } MPI2_TOOLBOX_CLEAN_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, MPI2_POINTER pMpi2ToolboxCleanRequest_t;
+
+/* values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, MPI2_POINTER pMpi2ToolboxMemMoveRequest_t;
+
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 SGLFlags; /* 0x0C */
+ U8 Reserved5; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U32 Flags; /* 0x10 */
+ U32 DataLength; /* 0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER
+{
+ U32 DiagDataLength; /* 00h */
+ U8 FormatCode; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t;
+
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/* Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 Reserved6; /* 0x10 */
+ U8 DevIndex; /* 0x14 */
+ U8 Action; /* 0x15 */
+ U8 SGLFlags; /* 0x16 */
+ U8 Flags; /* 0x17 */
+ U16 TxDataLength; /* 0x18 */
+ U16 RxDataLength; /* 0x1A */
+ U32 Reserved8; /* 0x1C */
+ U32 Reserved9; /* 0x20 */
+ U32 Reserved10; /* 0x24 */
+ U32 Reserved11; /* 0x28 */
+ U32 Reserved12; /* 0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /* 0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ MPI2_POINTER pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/* values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+
+/* Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U8 DevIndex; /* 0x14 */
+ U8 Action; /* 0x15 */
+ U8 IstwiStatus; /* 0x16 */
+ U8 Reserved6; /* 0x17 */
+ U16 TxDataCount; /* 0x18 */
+ U16 RxDataCount; /* 0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, MPI2_POINTER PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, MPI2_POINTER pMpi2ToolboxIstwiReply_t;
+
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Reserved5; /* 0x0C */
+ U8 PhysicalPort; /* 0x0D */
+ U8 Reserved6; /* 0x0E */
+ U8 Flags; /* 0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, MPI2_POINTER pMpi2ToolboxBeaconRequest_t;
+
+/* values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
+
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 SGLFlags; /* 0x0C */
+ U8 Reserved5; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U32 DataLength; /* 0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */
+ MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/* MPI v2.5 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U32 Reserved5; /* 0x0C */
+ U32 DataLength; /* 0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */
+ MPI25_SGE_IO_UNION SGL; /* 0x70 */
+} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ MPI2_POINTER PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi25ToolboxDiagnosticCliRequest_t,
+ MPI2_POINTER pMpi25ToolboxDiagnosticCliRequest_t;
+
+
+/* Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ReturnedDataLength; /* 0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
+
+
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST
+{
+ U8 Tool; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U8 Console; /* 0x0C */
+ U8 Flags; /* 0x0D */
+ U16 Reserved6; /* 0x0E */
+ U8 TextToDisplay[4]; /* 0x10 */ /* actual length determined at runtime based on frame size */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+ MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+ Mpi2ToolboxTextDisplayRequest_t,
+ MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
+{
+ U8 ExtendedType; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U64 BufferAddress; /* 0x0C */
+ U32 BufferLength; /* 0x14 */
+ U32 Reserved5; /* 0x18 */
+ U32 Reserved6; /* 0x1C */
+ U32 Flags; /* 0x20 */
+ U32 ProductSpecific[23]; /* 0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, MPI2_POINTER pMpi2DiagBufferPostRequest_t;
+
+/* values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/* values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/* count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) /* for MPI v2.0 products only */
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY
+{
+ U8 ExtendedType; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 TransferLength; /* 0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, MPI2_POINTER PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, MPI2_POINTER pMpi2DiagBufferPostReply_t;
+
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, MPI2_POINTER pMpi2DiagReleaseRequest_t;
+
+
+/****************************************************************************
+*  Diagnostic Release reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY
+{
+ U8 Reserved1; /* 0x00 */
+ U8 BufferType; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 Reserved2; /* 0x04 */
+ U8 Reserved3; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved4; /* 0x0A */
+ U16 Reserved5; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI2_DIAG_RELEASE_REPLY, MPI2_POINTER PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, MPI2_POINTER pMpi2DiagReleaseReply_t;
+
+
+#endif
+
diff --git a/sys/dev/mpr/mpi/mpi2_type.h b/sys/dev/mpr/mpi/mpi2_type.h
new file mode 100644
index 0000000000000..da3aefb968f44
--- /dev/null
+++ b/sys/dev/mpr/mpi/mpi2_type.h
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2013 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2000-2007 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+
+/*******************************************************************************
+ * Define MPI2_POINTER if it hasn't already been defined. By default
+ * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as
+ * a far pointer by defining MPI2_POINTER as "far *" before this header file is
+ * included.
+ */
+#ifndef MPI2_POINTER
+#define MPI2_POINTER *
+#endif
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef signed char S8;
+typedef unsigned char U8;
+typedef signed short S16;
+typedef unsigned short U16;
+
+#ifdef __FreeBSD__
+
+typedef int32_t S32;
+typedef uint32_t U32;
+
+#else
+
+#if defined(unix) || defined(__arm) || defined(ALPHA) || defined(__PPC__) || defined(__ppc)
+
+ typedef signed int S32;
+ typedef unsigned int U32;
+
+#else
+
+ typedef signed long S32;
+ typedef unsigned long U32;
+
+#endif
+#endif
+
+typedef struct _S64
+{
+ U32 Low;
+ S32 High;
+} S64;
+
+typedef struct _U64
+{
+ U32 Low;
+ U32 High;
+} U64;
+
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef S8 *PS8;
+typedef U8 *PU8;
+typedef S16 *PS16;
+typedef U16 *PU16;
+typedef S32 *PS32;
+typedef U32 *PU32;
+typedef S64 *PS64;
+typedef U64 *PU64;
+
+#endif
+
+#endif
+
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
new file mode 100644
index 0000000000000..3656eec368646
--- /dev/null
+++ b/sys/dev/mpr/mpr.c
@@ -0,0 +1,2795 @@
+/*-
+ * Copyright (c) 2009 Yahoo! Inc.
+ * Copyright (c) 2012-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Communications core for LSI MPT2 */
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/endian.h>
+#include <sys/eventhandler.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+#include <sys/proc.h>
+
+#include <dev/pci/pcivar.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_table.h>
+#include <dev/mpr/mpr_sas.h>
+
+static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
+static int mpr_init_queues(struct mpr_softc *sc);
+static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
+static int mpr_transition_operational(struct mpr_softc *sc);
+static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
+static void mpr_iocfacts_free(struct mpr_softc *sc);
+static void mpr_startup(void *arg);
+static int mpr_send_iocinit(struct mpr_softc *sc);
+static int mpr_alloc_queues(struct mpr_softc *sc);
+static int mpr_alloc_replies(struct mpr_softc *sc);
+static int mpr_alloc_requests(struct mpr_softc *sc);
+static int mpr_attach_log(struct mpr_softc *sc);
+static __inline void mpr_complete_command(struct mpr_softc *sc,
+ struct mpr_command *cm);
+static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
+ MPI2_EVENT_NOTIFICATION_REPLY *reply);
+static void mpr_config_complete(struct mpr_softc *sc,
+ struct mpr_command *cm);
+static void mpr_periodic(void *);
+static int mpr_reregister_events(struct mpr_softc *sc);
+static void mpr_enqueue_request(struct mpr_softc *sc,
+ struct mpr_command *cm);
+static int mpr_get_iocfacts(struct mpr_softc *sc,
+ MPI2_IOC_FACTS_REPLY *facts);
+static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
+SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");
+
+MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");
+
+/*
+ * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
+ * any state and back to its initialization state machine.
+ */
+static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
+
+/*
+ * Added this union to smoothly convert le64toh cm->cm_desc.Words.
+ * Compiler only supports uint64_t to be passed as an argument.
+ * Otherwise it will throw this error:
+ * "aggregate value used where an integer was expected"
+ */
+typedef union _reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+}reply_descriptor,address_descriptor;
+
+/* Rate limit chain-fail messages to 1 per minute */
+static struct timeval mpr_chainfail_interval = { 60, 0 };
+
+/*
+ * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
+ * If this function is called from process context, it can sleep
+ * and there is no harm in sleeping; in case this function is called
+ * from an interrupt handler, we cannot sleep and need the NO_SLEEP flag set.
+ * Based on the sleep flag, the driver will call either msleep, pause or DELAY.
+ * msleep and pause are of the same variant, but pause is used when mpr_mtx
+ * is not held by the driver.
+ */
+static int
+mpr_diag_reset(struct mpr_softc *sc,int sleep_flag)
+{
+ uint32_t reg;
+ int i, error, tries = 0;
+ uint8_t first_wait_done = FALSE;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ /* Clear any pending interrupts */
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+
+	/*
+	 * Force NO_SLEEP for threads that are prohibited from sleeping,
+	 * e.g. threads running in an interrupt handler must not sleep.
+	 */
+#if __FreeBSD_version >= 1000029
+ if (curthread->td_no_sleeping)
+#else //__FreeBSD_version < 1000029
+ if (curthread->td_pflags & TDP_NOSLEEPING)
+#endif //__FreeBSD_version >= 1000029
+ sleep_flag = NO_SLEEP;
+
+ /* Push the magic sequence */
+ error = ETIMEDOUT;
+ while (tries++ < 20) {
+ for (i = 0; i < sizeof(mpt2_reset_magic); i++)
+ mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
+ mpt2_reset_magic[i]);
+
+ /* wait 100 msec */
+ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
+ msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
+ "mprdiag", hz/10);
+ else if (sleep_flag == CAN_SLEEP)
+ pause("mprdiag", hz/10);
+ else
+ DELAY(100 * 1000);
+
+ reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
+ if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
+ error = 0;
+ break;
+ }
+ }
+ if (error)
+ return (error);
+
+ /* Send the actual reset. XXX need to refresh the reg? */
+ mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
+ reg | MPI2_DIAG_RESET_ADAPTER);
+
+ /* Wait up to 300 seconds in 50ms intervals */
+ error = ETIMEDOUT;
+ for (i = 0; i < 6000; i++) {
+ /*
+ * Wait 50 msec. If this is the first time through, wait 256
+ * msec to satisfy Diag Reset timing requirements.
+ */
+ if (first_wait_done) {
+ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
+ msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
+ "mprdiag", hz/20);
+ else if (sleep_flag == CAN_SLEEP)
+ pause("mprdiag", hz/20);
+ else
+ DELAY(50 * 1000);
+ } else {
+ DELAY(256 * 1000);
+ first_wait_done = TRUE;
+ }
+ /*
+ * Check for the RESET_ADAPTER bit to be cleared first, then
+ * wait for the RESET state to be cleared, which takes a little
+ * longer.
+ */
+ reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
+ if (reg & MPI2_DIAG_RESET_ADAPTER) {
+ continue;
+ }
+ reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+ if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
+ error = 0;
+ break;
+ }
+ }
+ if (error)
+ return (error);
+
+ mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
+
+ return (0);
+}
+
+static int
+mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
+{
+
+ MPR_FUNCTRACE(sc);
+
+ mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
+ MPI2_DOORBELL_FUNCTION_SHIFT);
+
+ if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed : <%s>\n",
+ __func__);
+ return (ETIMEDOUT);
+ }
+
+ return (0);
+}
+
+/*
+ * Drive the IOC into the READY state.  Polls the doorbell state machine,
+ * issuing diag resets or message unit resets as required by the current
+ * state.  With the 50ms settle delay per iteration, the 1200-try loop
+ * bounds this at roughly 60 seconds.
+ *
+ * Returns 0 once READY, ENXIO if a peer host owns the IOC, EINVAL on an
+ * unrecognized state, or the last error on timeout.
+ */
+static int
+mpr_transition_ready(struct mpr_softc *sc)
+{
+	uint32_t reg, state;
+	int error, tries = 0;
+	int sleep_flags;
+
+	MPR_FUNCTRACE(sc);
+	/* If we are in attach call, do not sleep */
+	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
+	    ? CAN_SLEEP : NO_SLEEP;
+
+	error = 0;
+	while (tries++ < 1200) {
+		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+		mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);
+
+		/*
+		 * Ensure the IOC is ready to talk.  If it's not, try
+		 * resetting it.
+		 */
+		if (reg & MPI2_DOORBELL_USED) {
+			mpr_diag_reset(sc, sleep_flags);
+			DELAY(50000);
+			continue;
+		}
+
+		/* Is the adapter owned by another peer? */
+		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
+		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
+			device_printf(sc->mpr_dev, "IOC is under the control "
+			    "of another peer host, aborting initialization.\n");
+			return (ENXIO);
+		}
+
+		state = reg & MPI2_IOC_STATE_MASK;
+		if (state == MPI2_IOC_STATE_READY) {
+			/* Ready to go! */
+			error = 0;
+			break;
+		} else if (state == MPI2_IOC_STATE_FAULT) {
+			mpr_dprint(sc, MPR_FAULT, "IOC in fault state 0x%x\n",
+			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
+			mpr_diag_reset(sc, sleep_flags);
+		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
+			/* Need to take ownership */
+			mpr_message_unit_reset(sc, sleep_flags);
+		} else if (state == MPI2_IOC_STATE_RESET) {
+			/* Wait a bit, IOC might be in transition */
+			mpr_dprint(sc, MPR_FAULT,
+			    "IOC in unexpected reset state\n");
+		} else {
+			mpr_dprint(sc, MPR_FAULT,
+			    "IOC in unknown state 0x%x\n", state);
+			error = EINVAL;
+			break;
+		}
+
+		/* Wait 50ms for things to settle down. */
+		DELAY(50000);
+	}
+
+	if (error)
+		device_printf(sc->mpr_dev, "Cannot transition IOC to ready\n");
+
+	return (error);
+}
+
+/*
+ * Bring the IOC to the OPERATIONAL state: first ensure it is READY
+ * (transitioning it if necessary), then send the IOCInit request.
+ *
+ * Returns 0 on success, or the error from mpr_transition_ready() /
+ * mpr_send_iocinit().
+ */
+static int
+mpr_transition_operational(struct mpr_softc *sc)
+{
+	uint32_t reg, state;
+	int error;
+
+	MPR_FUNCTRACE(sc);
+
+	error = 0;
+	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+	mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);
+
+	state = reg & MPI2_IOC_STATE_MASK;
+	if (state != MPI2_IOC_STATE_READY) {
+		if ((error = mpr_transition_ready(sc)) != 0) {
+			mpr_dprint(sc, MPR_FAULT,
+			    "%s failed to transition ready\n", __func__);
+			return (error);
+		}
+	}
+
+	error = mpr_send_iocinit(sc);
+	return (error);
+}
+
+/*
+ * This is called during attach and when re-initializing due to a Diag Reset.
+ * IOC Facts is used to allocate many of the structures needed by the driver.
+ * If called from attach, de-allocation is not required because the driver has
+ * not allocated any structures yet, but if called from a Diag Reset, previously
+ * allocated structures based on IOC Facts will need to be freed and re-
+ * allocated based on the latest IOC Facts.
+ */
+/*
+ * Read IOC Facts and (re)allocate every driver structure whose size is
+ * derived from them.  'attaching' != 0 means first-time attach: errors
+ * are returned to the OS.  'attaching' == 0 means re-init after a Diag
+ * Reset: the old Facts are saved, compared against the fresh ones, and
+ * buffers are freed/reallocated only if a relevant field changed;
+ * failures in that path panic because the controller state cannot be
+ * recovered.
+ */
+static int
+mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
+{
+	int error, i;
+	Mpi2IOCFactsReply_t saved_facts;
+	uint8_t saved_mode, reallocating;
+	struct mprsas_lun *lun, *lun_tmp;
+	struct mprsas_target *targ;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* Save old IOC Facts and then only reallocate if Facts have changed */
+	if (!attaching) {
+		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
+	}
+
+	/*
+	 * Get IOC Facts. In all cases throughout this function, panic if doing
+	 * a re-initialization and only return the error if attaching so the OS
+	 * can handle it.
+	 */
+	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
+		if (attaching) {
+			mpr_dprint(sc, MPR_FAULT, "%s failed to get IOC Facts "
+			    "with error %d\n", __func__, error);
+			return (error);
+		} else {
+			panic("%s failed to get IOC Facts with error %d\n",
+			    __func__, error);
+		}
+	}
+
+	mpr_print_iocfacts(sc, sc->facts);
+
+	snprintf(sc->fw_version, sizeof(sc->fw_version),
+	    "%02d.%02d.%02d.%02d",
+	    sc->facts->FWVersion.Struct.Major,
+	    sc->facts->FWVersion.Struct.Minor,
+	    sc->facts->FWVersion.Struct.Unit,
+	    sc->facts->FWVersion.Struct.Dev);
+
+	mpr_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version,
+	    MPR_DRIVER_VERSION);
+	mpr_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
+	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
+	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
+	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
+
+	/*
+	 * If the chip doesn't support event replay then a hard reset will be
+	 * required to trigger a full discovery. Do the reset here then
+	 * retransition to Ready. A hard reset might have already been done,
+	 * but it doesn't hurt to do it again. Only do this if attaching, not
+	 * for a Diag Reset.
+	 */
+	if (attaching) {
+		if ((sc->facts->IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
+			mpr_diag_reset(sc, NO_SLEEP);
+			if ((error = mpr_transition_ready(sc)) != 0) {
+				mpr_dprint(sc, MPR_FAULT, "%s failed to "
+				    "transition to ready with error %d\n",
+				    __func__, error);
+				return (error);
+			}
+		}
+	}
+
+	/*
+	 * Set flag if IR Firmware is loaded. If the RAID Capability has
+	 * changed from the previous IOC Facts, log a warning, but only if
+	 * checking this after a Diag Reset and not during attach.
+	 */
+	saved_mode = sc->ir_firmware;
+	if (sc->facts->IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
+		sc->ir_firmware = 1;
+	if (!attaching) {
+		if (sc->ir_firmware != saved_mode) {
+			mpr_dprint(sc, MPR_FAULT, "%s new IR/IT mode in IOC "
+			    "Facts does not match previous mode\n", __func__);
+		}
+	}
+
+	/* Only deallocate and reallocate if relevant IOC Facts have changed */
+	reallocating = FALSE;
+	if ((!attaching) &&
+	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
+	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
+	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
+	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
+	    (saved_facts.ProductID != sc->facts->ProductID) ||
+	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
+	    (saved_facts.IOCRequestFrameSize !=
+	    sc->facts->IOCRequestFrameSize) ||
+	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
+	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
+	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
+	    (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
+	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
+	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
+	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
+	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
+	    (saved_facts.MaxPersistentEntries !=
+	    sc->facts->MaxPersistentEntries))) {
+		reallocating = TRUE;
+	}
+
+	/*
+	 * Some things should be done if attaching or re-allocating after a Diag
+	 * Reset, but are not needed after a Diag Reset if the FW has not
+	 * changed.
+	 */
+	if (attaching || reallocating) {
+		/*
+		 * Check if controller supports FW diag buffers and set flag to
+		 * enable each type.
+		 */
+		if (sc->facts->IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
+			    enabled = TRUE;
+		if (sc->facts->IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
+			    enabled = TRUE;
+		if (sc->facts->IOCCapabilities &
+		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
+			    enabled = TRUE;
+
+		/*
+		 * Set flag if EEDP is supported and if TLR is supported.
+		 */
+		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
+			sc->eedp_enabled = TRUE;
+		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
+			sc->control_TLR = TRUE;
+
+		/*
+		 * Size the queues. Since the reply queues always need one free
+		 * entry, we'll just deduct one reply message here.
+		 */
+		sc->num_reqs = MIN(MPR_REQ_FRAMES, sc->facts->RequestCredit);
+		sc->num_replies = MIN(MPR_REPLY_FRAMES + MPR_EVT_REPLY_FRAMES,
+		    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
+
+		/*
+		 * Initialize all Tail Queues
+		 */
+		TAILQ_INIT(&sc->req_list);
+		TAILQ_INIT(&sc->high_priority_req_list);
+		TAILQ_INIT(&sc->chain_list);
+		TAILQ_INIT(&sc->tm_list);
+	}
+
+	/*
+	 * If doing a Diag Reset and the FW is significantly different
+	 * (reallocating will be set above in IOC Facts comparison), then all
+	 * buffers based on the IOC Facts will need to be freed before they are
+	 * reallocated.
+	 */
+	if (reallocating) {
+		mpr_iocfacts_free(sc);
+
+		/*
+		 * The number of targets is based on IOC Facts, so free all of
+		 * the allocated LUNs for each target and then the target buffer
+		 * itself.
+		 */
+		for (i=0; i< saved_facts.MaxTargets; i++) {
+			targ = &sc->sassc->targets[i];
+			SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link,
+			    lun_tmp) {
+				free(lun, M_MPR);
+			}
+		}
+		free(sc->sassc->targets, M_MPR);
+
+		sc->sassc->targets = malloc(sizeof(struct mprsas_target) *
+		    sc->facts->MaxTargets, M_MPR, M_WAITOK|M_ZERO);
+		/*
+		 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL,
+		 * so this check (and the panic) is dead code.
+		 */
+		if (!sc->sassc->targets) {
+			panic("%s failed to alloc targets with error %d\n",
+			    __func__, ENOMEM);
+		}
+	}
+
+	/*
+	 * Any deallocation has been completed. Now start reallocating
+	 * if needed. Will only need to reallocate if attaching or if the new
+	 * IOC Facts are different from the previous IOC Facts after a Diag
+	 * Reset. Targets have already been allocated above if needed.
+	 */
+	if (attaching || reallocating) {
+		if (((error = mpr_alloc_queues(sc)) != 0) ||
+		    ((error = mpr_alloc_replies(sc)) != 0) ||
+		    ((error = mpr_alloc_requests(sc)) != 0)) {
+			if (attaching ) {
+				mpr_dprint(sc, MPR_FAULT, "%s failed to alloc "
+				    "queues with error %d\n", __func__, error);
+				mpr_free(sc);
+				return (error);
+			} else {
+				panic("%s failed to alloc queues with error "
+				    "%d\n", __func__, error);
+			}
+		}
+	}
+
+	/* Always initialize the queues */
+	bzero(sc->free_queue, sc->fqdepth * 4);
+	mpr_init_queues(sc);
+
+	/*
+	 * Always get the chip out of the reset state, but only panic if not
+	 * attaching. If attaching and there is an error, that is handled by
+	 * the OS.
+	 */
+	error = mpr_transition_operational(sc);
+	if (error != 0) {
+		if (attaching) {
+			mpr_printf(sc, "%s failed to transition to "
+			    "operational with error %d\n", __func__, error);
+			mpr_free(sc);
+			return (error);
+		} else {
+			panic("%s failed to transition to operational with "
+			    "error %d\n", __func__, error);
+		}
+	}
+
+	/*
+	 * Finish the queue initialization.
+	 * These are set here instead of in mpr_init_queues() because the
+	 * IOC resets these values during the state transition in
+	 * mpr_transition_operational().  The free index is set to 1
+	 * because the corresponding index in the IOC is set to 0, and the
+	 * IOC treats the queues as full if both are set to the same value.
+	 * Hence the reason that the queue can't hold all of the possible
+	 * replies.
+	 */
+	sc->replypostindex = 0;
+	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
+	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
+
+	/*
+	 * Attach the subsystems so they can prepare their event masks.
+	 */
+	/* XXX Should be dynamic so that IM/IR and user modules can attach */
+	if (attaching) {
+		if (((error = mpr_attach_log(sc)) != 0) ||
+		    ((error = mpr_attach_sas(sc)) != 0) ||
+		    ((error = mpr_attach_user(sc)) != 0)) {
+			mpr_printf(sc, "%s failed to attach all subsystems: "
+			    "error %d\n", __func__, error);
+			mpr_free(sc);
+			return (error);
+		}
+
+		if ((error = mpr_pci_setup_interrupts(sc)) != 0) {
+			mpr_printf(sc, "%s failed to setup interrupts\n",
+			    __func__);
+			mpr_free(sc);
+			return (error);
+		}
+	}
+
+	return (error);
+}
+
+/*
+ * This is called when memory is being freed (during detach for example) and
+ * when buffers need to be reallocated due to a Diag Reset.
+ */
+/*
+ * Tear down every DMA allocation whose size came from IOC Facts: the
+ * free/post queues, chain frames, sense buffers, reply frames, request
+ * frames, the chain array, and the per-command dmamaps.  Each resource
+ * is guarded so this is safe to call on a partially-initialized softc.
+ */
+static void
+mpr_iocfacts_free(struct mpr_softc *sc)
+{
+	struct mpr_command *cm;
+	int i;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	if (sc->free_busaddr != 0)
+		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
+	if (sc->free_queue != NULL)
+		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
+		    sc->queues_map);
+	if (sc->queues_dmat != NULL)
+		bus_dma_tag_destroy(sc->queues_dmat);
+
+	if (sc->chain_busaddr != 0)
+		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
+	if (sc->chain_frames != NULL)
+		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
+		    sc->chain_map);
+	if (sc->chain_dmat != NULL)
+		bus_dma_tag_destroy(sc->chain_dmat);
+
+	if (sc->sense_busaddr != 0)
+		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
+	if (sc->sense_frames != NULL)
+		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
+		    sc->sense_map);
+	if (sc->sense_dmat != NULL)
+		bus_dma_tag_destroy(sc->sense_dmat);
+
+	if (sc->reply_busaddr != 0)
+		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
+	if (sc->reply_frames != NULL)
+		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
+		    sc->reply_map);
+	if (sc->reply_dmat != NULL)
+		bus_dma_tag_destroy(sc->reply_dmat);
+
+	if (sc->req_busaddr != 0)
+		bus_dmamap_unload(sc->req_dmat, sc->req_map);
+	if (sc->req_frames != NULL)
+		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
+	if (sc->req_dmat != NULL)
+		bus_dma_tag_destroy(sc->req_dmat);
+
+	if (sc->chains != NULL)
+		free(sc->chains, M_MPR);
+	if (sc->commands != NULL) {
+		/* SMID 0 is never used, so commands[0] has no dmamap. */
+		for (i = 1; i < sc->num_reqs; i++) {
+			cm = &sc->commands[i];
+			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
+		}
+		free(sc->commands, M_MPR);
+	}
+	if (sc->buffer_dmat != NULL)
+		bus_dma_tag_destroy(sc->buffer_dmat);
+}
+
+/*
+ * The terms diag reset and hard reset are used interchangeably in the MPI
+ * docs to mean resetting the controller chip. In this code diag reset
+ * cleans everything up, and the hard reset function just sends the reset
+ * sequence to the chip. This should probably be refactored so that every
+ * subsystem gets a reset notification of some sort, and can clean up
+ * appropriately.
+ */
+/*
+ * Full controller re-initialization after a fault: mask interrupts,
+ * perform a diag reset, restore PCI/MSI-X state, let the SAS layer
+ * abort outstanding I/O, re-read IOC Facts (reallocating buffers if
+ * they changed), re-read config pages, rebuild mapping tables, and
+ * re-register event masks.  Must be called with the softc mutex held.
+ * Re-entrant calls while a reset is in progress return 0 immediately.
+ */
+int
+mpr_reinit(struct mpr_softc *sc)
+{
+	int error;
+	struct mprsas_softc *sassc;
+
+	sassc = sc->sassc;
+
+	MPR_FUNCTRACE(sc);
+
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
+		mpr_dprint(sc, MPR_INIT, "%s reset already in progress\n",
+		    __func__);
+		return 0;
+	}
+
+	mpr_dprint(sc, MPR_INFO, "Reinitializing controller,\n");
+	/* make sure the completion callbacks can recognize they're getting
+	 * a NULL cm_reply due to a reset.
+	 */
+	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;
+
+	/*
+	 * Mask interrupts here.
+	 */
+	mpr_dprint(sc, MPR_INIT, "%s mask interrupts\n", __func__);
+	mpr_mask_intr(sc);
+
+	error = mpr_diag_reset(sc, CAN_SLEEP);
+	if (error != 0) {
+		/* Couldn't reset the hardware; nothing left to recover. */
+		panic("%s hard reset failed with error %d\n", __func__, error);
+	}
+
+	/* Restore the PCI state, including the MSI-X registers */
+	mpr_pci_restore(sc);
+
+	/* Give the I/O subsystem special priority to get itself prepared */
+	mprsas_handle_reinit(sc);
+
+	/*
+	 * Get IOC Facts and allocate all structures based on this information.
+	 * The attach function will also call mpr_iocfacts_allocate at startup.
+	 * If relevant values have changed in IOC Facts, this function will free
+	 * all of the memory based on IOC Facts and reallocate that memory.
+	 */
+	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
+		panic("%s IOC Facts based allocation failed with error %d\n",
+		    __func__, error);
+	}
+
+	/*
+	 * Mapping structures will be re-allocated after getting IOC Page8, so
+	 * free these structures here.
+	 */
+	mpr_mapping_exit(sc);
+
+	/*
+	 * The static page function currently read is IOC Page8. Others can be
+	 * added in future. It's possible that the values in IOC Page8 have
+	 * changed after a Diag Reset due to user modification, so always read
+	 * these. Interrupts are masked, so unmask them before getting config
+	 * pages.
+	 */
+	mpr_unmask_intr(sc);
+	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
+	mpr_base_static_config_pages(sc);
+
+	/*
+	 * Some mapping info is based in IOC Page8 data, so re-initialize the
+	 * mapping tables.
+	 */
+	mpr_mapping_initialize(sc);
+
+	/*
+	 * Restart will reload the event masks clobbered by the reset, and
+	 * then enable the port.
+	 */
+	mpr_reregister_events(sc);
+
+	/* the end of discovery will release the simq, so we're done. */
+	mpr_dprint(sc, MPR_INFO, "%s finished sc %p post %u free %u\n",
+	    __func__, sc, sc->replypostindex, sc->replyfreeindex);
+	mprsas_release_simq_reinit(sassc);
+
+	return 0;
+}
+
+/* Wait for the chip to ACK a word that we've put into its FIFO.
+ * Wait for <timeout> seconds.  Each iteration of the loop busy-waits
+ * for 500 microseconds.
+ * Total is [ 0.5 * (2000 * <timeout>) ] in milliseconds.
+ */
+/*
+ * Poll until the IOC clears the system-to-IOC doorbell status bit,
+ * i.e. it has consumed the word we just wrote.  Detects an IOC fault
+ * (EFAULT) and a hot-removed/hung device reading as all-ones
+ * (falls through to the timeout path).  Returns 0, EFAULT, or
+ * ETIMEDOUT.
+ */
+static int
+mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
+{
+	u32 cntdn, count;
+	u32 int_status;
+	u32 doorbell;
+
+	count = 0;
+	/* 1ms sleeps when sleeping is allowed, otherwise 0.5ms busy waits. */
+	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+	do {
+		int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
+		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+			mpr_dprint(sc, MPR_INIT, "%s: successful count(%d), "
+			    "timeout(%d)\n", __func__, count, timeout);
+			return 0;
+		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+			if ((doorbell & MPI2_IOC_STATE_MASK) ==
+			    MPI2_IOC_STATE_FAULT) {
+				mpr_dprint(sc, MPR_FAULT,
+				    "fault_state(0x%04x)!\n", doorbell);
+				return (EFAULT);
+			}
+		} else if (int_status == 0xFFFFFFFF)
+			goto out;
+
+		/*
+		 * If it can sleep, sleep for 1 millisecond, else busy loop for
+		 * 0.5 millisecond
+		 */
+		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
+			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", hz/1000);
+		else if (sleep_flag == CAN_SLEEP)
+			pause("mprdba", hz/1000);
+		else
+			DELAY(500);
+		count++;
+	} while (--cntdn);
+
+	out:
+	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
+	    "int_status(%x)!\n", __func__, count, int_status);
+	return (ETIMEDOUT);
+}
+
+/* Wait for the chip to signal that the next word in its FIFO can be fetched */
+static int
+mpr_wait_db_int(struct mpr_softc *sc)
+{
+ int retry;
+
+ for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
+ if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
+ MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
+ return (0);
+ DELAY(2000);
+ }
+ return (ETIMEDOUT);
+}
+
+/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
+static int
+mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
+ int req_sz, int reply_sz, int timeout)
+{
+ uint32_t *data32;
+ uint16_t *data16;
+ int i, count, ioc_sz, residual;
+ int sleep_flags = CAN_SLEEP;
+
+#if __FreeBSD_version >= 1000029
+ if (curthread->td_no_sleeping)
+#else //__FreeBSD_version < 1000029
+ if (curthread->td_pflags & TDP_NOSLEEPING)
+#endif //__FreeBSD_version >= 1000029
+ sleep_flags = NO_SLEEP;
+
+ /* Step 1 */
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+
+ /* Step 2 */
+ if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
+ return (EBUSY);
+
+ /* Step 3
+ * Announce that a message is coming through the doorbell. Messages
+ * are pushed at 32bit words, so round up if needed.
+ */
+ count = (req_sz + 3) / 4;
+ mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
+ (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
+ (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
+
+ /* Step 4 */
+ if (mpr_wait_db_int(sc) ||
+ (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
+ mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
+ return (ENXIO);
+ }
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+ if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
+ return (ENXIO);
+ }
+
+ /* Step 5 */
+ /* Clock out the message data synchronously in 32-bit dwords*/
+ data32 = (uint32_t *)req;
+ for (i = 0; i < count; i++) {
+ mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
+ if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
+ mpr_dprint(sc, MPR_FAULT,
+ "Timeout while writing doorbell\n");
+ return (ENXIO);
+ }
+ }
+
+ /* Step 6 */
+ /* Clock in the reply in 16-bit words. The total length of the
+ * message is always in the 4th byte, so clock out the first 2 words
+ * manually, then loop the rest.
+ */
+ data16 = (uint16_t *)reply;
+ if (mpr_wait_db_int(sc) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
+ return (ENXIO);
+ }
+ data16[0] =
+ mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+ if (mpr_wait_db_int(sc) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
+ return (ENXIO);
+ }
+ data16[1] =
+ mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+
+ /* Number of 32bit words in the message */
+ ioc_sz = reply->MsgLength;
+
+ /*
+ * Figure out how many 16bit words to clock in without overrunning.
+ * The precision loss with dividing reply_sz can safely be
+ * ignored because the messages can only be multiples of 32bits.
+ */
+ residual = 0;
+ count = MIN((reply_sz / 4), ioc_sz) * 2;
+ if (count < ioc_sz * 2) {
+ residual = ioc_sz * 2 - count;
+ mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
+ "residual message words\n", residual);
+ }
+
+ for (i = 2; i < count; i++) {
+ if (mpr_wait_db_int(sc) != 0) {
+ mpr_dprint(sc, MPR_FAULT,
+ "Timeout reading doorbell %d\n", i);
+ return (ENXIO);
+ }
+ data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
+ MPI2_DOORBELL_DATA_MASK;
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+ }
+
+ /*
+ * Pull out residual words that won't fit into the provided buffer.
+ * This keeps the chip from hanging due to a driver programming
+ * error.
+ */
+ while (residual--) {
+ if (mpr_wait_db_int(sc) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
+ return (ENXIO);
+ }
+ (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+ }
+
+ /* Step 7 */
+ if (mpr_wait_db_int(sc) != 0) {
+ mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
+ return (ENXIO);
+ }
+ if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
+ mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
+
+ return (0);
+}
+
+/*
+ * Post a command's request descriptor to the IOC by writing its low and
+ * high dwords to the request descriptor post registers, and track the
+ * high-water mark of in-flight commands.  Requires the softc mutex once
+ * attach is complete (unless shutting down).
+ */
+static void
+mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	reply_descriptor rd;
+
+	MPR_FUNCTRACE(sc);
+	mpr_dprint(sc, MPR_TRACE, "%s SMID %u cm %p ccb %p\n", __func__,
+	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
+
+	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
+	    MPR_FLAGS_SHUTDOWN))
+		mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	if (++sc->io_cmds_active > sc->io_cmds_highwater)
+		sc->io_cmds_highwater++;
+
+	rd.u.low = cm->cm_desc.Words.Low;
+	rd.u.high = cm->cm_desc.Words.High;
+	rd.word = htole64(rd.word);
+	/* TODO-We may need to make below regwrite atomic */
+	mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
+	    rd.u.low);
+	mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
+	    rd.u.high);
+}
+
+/*
+ * Just the FACTS, ma'am.
+ */
+/*
+ * Fetch IOC Facts via the synchronous doorbell handshake.  The caller's
+ * 'facts' buffer doubles as the reply buffer.  Returns the result of
+ * mpr_request_sync() (0 on success).
+ */
+static int
+mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
+{
+	MPI2_DEFAULT_REPLY *reply;
+	MPI2_IOC_FACTS_REQUEST request;
+	int error, req_sz, reply_sz;
+
+	MPR_FUNCTRACE(sc);
+
+	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
+	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
+	reply = (MPI2_DEFAULT_REPLY *)facts;
+
+	bzero(&request, req_sz);
+	request.Function = MPI2_FUNCTION_IOC_FACTS;
+	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
+
+	return (error);
+}
+
+/*
+ * Build and send the IOCInit request over the doorbell, telling the IOC
+ * where the request frames, reply descriptor post queue, and reply free
+ * queue live and how deep they are.  Returns 0 on success, or ENXIO if
+ * the IOC reports a non-success IOCStatus.
+ */
+static int
+mpr_send_iocinit(struct mpr_softc *sc)
+{
+	MPI2_IOC_INIT_REQUEST	init;
+	MPI2_DEFAULT_REPLY	reply;
+	int req_sz, reply_sz, error;
+	struct timeval now;
+	uint64_t time_in_msec;
+
+	MPR_FUNCTRACE(sc);
+
+	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
+	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
+	bzero(&init, req_sz);
+	bzero(&reply, reply_sz);
+
+	/*
+	 * Fill in the init block.  Note that most addresses are
+	 * deliberately in the lower 32bits of memory.  This is a micro-
+	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
+	 */
+	init.Function = MPI2_FUNCTION_IOC_INIT;
+	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+	init.MsgVersion = htole16(MPI2_VERSION);
+	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
+	init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
+	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
+	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
+	init.SenseBufferAddressHigh = 0;
+	init.SystemReplyAddressHigh = 0;
+	init.SystemRequestFrameBaseAddress.High = 0;
+	init.SystemRequestFrameBaseAddress.Low =
+	    htole32((uint32_t)sc->req_busaddr);
+	init.ReplyDescriptorPostQueueAddress.High = 0;
+	init.ReplyDescriptorPostQueueAddress.Low =
+	    htole32((uint32_t)sc->post_busaddr);
+	init.ReplyFreeQueueAddress.High = 0;
+	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
+	getmicrotime(&now);
+	/*
+	 * NOTE(review): tv_sec * 1000 is computed in time_t width before
+	 * widening to uint64_t — presumably fine on 64-bit time_t, but
+	 * could overflow where time_t is 32-bit; verify target platforms.
+	 */
+	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
+	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
+	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
+
+	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
+	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+		error = ENXIO;
+
+	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
+	return (error);
+}
+
+/*
+ * bus_dmamap_load() callback: store the bus address of the single DMA
+ * segment into the bus_addr_t pointed to by 'arg'.  Only valid for
+ * single-segment loads; 'nsegs' and 'error' are intentionally ignored.
+ */
+void
+mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	bus_addr_t *addr;
+
+	addr = arg;
+	*addr = segs[0].ds_addr;
+}
+
+/*
+ * Allocate one 16-byte-aligned, below-4GB DMA region holding both the
+ * reply free queue (4-byte entries) and the reply descriptor post queue
+ * (8-byte entries), and record their virtual and bus addresses in the
+ * softc.  Returns 0 or ENOMEM.
+ */
+static int
+mpr_alloc_queues(struct mpr_softc *sc)
+{
+	bus_addr_t queues_busaddr;
+	uint8_t *queues;
+	int qsize, fqsize, pqsize;
+
+	/*
+	 * The reply free queue contains 4 byte entries in multiples of 16 and
+	 * aligned on a 16 byte boundary.  There must always be an unused entry.
+	 * This queue supplies fresh reply frames for the firmware to use.
+	 *
+	 * The reply descriptor post queue contains 8 byte entries in
+	 * multiples of 16 and aligned on a 16 byte boundary.  This queue
+	 * contains filled-in reply frames sent from the firmware to the host.
+	 *
+	 * These two queues are allocated together for simplicity.
+	 */
+	sc->fqdepth = roundup2((sc->num_replies + 1), 16);
+	sc->pqdepth = roundup2((sc->num_replies + 1), 16);
+	fqsize= sc->fqdepth * 4;
+	pqsize = sc->pqdepth * 8;
+	qsize = fqsize + pqsize;
+
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				16, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				qsize,			/* maxsize */
+				1,			/* nsegments */
+				qsize,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->queues_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate queues DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
+	    &sc->queues_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate queues memory\n");
+		return (ENOMEM);
+	}
+	bzero(queues, qsize);
+	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
+	    mpr_memaddr_cb, &queues_busaddr, 0);
+
+	/* The free queue comes first; the post queue follows it. */
+	sc->free_queue = (uint32_t *)queues;
+	sc->free_busaddr = queues_busaddr;
+	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
+	sc->post_busaddr = queues_busaddr + fqsize;
+
+	return (0);
+}
+
+/*
+ * Allocate the DMA region for reply frames (ReplyFrameSize is in 4-byte
+ * units).  Space is sized for fqdepth frames even though only
+ * num_replies can be outstanding at once.  Returns 0 or ENOMEM.
+ */
+static int
+mpr_alloc_replies(struct mpr_softc *sc)
+{
+	int rsize, num_replies;
+
+	/*
+	 * sc->num_replies should be one less than sc->fqdepth.  We need to
+	 * allocate space for sc->fqdepth replies, but only sc->num_replies
+	 * replies can be used at once.
+	 */
+	num_replies = max(sc->fqdepth, sc->num_replies);
+
+	rsize = sc->facts->ReplyFrameSize * num_replies * 4; 
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				4, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				rsize,			/* maxsize */
+				1,			/* nsegments */
+				rsize,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->reply_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate replies DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
+	    BUS_DMA_NOWAIT, &sc->reply_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate replies memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->reply_frames, rsize);
+	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
+	    mpr_memaddr_cb, &sc->reply_busaddr, 0);
+
+	return (0);
+}
+
+/*
+ * Allocate the request frames, chain frames, sense buffers, data-buffer
+ * DMA tag, chain bookkeeping array, and the per-SMID command array,
+ * then carve the frame pools into per-command slices and place each
+ * command on the free (or high-priority free) list.  Returns 0 or
+ * ENOMEM.
+ */
+static int
+mpr_alloc_requests(struct mpr_softc *sc)
+{
+	struct mpr_command *cm;
+	struct mpr_chain *chain;
+	int i, rsize, nsegs;
+
+	/* IOCRequestFrameSize is in units of 4 bytes. */
+	rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				16, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				rsize,			/* maxsize */
+				1,			/* nsegments */
+				rsize,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->req_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate request DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
+	    BUS_DMA_NOWAIT, &sc->req_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate request memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->req_frames, rsize);
+	bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
+	    mpr_memaddr_cb, &sc->req_busaddr, 0);
+
+	/* Chain frames can live anywhere in the address space. */
+	rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4;
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				16, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR,	/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				rsize,			/* maxsize */
+				1,			/* nsegments */
+				rsize,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->chain_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate chain DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
+	    BUS_DMA_NOWAIT, &sc->chain_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate chain memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->chain_frames, rsize);
+	bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
+	    mpr_memaddr_cb, &sc->chain_busaddr, 0);
+
+	rsize = MPR_SENSE_LEN * sc->num_reqs;
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				1, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				rsize,			/* maxsize */
+				1,			/* nsegments */
+				rsize,			/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->sense_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate sense DMA tag\n");
+		return (ENOMEM);
+	}
+	if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
+	    BUS_DMA_NOWAIT, &sc->sense_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate sense memory\n");
+		return (ENOMEM);
+	}
+	bzero(sc->sense_frames, rsize);
+	bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
+	    mpr_memaddr_cb, &sc->sense_busaddr, 0);
+
+	sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR,
+	    M_WAITOK | M_ZERO);
+	/*
+	 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so this
+	 * check is dead code.
+	 */
+	if (!sc->chains) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		return (ENOMEM);
+	}
+	for (i = 0; i < sc->max_chains; i++) {
+		chain = &sc->chains[i];
+		chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
+		    i * sc->facts->IOCRequestFrameSize * 4);
+		chain->chain_busaddr = sc->chain_busaddr +
+		    i * sc->facts->IOCRequestFrameSize * 4;
+		mpr_free_chain(sc, chain);
+		sc->chain_free_lowwater++;
+	}
+
+	/* XXX Need to pick a more precise value */
+	nsegs = (MAXPHYS / PAGE_SIZE) + 1;
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				1, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR,	/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+				nsegs,			/* nsegments */
+				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+				BUS_DMA_ALLOCNOW,	/* flags */
+				busdma_lock_mutex,	/* lockfunc */
+				&sc->mpr_mtx,		/* lockarg */
+				&sc->buffer_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate buffer DMA tag\n");
+		return (ENOMEM);
+	}
+
+	/*
+	 * SMID 0 cannot be used as a free command per the firmware spec.
+	 * Just drop that command instead of risking accounting bugs.
+	 */
+	sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
+	    M_MPR, M_WAITOK | M_ZERO);
+	/* NOTE(review): M_WAITOK malloc cannot return NULL; dead check. */
+	if (!sc->commands) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		return (ENOMEM);
+	}
+	for (i = 1; i < sc->num_reqs; i++) {
+		cm = &sc->commands[i];
+		cm->cm_req = sc->req_frames +
+		    i * sc->facts->IOCRequestFrameSize * 4;
+		cm->cm_req_busaddr = sc->req_busaddr +
+		    i * sc->facts->IOCRequestFrameSize * 4;
+		cm->cm_sense = &sc->sense_frames[i];
+		cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
+		cm->cm_desc.Default.SMID = i;
+		cm->cm_sc = sc;
+		TAILQ_INIT(&cm->cm_chain_list);
+		callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);
+
+		/* XXX Is a failure here a critical problem? */
+		if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
+			if (i <= sc->facts->HighPriorityCredit)
+				mpr_free_high_priority_command(sc, cm);
+			else
+				mpr_free_command(sc, cm);
+		else {
+			panic("failed to allocate command %d\n", i);
+			/*
+			 * NOTE(review): unreachable — panic() does not
+			 * return, so the truncation below never runs.
+			 */
+			sc->num_reqs = i;
+			break;
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Prime the reply queues: fill the post queue with the 0xff "unused"
+ * pattern, sanity-check that num_replies leaves at least one free slot
+ * in the free queue (EINVAL otherwise), point every free-queue entry at
+ * a reply frame, and set the initial free index.
+ */
+static int
+mpr_init_queues(struct mpr_softc *sc)
+{
+	int i;
+
+	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
+
+	/*
+	 * According to the spec, we need to use one less reply than we
+	 * have space for on the queue.  So sc->num_replies (the number we
+	 * use) should be less than sc->fqdepth (allocated size).
+	 */
+	if (sc->num_replies >= sc->fqdepth)
+		return (EINVAL);
+
+	/*
+	 * Initialize all of the free queue entries.
+	 */
+	for (i = 0; i < sc->fqdepth; i++)
+		sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4);
+	sc->replyfreeindex = sc->num_replies;
+
+	return (0);
+}
+
+/* Get the driver parameter tunables.  Lowest priority are the driver defaults.
+ * Next are the global settings, if they exist.  Highest are the per-unit
+ * settings, if they exist.
+ */
+static void
+mpr_get_tunables(struct mpr_softc *sc)
+{
+	char tmpstr[80];
+
+	/* XXX default to some debugging for now */
+	sc->mpr_debug = MPR_INFO | MPR_FAULT;
+	sc->disable_msix = 0;
+	sc->disable_msi = 0;
+	sc->max_chains = MPR_CHAIN_FRAMES;
+
+	/*
+	 * Grab the global variables.  Each TUNABLE_INT_FETCH leaves the
+	 * default in place if the kenv variable is not set.
+	 */
+	TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug);
+	TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
+	TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
+	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
+
+	/* Grab the unit-instance variables; these override the globals. */
+	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
+	    device_get_unit(sc->mpr_dev));
+	TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug);
+
+	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
+	    device_get_unit(sc->mpr_dev));
+	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
+
+	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
+	    device_get_unit(sc->mpr_dev));
+	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
+
+	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
+	    device_get_unit(sc->mpr_dev));
+	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
+
+	/* Per-unit only: list of target IDs to exclude from attachment. */
+	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
+	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
+	    device_get_unit(sc->mpr_dev));
+	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
+}
+
+/*
+ * Create the per-controller sysctl tree and attach the runtime-visible
+ * knobs and counters.  Prefers the device's own sysctl context/tree; if
+ * that is unavailable it builds a private tree under hw.mpr.<unit>.
+ */
+static void
+mpr_setup_sysctl(struct mpr_softc *sc)
+{
+	struct sysctl_ctx_list *sysctl_ctx = NULL;
+	struct sysctl_oid *sysctl_tree = NULL;
+	char tmpstr[80], tmpstr2[80];
+
+	/*
+	 * Setup the sysctl variable so the user can change the debug level
+	 * on the fly.
+	 */
+	snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
+	    device_get_unit(sc->mpr_dev));
+	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));
+
+	sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
+	if (sysctl_ctx != NULL)
+		sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);
+
+	/* Fall back to a driver-private context; freed in mpr_free(). */
+	if (sysctl_tree == NULL) {
+		sysctl_ctx_init(&sc->sysctl_ctx);
+		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+		    SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
+		    CTLFLAG_RD, 0, tmpstr);
+		if (sc->sysctl_tree == NULL)
+			return;
+		sysctl_ctx = &sc->sysctl_ctx;
+		sysctl_tree = sc->sysctl_tree;
+	}
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0,
+	    "mpr debug level");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
+	    "Disable the use of MSI-X interrupts");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
+	    "Disable the use of MSI interrupts");
+
+	/*
+	 * NOTE(review): these two strings are exported CTLFLAG_RW but are
+	 * informational; consider CTLFLAG_RD -- confirm intent.
+	 */
+	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version,
+	    strlen(sc->fw_version), "firmware version");
+
+	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
+	    strlen(MPR_DRIVER_VERSION), "driver version");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "io_cmds_active", CTLFLAG_RD,
+	    &sc->io_cmds_active, 0, "number of currently active commands");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
+	    &sc->io_cmds_highwater, 0, "maximum active commands seen");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "chain_free", CTLFLAG_RD,
+	    &sc->chain_free, 0, "number of free chain elements");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
+	    &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
+
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "max_chains", CTLFLAG_RD,
+	    &sc->max_chains, 0,"maximum chain frames that will be allocated");
+
+	/* SYSCTL_ADD_UQUAD only exists on newer kernels. */
+#if __FreeBSD_version >= 900030
+	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
+	    &sc->chain_alloc_fail, "chain allocation failures");
+#endif //FreeBSD_version >= 900030
+}
+
+/*
+ * Main attach entry point, called from the bus attach method.
+ *
+ * Brings the IOC to the READY state, reads IOC Facts, allocates all
+ * Facts-derived resources, starts the watchdog, and schedules the rest
+ * of initialization (port enable / discovery) via a config intrhook.
+ * Returns 0 on success or an errno; on early failure the caller is
+ * expected to invoke the detach path to release partial allocations.
+ */
+int
+mpr_attach(struct mpr_softc *sc)
+{
+	int error;
+
+	mpr_get_tunables(sc);
+
+	MPR_FUNCTRACE(sc);
+
+	mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
+	callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
+	TAILQ_INIT(&sc->event_list);
+	timevalclear(&sc->lastfail);
+
+	if ((error = mpr_transition_ready(sc)) != 0) {
+		mpr_printf(sc, "%s failed to transition ready\n", __func__);
+		return (error);
+	}
+
+	sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
+	    M_ZERO|M_NOWAIT);
+	if (!sc->facts) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		return (ENOMEM);
+	}
+
+	/*
+	 * Get IOC Facts and allocate all structures based on this information.
+	 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
+	 * Facts. If relevant values have changed in IOC Facts, this function
+	 * will free all of the memory based on IOC Facts and reallocate that
+	 * memory. If this fails, any allocated memory should already be freed.
+	 */
+	if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
+		mpr_dprint(sc, MPR_FAULT, "%s IOC Facts based allocation "
+		    "failed with error %d\n", __func__, error);
+		return (error);
+	}
+
+	/* Start the periodic watchdog check on the IOC Doorbell */
+	mpr_periodic(sc);
+
+	/*
+	 * The portenable will kick off discovery events that will drive the
+	 * rest of the initialization process. The CAM/SAS module will
+	 * hold up the boot sequence until discovery is complete.
+	 */
+	sc->mpr_ich.ich_func = mpr_startup;
+	sc->mpr_ich.ich_arg = sc;
+	if (config_intrhook_establish(&sc->mpr_ich) != 0) {
+		mpr_dprint(sc, MPR_ERROR, "Cannot establish MPR config hook\n");
+		error = EINVAL;
+		/* Fall through: sysctl/eventhandler setup still happens;
+		 * the error is returned to the caller below. */
+	}
+
+	/*
+	 * Allow IR to shutdown gracefully when shutdown occurs.
+	 */
+	sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
+	    mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
+
+	if (sc->shutdown_eh == NULL)
+		mpr_dprint(sc, MPR_ERROR, "shutdown event registration "
+		    "failed\n");
+
+	mpr_setup_sysctl(sc);
+
+	sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;
+
+	return (error);
+}
+
+/* Run through any late-start handlers.  Invoked once from the config
+ * intrhook established in mpr_attach(), after interrupts are available.
+ * Unmasks controller interrupts, builds the device mapping tables, and
+ * kicks off SAS discovery (port enable).
+ */
+static void
+mpr_startup(void *arg)
+{
+	struct mpr_softc *sc;
+
+	sc = (struct mpr_softc *)arg;
+
+	mpr_lock(sc);
+	mpr_unmask_intr(sc);
+
+	/* initialize device mapping tables */
+	mpr_base_static_config_pages(sc);
+	mpr_mapping_initialize(sc);
+	mprsas_startup(sc);
+	mpr_unlock(sc);
+}
+
+/* Periodic watchdog.  Is called with the driver lock already held.
+ * Polls the IOC doorbell for the FAULT state: an over-temperature
+ * fault panics the machine (the hardware is about to stop anyway);
+ * any other fault triggers a diag reset via mpr_reinit().  Re-arms
+ * itself unless MPR_FLAGS_SHUTDOWN has been set (see mpr_free()).
+ */
+static void
+mpr_periodic(void *arg)
+{
+	struct mpr_softc *sc;
+	uint32_t db;
+
+	sc = (struct mpr_softc *)arg;
+	/* Returning without callout_reset() stops the watchdog. */
+	if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
+		return;
+
+	db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
+	if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+		if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
+		    IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
+			panic("TEMPERATURE FAULT: STOPPING.");
+		}
+		mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
+		mpr_reinit(sc);
+	}
+
+	callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
+}
+
+/*
+ * Event callback for the two firmware log events registered in
+ * mpr_attach_log().  Prints the event and, for LOG_DATA, hexdumps the
+ * payload when MPR_EVENT debugging is enabled.
+ */
+static void
+mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
+    MPI2_EVENT_NOTIFICATION_REPLY *event)
+{
+	MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
+
+	mpr_print_event(sc, event);
+
+	switch (event->Event) {
+	case MPI2_EVENT_LOG_DATA:
+		mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
+		if (sc->mpr_debug & MPR_EVENT)
+			hexdump(event->EventData, event->EventDataLength, NULL,
+			    0);
+		break;
+	case MPI2_EVENT_LOG_ENTRY_ADDED:
+		entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
+		mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
+		    "0x%x Sequence %d:\n", entry->LogEntryQualifier,
+		    entry->LogSequence);
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/*
+ * Register for the firmware log events (LOG_DATA and LOG_ENTRY_ADDED).
+ * The handle is stored in sc->mpr_log_eh for mpr_detach_log().
+ * Always returns 0.
+ */
+static int
+mpr_attach_log(struct mpr_softc *sc)
+{
+	uint8_t events[16];
+
+	bzero(events, 16);
+	setbit(events, MPI2_EVENT_LOG_DATA);
+	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
+
+	mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
+	    &sc->mpr_log_eh);
+
+	return (0);
+}
+
+/*
+ * Deregister the firmware log event handler registered by
+ * mpr_attach_log(), if any.  Always returns 0.
+ */
+static int
+mpr_detach_log(struct mpr_softc *sc)
+{
+
+	if (sc->mpr_log_eh != NULL)
+		mpr_deregister_events(sc, sc->mpr_log_eh);
+	return (0);
+}
+
+/*
+ * Free all of the driver resources and detach submodules.  Should be called
+ * without the lock held.  Order matters: the watchdog is stopped first,
+ * then the log/SAS/user submodules are detached, the IOC is returned to
+ * READY, and finally the Facts-based allocations, sysctl tree, shutdown
+ * eventhandler, and mutex are torn down.
+ */
+int
+mpr_free(struct mpr_softc *sc)
+{
+	int error;
+
+	/* Turn off the watchdog */
+	mpr_lock(sc);
+	/* mpr_periodic() sees this flag and declines to re-arm itself. */
+	sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
+	mpr_unlock(sc);
+	/* Lock must not be held for this */
+	callout_drain(&sc->periodic);
+
+	if (((error = mpr_detach_log(sc)) != 0) ||
+	    ((error = mpr_detach_sas(sc)) != 0))
+		return (error);
+
+	mpr_detach_user(sc);
+
+	/* Put the IOC back in the READY state. */
+	mpr_lock(sc);
+	if ((error = mpr_transition_ready(sc)) != 0) {
+		mpr_unlock(sc);
+		return (error);
+	}
+	mpr_unlock(sc);
+
+	if (sc->facts != NULL)
+		free(sc->facts, M_MPR);
+
+	/*
+	 * Free all buffers that are based on IOC Facts.  A Diag Reset may need
+	 * to free these buffers too.
+	 */
+	mpr_iocfacts_free(sc);
+
+	/* Only set if mpr_setup_sysctl() built a private tree. */
+	if (sc->sysctl_tree != NULL)
+		sysctl_ctx_free(&sc->sysctl_ctx);
+
+	/* Deregister the shutdown function */
+	if (sc->shutdown_eh != NULL)
+		EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
+
+	mtx_destroy(&sc->mpr_mtx);
+
+	return (0);
+}
+
+/*
+ * Finish processing a command: mark polled commands complete, invoke the
+ * completion callback if one is set, wake any sleeper waiting on the
+ * command, and decrement the active-command count.  Called with the
+ * driver lock held.
+ */
+static __inline void
+mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	MPR_FUNCTRACE(sc);
+
+	if (cm == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
+		return;
+	}
+
+	if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
+		cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
+
+	if (cm->cm_complete != NULL) {
+		mpr_dprint(sc, MPR_TRACE,
+		    "%s cm %p calling cm_complete %p data %p reply %p\n",
+		    __func__, cm, cm->cm_complete, cm->cm_complete_data,
+		    cm->cm_reply);
+		cm->cm_complete(sc, cm);
+	}
+
+	if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
+		mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
+		wakeup(cm);
+	}
+
+	/* Clamp at zero rather than wrapping if accounting ever drifts. */
+	if (sc->io_cmds_active != 0) {
+		sc->io_cmds_active--;
+	} else {
+		mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
+		    "out of sync - resynching to 0\n");
+	}
+}
+
+/*
+ * Decode and print the IOCLogInfo word from a reply that carried the
+ * LOG_INFO_AVAILABLE flag.  Only SAS loginfos (bus_type 3) are printed,
+ * and a few chatty codes (nexus loss, task aborts) are suppressed.
+ */
+static void
+mpr_sas_log_info(struct mpr_softc *sc , u32 log_info)
+{
+	union loginfo_type {
+		u32 loginfo;
+		struct {
+			u32 subcode:16;
+			u32 code:8;
+			u32 originator:4;
+			u32 bus_type:4;
+		} dw;
+	};
+	union loginfo_type sas_loginfo;
+	char *originator_str = NULL;
+
+	sas_loginfo.loginfo = log_info;
+	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+		return;
+
+	/* each nexus loss loginfo */
+	if (log_info == 0x31170000)
+		return;
+
+	/*
+	 * Eat the loginfos associated with task aborts.  The first
+	 * constant was previously written as decimal 30050000, which
+	 * never matched the intended 0x30050000 loginfo.
+	 */
+	if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
+	    (log_info == 0x31130000))
+		return;
+
+	switch (sas_loginfo.dw.originator) {
+	case 0:
+		originator_str = "IOP";
+		break;
+	case 1:
+		originator_str = "PL";
+		break;
+	case 2:
+		originator_str = "IR";
+		break;
+	}
+
+	mpr_dprint(sc, MPR_INFO, "log_info(0x%08x): originator(%s), "
+	    "code(0x%02x), sub_code(0x%04x)\n", log_info,
+	    originator_str, sas_loginfo.dw.code,
+	    sas_loginfo.dw.subcode);
+}
+
+/*
+ * If a reply frame advertises an attached IOCLogInfo word, hand it to
+ * mpr_sas_log_info() for decoding.  IOCStatus is byte-swapped from the
+ * little-endian wire format before testing the flag.
+ */
+static void
+mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+	u16 sc_status;
+
+	mpi_reply = (MPI2DefaultReply_t*)reply;
+	sc_status = le16toh(mpi_reply->IOCStatus);
+	if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+		mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
+}
+
+/*
+ * Legacy (INTx) interrupt handler.  Reads the host interrupt status
+ * register and bails out if this device did not raise the interrupt
+ * (the line may be shared); otherwise processes the reply post queue
+ * under the driver lock.
+ */
+void
+mpr_intr(void *data)
+{
+	struct mpr_softc *sc;
+	uint32_t status;
+
+	sc = (struct mpr_softc *)data;
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/*
+	 * Check interrupt status register to flush the bus.  This is
+	 * needed for both INTx interrupts and driver-driven polling
+	 */
+	status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
+	if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
+		return;
+
+	mpr_lock(sc);
+	mpr_intr_locked(data);
+	mpr_unlock(sc);
+	return;
+}
+
+/*
+ * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
+ * chip.  Hopefully this theory is correct.  MSI is never shared, so unlike
+ * mpr_intr() there is no interrupt-status read before processing.
+ */
+void
+mpr_intr_msi(void *data)
+{
+	struct mpr_softc *sc;
+
+	sc = (struct mpr_softc *)data;
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+	mpr_lock(sc);
+	mpr_intr_locked(data);
+	mpr_unlock(sc);
+	return;
+}
+
+/*
+ * The locking is overly broad and simplistic, but easy to deal with for now.
+ *
+ * Core reply processing loop, called with the driver lock held.  Walks
+ * the reply post queue until an unused (all-ones) descriptor is found,
+ * dispatching each descriptor by type: SCSI IO success descriptors map
+ * directly to a command by SMID; address-reply descriptors carry a reply
+ * frame whose address is validated before use.  Consumed descriptors are
+ * re-marked 0xffffffff and the hardware post index is updated once at
+ * the end if anything was consumed.
+ */
+void
+mpr_intr_locked(void *data)
+{
+	MPI2_REPLY_DESCRIPTORS_UNION *desc;
+	struct mpr_softc *sc;
+	struct mpr_command *cm = NULL;
+	uint8_t flags;
+	u_int pq;
+	MPI2_DIAG_RELEASE_REPLY *rel_rep;
+	mpr_fw_diagnostic_buffer_t *pBuffer;
+
+	sc = (struct mpr_softc *)data;
+
+	/* Remember the starting index to detect whether we consumed anything. */
+	pq = sc->replypostindex;
+	mpr_dprint(sc, MPR_TRACE,
+	    "%s sc %p starting with replypostindex %u\n",
+	    __func__, sc, sc->replypostindex);
+
+	for ( ;; ) {
+		cm = NULL;
+		desc = &sc->post_queue[sc->replypostindex];
+		flags = desc->Default.ReplyFlags &
+		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+		if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
+		    (le32toh(desc->Words.High) == 0xffffffff))
+			break;
+
+		/* increment the replypostindex now, so that event handlers
+		 * and cm completion handlers which decide to do a diag
+		 * reset can zero it without it getting incremented again
+		 * afterwards, and we break out of this loop on the next
+		 * iteration since the reply post queue has been cleared to
+		 * 0xFF and all descriptors look unused (which they are).
+		 */
+		if (++sc->replypostindex >= sc->pqdepth)
+			sc->replypostindex = 0;
+
+		switch (flags) {
+		case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
+		case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
+			cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
+			cm->cm_reply = NULL;
+			break;
+		case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
+		{
+			uint32_t baddr;
+			uint8_t *reply;
+
+			/*
+			 * Re-compose the reply address from the address
+			 * sent back from the chip.  The ReplyFrameAddress
+			 * is the lower 32 bits of the physical address of
+			 * particular reply frame.  Convert that address to
+			 * host format, and then use that to provide the
+			 * offset against the virtual address base
+			 * (sc->reply_frames).
+			 */
+			baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
+			reply = sc->reply_frames +
+			    (baddr - ((uint32_t)sc->reply_busaddr));
+			/*
+			 * Make sure the reply we got back is in a valid
+			 * range.  If not, go ahead and panic here, since
+			 * we'll probably panic as soon as we deference the
+			 * reply pointer anyway.
+			 */
+			if ((reply < sc->reply_frames)
+			 || (reply > (sc->reply_frames +
+			     (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
+				printf("%s: WARNING: reply %p out of range!\n",
+				    __func__, reply);
+				printf("%s: reply_frames %p, fqdepth %d, "
+				    "frame size %d\n", __func__,
+				    sc->reply_frames, sc->fqdepth,
+				    sc->facts->ReplyFrameSize * 4);
+				printf("%s: baddr %#x,\n", __func__, baddr);
+				/* LSI-TODO.  See Linux Code for Graceful exit */
+				panic("Reply address out of range");
+			}
+			if (le16toh(desc->AddressReply.SMID) == 0) {
+				if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
+				    MPI2_FUNCTION_DIAG_BUFFER_POST) {
+					/*
+					 * If SMID is 0 for Diag Buffer Post,
+					 * this implies that the reply is due to
+					 * a release function with a status that
+					 * the buffer has been released.  Set
+					 * the buffer flags accordingly.
+					 */
+					rel_rep =
+					    (MPI2_DIAG_RELEASE_REPLY *)reply;
+					if (le16toh(rel_rep->IOCStatus) ==
+					    MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
+					{
+						pBuffer =
+						    &sc->fw_diag_buffer_list[
+						    rel_rep->BufferType];
+						pBuffer->valid_data = TRUE;
+						pBuffer->owned_by_firmware =
+						    FALSE;
+						pBuffer->immediate = FALSE;
+					}
+				} else
+					/* SMID 0 + not a diag post = async event */
+					mpr_dispatch_event(sc, baddr,
+					    (MPI2_EVENT_NOTIFICATION_REPLY *)
+					    reply);
+			} else {
+				cm = &sc->commands[
+				    le16toh(desc->AddressReply.SMID)];
+				cm->cm_reply = reply;
+				cm->cm_reply_data =
+				    le32toh(desc->AddressReply.
+				    ReplyFrameAddress);
+			}
+			break;
+		}
+		case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
+		case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
+		case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
+		default:
+			/* Unhandled */
+			mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
+			    desc->Default.ReplyFlags);
+			cm = NULL;
+			break;
+		}
+
+		if (cm != NULL) {
+			// Print Error reply frame
+			if (cm->cm_reply)
+				mpr_display_reply_info(sc,cm->cm_reply);
+			mpr_complete_command(sc, cm);
+		}
+
+		/* Mark the descriptor unused for the next pass. */
+		desc->Words.Low = 0xffffffff;
+		desc->Words.High = 0xffffffff;
+	}
+
+	if (pq != sc->replypostindex) {
+		mpr_dprint(sc, MPR_TRACE,
+		    "%s sc %p writing postindex %d\n",
+		    __func__, sc, sc->replypostindex);
+		mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
+		    sc->replypostindex);
+	}
+
+	return;
+}
+
+/*
+ * Deliver an asynchronous event notification to every handler whose
+ * registered mask includes the event code, then free the reply frame.
+ * Handlers must copy any event data they want to keep; the frame is
+ * recycled here unconditionally.
+ */
+static void
+mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
+    MPI2_EVENT_NOTIFICATION_REPLY *reply)
+{
+	struct mpr_event_handle *eh;
+	int event, handled = 0;
+
+	/* Event is converted to host order exactly once, here. */
+	event = le16toh(reply->Event);
+	TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
+		if (isset(eh->mask, event)) {
+			eh->callback(sc, data, reply);
+			handled++;
+		}
+	}
+
+	/*
+	 * 'event' is already host-endian; applying le16toh() a second
+	 * time would double-swap the value on big-endian machines.
+	 */
+	if (handled == 0)
+		mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", event);
+
+	/*
+	 * This is the only place that the event/reply should be freed.
+	 * Anything wanting to hold onto the event data should have
+	 * already copied it into their own storage.
+	 */
+	mpr_free_reply(sc, data);
+}
+
+/*
+ * Completion callback for the event re-registration command issued by
+ * mpr_reregister_events() after a diag reset.  Prints the reply (if
+ * any), releases the command, and restarts discovery via port enable.
+ */
+static void
+mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	if (cm->cm_reply)
+		mpr_print_event(sc,
+		    (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
+
+	mpr_free_command(sc, cm);
+
+	/* next, send a port enable */
+	mprsas_startup(sc);
+}
+
+/*
+ * For both register_events and update_events, the caller supplies a bitmap
+ * of events that it _wants_.  These functions then turn that into a bitmask
+ * suitable for the controller.
+ *
+ * Allocates an event handle, links it on the softc's event list, and (if
+ * a mask is supplied) pushes the combined mask to the firmware.  The
+ * handle is returned through *handle for later deregistration.  Returns
+ * 0 or the error from mpr_update_events().
+ */
+int
+mpr_register_events(struct mpr_softc *sc, uint8_t *mask,
+    mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle)
+{
+	struct mpr_event_handle *eh;
+	int error = 0;
+
+	/* M_WAITOK cannot fail; the NULL check below is belt-and-braces. */
+	eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO);
+	if (!eh) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		return (ENOMEM);
+	}
+	eh->callback = cb;
+	eh->data = data;
+	TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
+	if (mask != NULL)
+		error = mpr_update_events(sc, eh, mask);
+	*handle = eh;
+
+	return (error);
+}
+
+/*
+ * Recompute the firmware event mask from every registered handler and
+ * send an EventNotification request (polled) to program it.  If 'handle'
+ * and 'mask' are given, the handle's wanted-mask is refreshed first.
+ * Note: in sc->event_mask a 0 bit means "enabled", so start from all
+ * ones and clear each handler's wanted bits.  Returns 0, EBUSY if no
+ * command is available, or ENXIO on a bad/missing reply.
+ */
+int
+mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle,
+    uint8_t *mask)
+{
+	MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
+	MPI2_EVENT_NOTIFICATION_REPLY *reply;
+	struct mpr_command *cm;
+	struct mpr_event_handle *eh;
+	int error, i;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	if ((mask != NULL) && (handle != NULL))
+		bcopy(mask, &handle->mask[0], 16);
+	memset(sc->event_mask, 0xff, 16);
+
+	TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
+		for (i = 0; i < 16; i++)
+			sc->event_mask[i] &= ~eh->mask[i];
+	}
+
+	if ((cm = mpr_alloc_command(sc)) == NULL)
+		return (EBUSY);
+	evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
+	evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+	evtreq->MsgFlags = 0;
+	evtreq->SASBroadcastPrimitiveMasks = 0;
+#ifdef MPR_DEBUG_ALL_EVENTS
+	{
+		u_char fullmask[16];
+		memset(fullmask, 0x00, 16);
+		bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
+	}
+#else
+	bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
+#endif
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+
+	error = mpr_request_polled(sc, cm);
+	reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
+	/* NOTE(review): IOCStatus is masked without le16toh() here, unlike
+	 * mpr_display_reply_info() -- confirm on big-endian targets. */
+	if ((reply == NULL) ||
+	    (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+		error = ENXIO;
+
+	if(reply)
+		mpr_print_event(sc, reply);
+
+	mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error);
+
+	mpr_free_command(sc, cm);
+	return (error);
+}
+
+/*
+ * Re-program the firmware event mask after a diag reset.  Same mask
+ * computation as mpr_update_events(), but the request is sent
+ * asynchronously with mpr_reregister_events_complete() as its
+ * completion handler (which in turn re-issues the port enable).
+ * Returns 0, EBUSY if no command is available, or the mapping error.
+ */
+static int
+mpr_reregister_events(struct mpr_softc *sc)
+{
+	MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
+	struct mpr_command *cm;
+	struct mpr_event_handle *eh;
+	int error, i;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* first, reregister events */
+
+	memset(sc->event_mask, 0xff, 16);
+
+	TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
+		for (i = 0; i < 16; i++)
+			sc->event_mask[i] &= ~eh->mask[i];
+	}
+
+	if ((cm = mpr_alloc_command(sc)) == NULL)
+		return (EBUSY);
+	evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
+	evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+	evtreq->MsgFlags = 0;
+	evtreq->SASBroadcastPrimitiveMasks = 0;
+#ifdef MPR_DEBUG_ALL_EVENTS
+	{
+		u_char fullmask[16];
+		memset(fullmask, 0x00, 16);
+		bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
+	}
+#else
+	bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
+#endif
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+	cm->cm_complete = mpr_reregister_events_complete;
+
+	error = mpr_map_command(sc, cm);
+
+	mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__,
+	    error);
+	return (error);
+}
+
+/*
+ * Unlink and free an event handle, then push the recomputed (narrower)
+ * event mask to the firmware.  Returns the mpr_update_events() result.
+ */
+int
+mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
+{
+
+	TAILQ_REMOVE(&sc->event_list, handle, eh_list);
+	free(handle, M_MPR);
+	return (mpr_update_events(sc, NULL, NULL));
+}
+
+/*
+ * Add a chain element as the next SGE for the specified command.
+ * Reset cm_sge and cm_sgesize to indicate all the available space.  Chains are
+ * only required for IEEE commands.  Therefore there is no code for commands
+ * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands shouldn't
+ * be requesting chains).
+ *
+ * On success the chain frame becomes the active SGL destination
+ * (cm_sge/cm_sglsize point into it).  Returns 0, or ENOBUFS when the
+ * command is an MPI-SGL command or no chain frame is available.
+ */
+static int
+mpr_add_chain(struct mpr_command *cm, int segsleft)
+{
+	struct mpr_softc *sc = cm->cm_sc;
+	MPI2_REQUEST_HEADER *req;
+	MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
+	struct mpr_chain *chain;
+	int space, sgc_size, current_segs, rem_segs, segs_per_frame;
+	uint8_t next_chain_offset = 0;
+
+	/*
+	 * Fail if a command is requesting a chain for SIMPLE SGE's.  For SAS3
+	 * only IEEE commands should be requesting chains.  Return some error
+	 * code other than 0.
+	 */
+	if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
+		mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
+		    "an MPI SGL.\n");
+		return(ENOBUFS);
+	}
+
+	sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
+	if (cm->cm_sglsize < sgc_size)
+		panic("MPR: Need SGE Error Code\n");
+
+	chain = mpr_alloc_chain(cm->cm_sc);
+	if (chain == NULL)
+		return (ENOBUFS);
+
+	/* IOCRequestFrameSize is in units of 4-byte words. */
+	space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
+
+	/*
+	 * Note: a double-linked list is used to make it easier to walk for
+	 * debugging.
+	 */
+	TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
+
+	/*
+	 * Need to know if the number of frames left is more than 1 or not.  If
+	 * more than 1 frame is required, NextChainOffset will need to be set,
+	 * which will just be the last segment of the frame.
+	 */
+	rem_segs = 0;
+	if (cm->cm_sglsize < (sgc_size * segsleft)) {
+		/*
+		 * rem_segs is the number of segements remaining after the
+		 * segments that will go into the current frame.  Since it is
+		 * known that at least one more frame is required, account for
+		 * the chain element.  To know if more than one more frame is
+		 * required, just check if there will be a remainder after using
+		 * the current frame (with this chain) and the next frame.  If
+		 * so the NextChainOffset must be the last element of the next
+		 * frame.
+		 */
+		current_segs = (cm->cm_sglsize / sgc_size) - 1;
+		rem_segs = segsleft - current_segs;
+		segs_per_frame = space / sgc_size;
+		if (rem_segs > segs_per_frame) {
+			next_chain_offset = segs_per_frame - 1;
+		}
+	}
+	/* Fill in the chain element in the current frame, little-endian. */
+	ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
+	ieee_sgc->Length = next_chain_offset ? htole32((uint32_t)space) :
+	    htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
+	ieee_sgc->NextChainOffset = next_chain_offset;
+	ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
+	ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
+	ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
+	cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
+	req = (MPI2_REQUEST_HEADER *)cm->cm_req;
+	/* ChainOffset is expressed in 16-byte units. */
+	req->ChainOffset = ((sc->facts->IOCRequestFrameSize * 4) -
+	    sgc_size) >> 4;
+
+	cm->cm_sglsize = space;
+	return (0);
+}
+
+/*
+ * Add one scatter-gather element to the scatter-gather list for a command.
+ * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the next
+ * SGE to fill in, respectively.  In Gen3, the MPI SGL does not have a chain,
+ * so don't consider any chain additions.
+ *
+ * Returns 0 on success or ENOBUFS if the frame cannot hold the
+ * remaining segments (MPI SGLs must fit in a single frame).
+ */
+int
+mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
+    int segsleft)
+{
+	uint32_t saved_buf_len, saved_address_low, saved_address_high;
+	u32 sge_flags;
+
+	/*
+	 * case 1: >=1 more segment, no room for anything (error)
+	 * case 2: 1 more segment and enough room for it
+	 */
+
+	if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
+		mpr_dprint(cm->cm_sc, MPR_ERROR,
+		    "%s: warning: Not enough room for MPI SGL in frame.\n",
+		    __func__);
+		return(ENOBUFS);
+	}
+
+	KASSERT(segsleft == 1,
+	    ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
+	    segsleft));
+
+	/*
+	 * There is one more segment left to add for the MPI SGL and there is
+	 * enough room in the frame to add it.  This is the normal case because
+	 * MPI SGL's don't have chains, otherwise something is wrong.
+	 *
+	 * If this is a bi-directional request, need to account for that
+	 * here.  Save the pre-filled sge values.  These will be used
+	 * either for the 2nd SGL or for a single direction SGL.  If
+	 * cm_out_len is non-zero, this is a bi-directional request, so
+	 * fill in the OUT SGL first, then the IN SGL, otherwise just
+	 * fill in the IN SGL.  Note that at this time, when filling in
+	 * 2 SGL's for a bi-directional request, they both use the same
+	 * DMA buffer (same cm command).
+	 */
+	saved_buf_len = sge->FlagsLength & 0x00FFFFFF;
+	saved_address_low = sge->Address.Low;
+	saved_address_high = sge->Address.High;
+	if (cm->cm_out_len) {
+		/* OUT SGL: host-to-IOC, end-of-buffer. */
+		sge->FlagsLength = cm->cm_out_len |
+		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+		    MPI2_SGE_FLAGS_END_OF_BUFFER |
+		    MPI2_SGE_FLAGS_HOST_TO_IOC |
+		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
+		    MPI2_SGE_FLAGS_SHIFT);
+		cm->cm_sglsize -= len;
+		/* Endian Safe code */
+		sge_flags = sge->FlagsLength;
+		sge->FlagsLength = htole32(sge_flags);
+		sge->Address.High = htole32(sge->Address.High);
+		sge->Address.Low = htole32(sge->Address.Low);
+		bcopy(sge, cm->cm_sge, len);
+		cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
+	}
+	/* IN (or single-direction) SGL: last element, end of list. */
+	sge->FlagsLength = saved_buf_len |
+	    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI2_SGE_FLAGS_END_OF_BUFFER |
+	    MPI2_SGE_FLAGS_LAST_ELEMENT |
+	    MPI2_SGE_FLAGS_END_OF_LIST |
+	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
+	    MPI2_SGE_FLAGS_SHIFT);
+	if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
+		sge->FlagsLength |=
+		    ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
+		    MPI2_SGE_FLAGS_SHIFT);
+	} else {
+		sge->FlagsLength |=
+		    ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
+		    MPI2_SGE_FLAGS_SHIFT);
+	}
+	sge->Address.Low = saved_address_low;
+	sge->Address.High = saved_address_high;
+
+	cm->cm_sglsize -= len;
+	/* Endian Safe code */
+	sge_flags = sge->FlagsLength;
+	sge->FlagsLength = htole32(sge_flags);
+	sge->Address.High = htole32(sge->Address.High);
+	sge->Address.Low = htole32(sge->Address.Low);
+	bcopy(sge, cm->cm_sge, len);
+	cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
+	return (0);
+}
+
+/*
+ * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
+ * gather list for a command.  Maintain cm_sglsize and cm_sge as the
+ * remaining size and pointer to the next SGE to fill in, respectively.
+ *
+ * When two or more segments remain but only one slot is left in the
+ * current frame, a chain element is inserted first (mpr_add_chain) and
+ * the SGE goes into the new chain frame.  Returns 0 or the chain error.
+ */
+int
+mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
+{
+	MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
+	int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
+	uint32_t saved_buf_len, saved_address_low, saved_address_high;
+	uint32_t sge_length;
+
+	/*
+	 * case 1: No room for chain or segment (error).
+	 * case 2: Two or more segments left but only room for chain.
+	 * case 3: Last segment and room for it, so set flags.
+	 */
+
+	/*
+	 * There should be room for at least one element, or there is a big
+	 * problem.
+	 */
+	if (cm->cm_sglsize < ieee_sge_size)
+		panic("MPR: Need SGE Error Code\n");
+
+	if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
+		if ((error = mpr_add_chain(cm, segsleft)) != 0)
+			return (error);
+	}
+
+	if (segsleft == 1) {
+		/*
+		 * If this is a bi-directional request, need to account for that
+		 * here.  Save the pre-filled sge values.  These will be used
+		 * either for the 2nd SGL or for a single direction SGL.  If
+		 * cm_out_len is non-zero, this is a bi-directional request, so
+		 * fill in the OUT SGL first, then the IN SGL, otherwise just
+		 * fill in the IN SGL.  Note that at this time, when filling in
+		 * 2 SGL's for a bi-directional request, they both use the same
+		 * DMA buffer (same cm command).
+		 */
+		saved_buf_len = sge->Length;
+		saved_address_low = sge->Address.Low;
+		saved_address_high = sge->Address.High;
+		if (cm->cm_out_len) {
+			sge->Length = cm->cm_out_len;
+			sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
+			cm->cm_sglsize -= ieee_sge_size;
+			/* Endian Safe code */
+			sge_length = sge->Length;
+			sge->Length = htole32(sge_length);
+			sge->Address.High = htole32(sge->Address.High);
+			sge->Address.Low = htole32(sge->Address.Low);
+			bcopy(sgep, cm->cm_sge, ieee_sge_size);
+			cm->cm_sge =
+			    (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
+			    ieee_sge_size);
+		}
+		/* Final element: mark end-of-list. */
+		sge->Length = saved_buf_len;
+		sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+		sge->Address.Low = saved_address_low;
+		sge->Address.High = saved_address_high;
+	}
+
+	cm->cm_sglsize -= ieee_sge_size;
+	/* Endian Safe code */
+	sge_length = sge->Length;
+	sge->Length = htole32(sge_length);
+	sge->Address.High = htole32(sge->Address.High);
+	sge->Address.Low = htole32(sge->Address.Low);
+	bcopy(sgep, cm->cm_sge, ieee_sge_size);
+	cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
+	    ieee_sge_size);
+	return (0);
+}
+
+/*
+ * Add one dma segment to the scatter-gather list for a command.
+ * Builds either an IEEE simple SGE (the normal SAS3 case) or an MPI
+ * simple SGE (when MPR_CM_FLAGS_SGE_SIMPLE is set) from the physical
+ * address/length and pushes it via the appropriate helper.
+ */
+int
+mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
+    int segsleft)
+{
+	MPI2_SGE_SIMPLE64 sge;
+	MPI2_IEEE_SGE_SIMPLE64 ieee_sge;
+
+	if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
+		ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
+		ieee_sge.Length = len;
+		mpr_from_u64(pa, &ieee_sge.Address);
+
+		return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
+	} else {
+		/*
+		 * This driver always uses 64-bit address elements for
+		 * simplicity.
+		 */
+		flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+		/* Set Endian safe macro in mpr_push_sge */
+		sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
+		mpr_from_u64(pa, &sge.Address);
+
+		return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
+	}
+}
+
+/*
+ * busdma load callback: turn the returned physical segment list into
+ * SGEs on the command, sync the buffer for DMA, and enqueue the request
+ * to the hardware.  On chain-frame exhaustion the command is completed
+ * with MPR_CM_FLAGS_CHAIN_FAILED so the caller can retry.
+ */
+static void
+mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+	struct mpr_softc *sc;
+	struct mpr_command *cm;
+	u_int i, dir, sflags;
+
+	cm = (struct mpr_command *)arg;
+	sc = cm->cm_sc;
+
+	/*
+	 * In this case, just print out a warning and let the chip tell the
+	 * user they did the wrong thing.
+	 */
+	if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
+		mpr_dprint(sc, MPR_ERROR,
+			   "%s: warning: busdma returned %d segments, "
+			   "more than the %d allowed\n", __func__, nsegs,
+			   cm->cm_max_segs);
+	}
+
+	/*
+	 * Set up DMA direction flags.  Bi-directional requests are also handled
+	 * here.  In that case, both direction flags will be set.
+	 */
+	sflags = 0;
+	if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
+		/*
+		 * We have to add a special case for SMP passthrough, there
+		 * is no easy way to generically handle it.  The first
+		 * S/G element is used for the command (therefore the
+		 * direction bit needs to be set).  The second one is used
+		 * for the reply.  We'll leave it to the caller to make
+		 * sure we only have two buffers.
+		 */
+		/*
+		 * Even though the busdma man page says it doesn't make
+		 * sense to have both direction flags, it does in this case.
+		 * We have one s/g element being accessed in each direction.
+		 */
+		dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
+
+		/*
+		 * Set the direction flag on the first buffer in the SMP
+		 * passthrough request.  We'll clear it for the second one.
+		 */
+		sflags |= MPI2_SGE_FLAGS_DIRECTION |
+		    MPI2_SGE_FLAGS_END_OF_BUFFER;
+	} else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
+		sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
+		dir = BUS_DMASYNC_PREWRITE;
+	} else
+		dir = BUS_DMASYNC_PREREAD;
+
+	for (i = 0; i < nsegs; i++) {
+		if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
+			sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
+		}
+		error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
+		    sflags, nsegs - i);
+		if (error != 0) {
+			/* Resource shortage, roll back! */
+			/* Rate-limit the warning to avoid console spam. */
+			if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
+				mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
+				    "consider increasing hw.mpr.max_chains.\n");
+			cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
+			mpr_complete_command(sc, cm);
+			return;
+		}
+	}
+
+	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
+	mpr_enqueue_request(sc, cm);
+
+	return;
+}
+
+static void
+mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
+ int error)
+{
+ mpr_data_cb(arg, segs, nsegs, error);
+}
+
+/*
+ * This is the routine to enqueue commands asynchronously.
+ * Note that the only error path here is from bus_dmamap_load(), which can
+ * return EINPROGRESS if it is waiting for resources. Other than this, it's
+ * assumed that if you have a command in-hand, then you have enough credits
+ * to use it.
+ */
+int
+mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ int error = 0;
+
+ if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
+ error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
+ &cm->cm_uio, mpr_data_cb2, cm, 0);
+ } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
+ error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
+ cm->cm_data, mpr_data_cb, cm, 0);
+ } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
+ error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
+ cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
+ } else {
+ /* Add a zero-length element as needed */
+ if (cm->cm_sge != NULL)
+ mpr_add_dmaseg(cm, 0, 0, 0, 1);
+ mpr_enqueue_request(sc, cm);
+ }
+
+ return (error);
+}
+
+/*
+ * This is the routine to enqueue commands synchronously. An error of
+ * EINPROGRESS from mpr_map_command() is ignored since the command will
+ * be executed and enqueued automatically. Other errors come from msleep().
+ */
+int
+mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, int timeout,
+ int sleep_flag)
+{
+ int error, rc;
+ struct timeval cur_time, start_time;
+
+ if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
+ return EBUSY;
+
+ cm->cm_complete = NULL;
+ cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED);
+ error = mpr_map_command(sc, cm);
+ if ((error != 0) && (error != EINPROGRESS))
+ return (error);
+
+ // Check for context and wait for 50 mSec at a time until time has
+ // expired or the command has finished. If msleep can't be used, need
+ // to poll.
+#if __FreeBSD_version >= 1000029
+ if (curthread->td_no_sleeping)
+#else //__FreeBSD_version < 1000029
+ if (curthread->td_pflags & TDP_NOSLEEPING)
+#endif //__FreeBSD_version >= 1000029
+ sleep_flag = NO_SLEEP;
+ getmicrotime(&start_time);
+ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
+ error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
+ } else {
+ while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
+ mpr_intr_locked(sc);
+ if (sleep_flag == CAN_SLEEP)
+ pause("mprwait", hz/20);
+ else
+ DELAY(50000);
+
+ getmicrotime(&cur_time);
+ if ((cur_time.tv_sec - start_time.tv_sec) > timeout) {
+ error = EWOULDBLOCK;
+ break;
+ }
+ }
+ }
+
+ if (error == EWOULDBLOCK) {
+ mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
+ rc = mpr_reinit(sc);
+ mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
+ "failed");
+ error = ETIMEDOUT;
+ }
+ return (error);
+}
+
+/*
+ * This is the routine to enqueue a command synchronously and poll for
+ * completion. Its use should be rare.
+ */
+int
+mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ int error, timeout = 0, rc;
+ struct timeval cur_time, start_time;
+
+ error = 0;
+
+ cm->cm_flags |= MPR_CM_FLAGS_POLLED;
+ cm->cm_complete = NULL;
+ mpr_map_command(sc, cm);
+
+ getmicrotime(&start_time);
+ while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
+ mpr_intr_locked(sc);
+
+ if (mtx_owned(&sc->mpr_mtx))
+ msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
+ "mprpoll", hz/20);
+ else
+ pause("mprpoll", hz/20);
+
+ /*
+ * Check for real-time timeout and fail if more than 60 seconds.
+ */
+ getmicrotime(&cur_time);
+ timeout = cur_time.tv_sec - start_time.tv_sec;
+ if (timeout > 60) {
+ mpr_dprint(sc, MPR_FAULT, "polling failed\n");
+ error = ETIMEDOUT;
+ break;
+ }
+ }
+
+ if(error) {
+ mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
+ rc = mpr_reinit(sc);
+ mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ?
+ "success" : "failed");
+ }
+ return (error);
+}
+
+/*
+ * The MPT driver had a verbose interface for config pages. In this driver,
+ * reduce it to much simpler terms, similar to the Linux driver.
+ */
+int
+mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
+{
+ MPI2_CONFIG_REQUEST *req;
+ struct mpr_command *cm;
+ int error;
+
+ if (sc->mpr_flags & MPR_FLAGS_BUSY) {
+ return (EBUSY);
+ }
+
+ cm = mpr_alloc_command(sc);
+ if (cm == NULL) {
+ return (EBUSY);
+ }
+
+ req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ req->Function = MPI2_FUNCTION_CONFIG;
+ req->Action = params->action;
+ req->SGLFlags = 0;
+ req->ChainOffset = 0;
+ req->PageAddress = params->page_address;
+ if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
+
+ hdr = &params->hdr.Ext;
+ req->ExtPageType = hdr->ExtPageType;
+ req->ExtPageLength = hdr->ExtPageLength;
+ req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ req->Header.PageLength = 0; /* Must be set to zero */
+ req->Header.PageNumber = hdr->PageNumber;
+ req->Header.PageVersion = hdr->PageVersion;
+ } else {
+ MPI2_CONFIG_PAGE_HEADER *hdr;
+
+ hdr = &params->hdr.Struct;
+ req->Header.PageType = hdr->PageType;
+ req->Header.PageNumber = hdr->PageNumber;
+ req->Header.PageLength = hdr->PageLength;
+ req->Header.PageVersion = hdr->PageVersion;
+ }
+
+ cm->cm_data = params->buffer;
+ cm->cm_length = params->length;
+ cm->cm_sge = &req->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+
+ cm->cm_complete_data = params;
+ if (params->callback != NULL) {
+ cm->cm_complete = mpr_config_complete;
+ return (mpr_map_command(sc, cm));
+ } else {
+ error = mpr_wait_command(sc, cm, 0, CAN_SLEEP);
+ if (error) {
+ mpr_dprint(sc, MPR_FAULT,
+ "Error %d reading config page\n", error);
+ mpr_free_command(sc, cm);
+ return (error);
+ }
+ mpr_config_complete(sc, cm);
+ }
+
+ return (0);
+}
+
+int
+mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
+{
+ return (EINVAL);
+}
+
+static void
+mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_config_params *params;
+
+ MPR_FUNCTRACE(sc);
+ params = cm->cm_complete_data;
+
+ if (cm->cm_data != NULL) {
+ bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
+ }
+
+ /*
+ * XXX KDM need to do more error recovery? This results in the
+ * device in question not getting probed.
+ */
+ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+ params->status = MPI2_IOCSTATUS_BUSY;
+ goto done;
+ }
+
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (reply == NULL) {
+ params->status = MPI2_IOCSTATUS_BUSY;
+ goto done;
+ }
+ params->status = reply->IOCStatus;
+ if (params->hdr.Ext.ExtPageType != 0) {
+ params->hdr.Ext.ExtPageType = reply->ExtPageType;
+ params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
+ } else {
+ params->hdr.Struct.PageType = reply->Header.PageType;
+ params->hdr.Struct.PageNumber = reply->Header.PageNumber;
+ params->hdr.Struct.PageLength = reply->Header.PageLength;
+ params->hdr.Struct.PageVersion = reply->Header.PageVersion;
+ }
+
+done:
+ mpr_free_command(sc, cm);
+ if (params->callback != NULL)
+ params->callback(sc, params);
+
+ return;
+}
diff --git a/sys/dev/mpr/mpr_config.c b/sys/dev/mpr/mpr_config.c
new file mode 100644
index 0000000000000..125451608dee7
--- /dev/null
+++ b/sys/dev/mpr/mpr_config.c
@@ -0,0 +1,1302 @@
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/sysctl.h>
+#include <sys/eventhandler.h>
+#include <sys/uio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+
+/**
+ * mpr_config_get_ioc_pg8 - obtain ioc page 8
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_ioc_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2IOCPage8_t *config_page)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_command *cm;
+ MPI2_CONFIG_PAGE_IOC_8 *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for header completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mpr_free_command(sc, cm);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc((cm->cm_length), M_MPR, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length, (sizeof(Mpi2IOCPage8_t))));
+
+out:
+ free(page, M_MPR);
+ if (cm)
+ mpr_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mpr_config_get_iounit_pg8 - obtain iounit page 8
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_iounit_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2IOUnitPage8_t *config_page)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_command *cm;
+ MPI2_CONFIG_PAGE_IO_UNIT_8 *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for header completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mpr_free_command(sc, cm);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ request->Header.PageNumber = 8;
+ request->Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION;
+ request->Header.PageLength = mpi_reply->Header.PageLength;
+ cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc((cm->cm_length), M_MPR, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length,
+ (sizeof(Mpi2IOUnitPage8_t))));
+
+out:
+ free(page, M_MPR);
+ if (cm)
+ mpr_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mpr_base_static_config_pages - static start of day config pages.
+ * @sc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpr_base_static_config_pages(struct mpr_softc *sc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ int retry;
+
+ retry = 0;
+ while (mpr_config_get_ioc_pg8(sc, &mpi_reply, &sc->ioc_pg8)) {
+ retry++;
+ if (retry > 5) {
+ /* We need to Handle this situation */
+ /*FIXME*/
+ break;
+ }
+ }
+ retry = 0;
+ while (mpr_config_get_iounit_pg8(sc, &mpi_reply, &sc->iounit_pg8)) {
+ retry++;
+ if (retry > 5) {
+ /* We need to Handle this situation */
+ /*FIXME*/
+ break;
+ }
+ }
+}
+
+/**
+ * mpr_config_get_dpm_pg0 - obtain driver persistent mapping page0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_dpm_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2DriverMappingPage0_t *config_page, u16 sz)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_command *cm;
+ Mpi2DriverMappingPage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ memset(config_page, 0, sz);
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->PageAddress = sc->max_dpm_entries <<
+ MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for header completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mpr_free_command(sc, cm);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_NVRAM;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->PageAddress = sc->max_dpm_entries <<
+ MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ cm->cm_length = le16toh(request->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPR, M_ZERO|M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length, sz));
+out:
+ free(page, M_MPR);
+ if (cm)
+ mpr_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mpr_config_set_dpm_pg0 - write an entry in driver persistent mapping page0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @entry_idx: entry index in DPM Page0 to be modified
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+
+int mpr_config_set_dpm_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+ Mpi2DriverMappingPage0_t *config_page, u16 entry_idx)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_command *cm;
+ MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ /* We can remove below two lines ????*/
+ request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->PageAddress |= htole16(entry_idx);
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for header completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mpr_free_command(sc, cm);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
+ request->PageAddress |= htole16(entry_idx);
+ cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAOUT;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ bcopy(config_page, page, MIN(cm->cm_length,
+ (sizeof(Mpi2DriverMappingPage0_t))));
+ cm->cm_data = page;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request to write page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: page written with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+out:
+ free(page, M_MPR);
+ if (cm)
+ mpr_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mpr_config_get_sas_device_pg0 - obtain sas device page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_sas_device_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasDevicePage0_t *config_page, u32 form, u16 handle)
+{
+ MPI2_CONFIG_REQUEST *request;
+ MPI2_CONFIG_REPLY *reply;
+ struct mpr_command *cm;
+ Mpi2SasDevicePage0_t *page = NULL;
+ int error = 0;
+ u16 ioc_status;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_data = NULL;
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for header completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: header read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ /* We have to do free and alloc for the reply-free and reply-post
+ * counters to match - Need to review the reply FIFO handling.
+ */
+ mpr_free_command(sc, cm);
+
+ if ((cm = mpr_alloc_command(sc)) == NULL) {
+ printf("%s: command alloc failed @ line %d\n", __func__,
+ __LINE__);
+ error = EBUSY;
+ goto out;
+ }
+ request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+ bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+ request->Function = MPI2_FUNCTION_CONFIG;
+ request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ request->Header.PageNumber = 0;
+ request->Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ request->ExtPageLength = mpi_reply->ExtPageLength;
+ request->PageAddress = htole32(form | handle);
+ cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4;
+ cm->cm_sge = &request->PageBufferSGE;
+ cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+ cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+ if (!page) {
+ printf("%s: page alloc failed\n", __func__);
+ error = ENOMEM;
+ goto out;
+ }
+ cm->cm_data = page;
+
+ error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+ reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+ if (error || (reply == NULL)) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: request for page completed with error %d",
+ __func__, error);
+ error = ENXIO;
+ goto out;
+ }
+ ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ /* FIXME */
+ /*
+ * If the request returns an error then we need to do a diag
+ * reset
+ */
+ printf("%s: page read with error; iocstatus = 0x%x\n",
+ __func__, ioc_status);
+ error = ENXIO;
+ goto out;
+ }
+ bcopy(page, config_page, MIN(cm->cm_length,
+ sizeof(Mpi2SasDevicePage0_t)));
+out:
+ free(page, M_MPR);
+ if (cm)
+ mpr_free_command(sc, cm);
+ return (error);
+}
+
+/**
+ * mpr_config_get_bios_pg3 - obtain BIOS page 3
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_bios_pg3(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+    Mpi2BiosPage3_t *config_page)
+{
+	MPI2_CONFIG_REQUEST *request;
+	MPI2_CONFIG_REPLY *reply;
+	struct mpr_command *cm;
+	Mpi2BiosPage3_t *page = NULL;
+	int error = 0;
+	u16 ioc_status;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* Step 1: PAGE_HEADER action to learn the page length. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+	request->Header.PageNumber = 3;
+	request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+	error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: request for header completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: header read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* We have to do free and alloc for the reply-free and reply-post
+	 * counters to match - Need to review the reply FIFO handling.
+	 */
+	mpr_free_command(sc, cm);
+
+	/* Step 2: READ_CURRENT action to pull the page into a scratch buffer. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+	request->Header.PageNumber = 3;
+	request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+	request->Header.PageLength = mpi_reply->Header.PageLength;
+	cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+	cm->cm_sge = &request->PageBufferSGE;
+	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+	if (!page) {
+		printf("%s: page alloc failed\n", __func__);
+		error = ENOMEM;
+		goto out;
+	}
+	cm->cm_data = page;
+
+	error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: request for page completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: page read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* Copy no more than the caller's fixed-size page structure. */
+	bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2BiosPage3_t)));
+out:
+	free(page, M_MPR);
+	if (cm)
+		mpr_free_command(sc, cm);
+	return (error);
+}
+
+/**
+ * mpr_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @page_address: form and handle value used to get page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_raid_volume_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address)
+{
+	MPI2_CONFIG_REQUEST *request;
+	MPI2_CONFIG_REPLY *reply;
+	struct mpr_command *cm;
+	Mpi2RaidVolPage0_t *page = NULL;
+	int error = 0;
+	u16 ioc_status;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* Step 1: PAGE_HEADER action to learn the page length. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+	request->Header.PageNumber = 0;
+	request->Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+
+	/*
+	 * This page must be polled because the IOC isn't ready yet when this
+	 * page is needed.
+	 */
+	error = mpr_request_polled(sc, cm);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: header read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* We have to do free and alloc for the reply-free and reply-post
+	 * counters to match - Need to review the reply FIFO handling.
+	 */
+	mpr_free_command(sc, cm);
+
+	/* Step 2: READ_CURRENT action to pull the page into a scratch buffer. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+	request->Header.PageNumber = 0;
+	request->Header.PageLength = mpi_reply->Header.PageLength;
+	request->Header.PageVersion = mpi_reply->Header.PageVersion;
+	/*
+	 * NOTE(review): page_address is not run through htole32() here,
+	 * unlike the other page readers in this file -- confirm callers
+	 * pass an already little-endian value.
+	 */
+	request->PageAddress = page_address;
+	cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+	cm->cm_sge = &request->PageBufferSGE;
+	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+	if (!page) {
+		printf("%s: page alloc failed\n", __func__);
+		error = ENOMEM;
+		goto out;
+	}
+	cm->cm_data = page;
+
+	/*
+	 * This page must be polled because the IOC isn't ready yet when this
+	 * page is needed.
+	 */
+	error = mpr_request_polled(sc, cm);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: page read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/*
+	 * NOTE(review): copies the full cm->cm_length bytes (RAID volume
+	 * page 0 is variable length), so config_page must be sized from a
+	 * prior header read -- verify all callers do this.
+	 */
+	bcopy(page, config_page, cm->cm_length);
+out:
+	free(page, M_MPR);
+	if (cm)
+		mpr_free_command(sc, cm);
+	return (error);
+}
+
+/**
+ * mpr_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_raid_volume_pg1(struct mpr_softc *sc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, u16 handle)
+{
+	MPI2_CONFIG_REQUEST *request;
+	MPI2_CONFIG_REPLY *reply;
+	struct mpr_command *cm;
+	Mpi2RaidVolPage1_t *page = NULL;
+	int error = 0;
+	u16 ioc_status;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* Step 1: PAGE_HEADER action to learn the page length. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+	request->Header.PageNumber = 1;
+	request->Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+	error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: request for header completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: header read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* We have to do free and alloc for the reply-free and reply-post
+	 * counters to match - Need to review the reply FIFO handling.
+	 */
+	mpr_free_command(sc, cm);
+
+	/* Step 2: READ_CURRENT action to pull the page into a scratch buffer. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+	request->Header.PageNumber = 1;
+	request->Header.PageLength = mpi_reply->Header.PageLength;
+	request->Header.PageVersion = mpi_reply->Header.PageVersion;
+	request->PageAddress = htole32(form | handle);
+	cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+	cm->cm_sge = &request->PageBufferSGE;
+	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+	if (!page) {
+		printf("%s: page alloc failed\n", __func__);
+		error = ENOMEM;
+		goto out;
+	}
+	cm->cm_data = page;
+
+	error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: request for page completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: page read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* Copy no more than the caller's fixed-size page structure. */
+	bcopy(page, config_page, MIN(cm->cm_length,
+	    sizeof(Mpi2RaidVolPage1_t)));
+out:
+	free(page, M_MPR);
+	if (cm)
+		mpr_free_command(sc, cm);
+	return (error);
+}
+
+/**
+ * mpr_config_get_volume_wwid - returns wwid given the volume handle
+ * @sc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_volume_wwid(struct mpr_softc *sc, u16 volume_handle, u64 *wwid)
+{
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2RaidVolPage1_t raid_vol_pg1;
+
+	/* Default to 0 so *wwid is always defined, even on failure. */
+	*wwid = 0;
+	if (!(mpr_config_get_raid_volume_pg1(sc, &mpi_reply, &raid_vol_pg1,
+	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, volume_handle))) {
+		/*
+		 * NOTE(review): the High/Low halves are combined in host
+		 * arithmetic before the le64toh() swap -- this assumes the
+		 * U64 halves arrive little-endian; confirm against the MPI
+		 * U64 definition on big-endian targets.
+		 */
+		*wwid = le64toh((u64)raid_vol_pg1.WWID.High << 32 |
+		    raid_vol_pg1.WWID.Low);
+		return 0;
+	} else
+		return -1;
+}
+
+/**
+ * mpr_config_get_raid_pd_pg0 - obtain raid phys disk page 0
+ * @sc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @page_address: form and handle value used to get page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpr_config_get_raid_pd_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t *mpi_reply,
+    Mpi2RaidPhysDiskPage0_t *config_page, u32 page_address)
+{
+	MPI2_CONFIG_REQUEST *request;
+	MPI2_CONFIG_REPLY *reply;
+	struct mpr_command *cm;
+	Mpi2RaidPhysDiskPage0_t *page = NULL;
+	int error = 0;
+	u16 ioc_status;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* Step 1: PAGE_HEADER action to learn the page length. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+	request->Header.PageNumber = 0;
+	request->Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = NULL;
+
+	/*
+	 * This page must be polled because the IOC isn't ready yet when this
+	 * page is needed.
+	 */
+	error = mpr_request_polled(sc, cm);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for header completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: header read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* We have to do free and alloc for the reply-free and reply-post
+	 * counters to match - Need to review the reply FIFO handling.
+	 */
+	mpr_free_command(sc, cm);
+
+	/* Step 2: READ_CURRENT action to pull the page into a scratch buffer. */
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed @ line %d\n", __func__,
+		    __LINE__);
+		error = EBUSY;
+		goto out;
+	}
+	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
+	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
+	request->Function = MPI2_FUNCTION_CONFIG;
+	request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+	request->Header.PageNumber = 0;
+	request->Header.PageLength = mpi_reply->Header.PageLength;
+	request->Header.PageVersion = mpi_reply->Header.PageVersion;
+	/*
+	 * NOTE(review): page_address is not run through htole32() here,
+	 * unlike the other page readers in this file -- confirm callers
+	 * pass an already little-endian value.
+	 */
+	request->PageAddress = page_address;
+	cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
+	cm->cm_sge = &request->PageBufferSGE;
+	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	page = malloc(cm->cm_length, M_MPR, M_ZERO | M_NOWAIT);
+	if (!page) {
+		printf("%s: page alloc failed\n", __func__);
+		error = ENOMEM;
+		goto out;
+	}
+	cm->cm_data = page;
+
+	/*
+	 * This page must be polled because the IOC isn't ready yet when this
+	 * page is needed.
+	 */
+	error = mpr_request_polled(sc, cm);
+	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: poll for page completed with error %d\n",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		/* FIXME */
+		/* If the poll returns error then we need to do diag reset */
+		printf("%s: page read with error; iocstatus = 0x%x\n",
+		    __func__, ioc_status);
+		error = ENXIO;
+		goto out;
+	}
+	/* Copy no more than the caller's fixed-size page structure. */
+	bcopy(page, config_page, MIN(cm->cm_length,
+	    sizeof(Mpi2RaidPhysDiskPage0_t)));
+out:
+	free(page, M_MPR);
+	if (cm)
+		mpr_free_command(sc, cm);
+	return (error);
+}
diff --git a/sys/dev/mpr/mpr_ioctl.h b/sys/dev/mpr/mpr_ioctl.h
new file mode 100644
index 0000000000000..5ec482f311b01
--- /dev/null
+++ b/sys/dev/mpr/mpr_ioctl.h
@@ -0,0 +1,386 @@
+/*-
+ * Copyright (c) 2008 Yahoo!, Inc.
+ * All rights reserved.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD userland interface
+ *
+ * $FreeBSD$
+ */
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MPR_IOCTL_H_
+#define _MPR_IOCTL_H_
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+
+/*
+ * For the read header requests, the header should include the page
+ * type or extended page type, page number, and page version. The
+ * buffer and length are unused. The completed header is returned in
+ * the 'header' member.
+ *
+ * For the read page and write page requests, 'buf' should point to a
+ * buffer of 'len' bytes which holds the entire page (including the
+ * header).
+ *
+ * All requests specify the page address in 'page_address'.
+ */
+/* Request for MPRIO_READ_CFG_HEADER / MPRIO_READ_CFG_PAGE / MPRIO_WRITE_CFG_PAGE. */
+struct mpr_cfg_page_req {
+	MPI2_CONFIG_PAGE_HEADER header;	/* in: type/number/version; out: completed header */
+	uint32_t page_address;		/* page address (form | handle) */
+	void	*buf;			/* whole page, including the header */
+	int	len;			/* size of buf in bytes */
+	uint16_t ioc_status;		/* IOCStatus returned by the firmware */
+};
+
+/* Same as mpr_cfg_page_req, but for extended config pages. */
+struct mpr_ext_cfg_page_req {
+	MPI2_CONFIG_EXTENDED_PAGE_HEADER header;
+	uint32_t page_address;
+	void	*buf;
+	int	len;
+	uint16_t ioc_status;
+};
+
+/* Request for MPRIO_RAID_ACTION. */
+struct mpr_raid_action {
+	uint8_t action;
+	uint8_t volume_bus;
+	uint8_t volume_id;
+	uint8_t phys_disk_num;
+	uint32_t action_data_word;
+	void *buf;
+	int len;
+	uint32_t volume_status;
+	uint32_t action_data[4];
+	uint16_t action_status;
+	uint16_t ioc_status;
+	uint8_t write;			/* nonzero: buf is written to the IOC */
+};
+
+/* Raw MPI command pass-through for MPRIO_MPR_COMMAND. */
+struct mpr_usr_command {
+	void *req;			/* request frame */
+	uint32_t req_len;
+	void *rpl;			/* reply frame */
+	uint32_t rpl_len;
+	void *buf;			/* optional data buffer */
+	int len;
+	uint32_t flags;			/* MPRIO_MPR_COMMAND_FLAG_* */
+};
+
+/* Packed PCI bus/device/function location of the adapter. */
+typedef struct mpr_pci_bits
+{
+	union {
+		struct {
+			uint32_t DeviceNumber :5;
+			uint32_t FunctionNumber :3;
+			uint32_t BusNumber :24;
+		} bits;
+		uint32_t AsDWORD;
+	} u;
+	uint32_t PciSegmentId;
+} mpr_pci_bits_t;
+
+/*
+ * The following is the MPRIOCTL_GET_ADAPTER_DATA data structure. This data
+ * structure is setup so that we hopefully are properly aligned for both
+ * 32-bit and 64-bit mode applications.
+ *
+ * Adapter Type - Value = 6 = SCSI Protocol through SAS-3 adapter
+ *
+ * MPI Port Number - The PCI Function number for this device
+ *
+ * PCI Device HW Id - The PCI device number for this device
+ *
+ */
+#define MPRIOCTL_ADAPTER_TYPE_SAS3 6
+typedef struct mpr_adapter_data
+{
+ uint32_t StructureLength;
+ uint32_t AdapterType;
+ uint32_t MpiPortNumber;
+ uint32_t PCIDeviceHwId;
+ uint32_t PCIDeviceHwRev;
+ uint32_t SubSystemId;
+ uint32_t SubsystemVendorId;
+ uint32_t Reserved1;
+ uint32_t MpiFirmwareVersion;
+ uint32_t BiosVersion;
+ uint8_t DriverVersion[32];
+ uint8_t Reserved2;
+ uint8_t ScsiId;
+ uint16_t Reserved3;
+ mpr_pci_bits_t PciInformation;
+} mpr_adapter_data_t;
+
+
+/* Firmware flash update request (MPTIOCTL_UPDATE_FLASH). */
+typedef struct mpr_update_flash
+{
+	uint64_t	PtrBuffer;	/* image buffer; presumably a user-space address -- verify in the ioctl handler */
+	uint32_t	ImageChecksum;
+	uint32_t	ImageOffset;
+	uint32_t	ImageSize;
+	uint32_t	ImageType;
+} mpr_update_flash_t;
+
+
+/* DataDirection values for mpr_pass_thru. */
+#define MPR_PASS_THRU_DIRECTION_NONE	0
+#define MPR_PASS_THRU_DIRECTION_READ	1
+#define MPR_PASS_THRU_DIRECTION_WRITE	2
+#define MPR_PASS_THRU_DIRECTION_BOTH	3
+
+/* MPI pass-through request (MPTIOCTL_PASS_THRU). */
+typedef struct mpr_pass_thru
+{
+	uint64_t	PtrRequest;	/* request frame */
+	uint64_t	PtrReply;	/* reply frame */
+	uint64_t	PtrData;	/* data-in buffer */
+	uint32_t	RequestSize;
+	uint32_t	ReplySize;
+	uint32_t	DataSize;
+	uint32_t	DataDirection;	/* MPR_PASS_THRU_DIRECTION_* */
+	uint64_t	PtrDataOut;	/* data-out buffer */
+	uint32_t	DataOutSize;
+	uint32_t	Timeout;
+} mpr_pass_thru_t;
+
+
+/*
+ * Event queue defines
+ */
+#define MPR_EVENT_QUEUE_SIZE (50) /* Max Events stored in driver */
+#define MPR_MAX_EVENT_DATA_LENGTH (48) /* Size of each event in Dwords */
+
+typedef struct mpr_event_query
+{
+ uint16_t Entries;
+ uint16_t Reserved;
+ uint32_t Types[4];
+} mpr_event_query_t;
+
+typedef struct mpr_event_enable
+{
+ uint32_t Types[4];
+} mpr_event_enable_t;
+
+/*
+ * Event record entry for ioctl.
+ */
+typedef struct mpr_event_entry
+{
+ uint32_t Type;
+ uint32_t Number;
+ uint32_t Data[MPR_MAX_EVENT_DATA_LENGTH];
+} mpr_event_entry_t;
+
+typedef struct mpr_event_report
+{
+ uint32_t Size;
+ uint64_t PtrEvents;
+} mpr_event_report_t;
+
+
+typedef struct mpr_pci_info
+{
+ uint32_t BusNumber;
+ uint8_t DeviceNumber;
+ uint8_t FunctionNumber;
+ uint16_t InterruptVector;
+ uint8_t PciHeader[256];
+} mpr_pci_info_t;
+
+
+typedef struct mpr_diag_action
+{
+ uint32_t Action;
+ uint32_t Length;
+ uint64_t PtrDiagAction;
+ uint32_t ReturnCode;
+} mpr_diag_action_t;
+
+#define MPR_FW_DIAGNOSTIC_UID_NOT_FOUND (0xFF)
+
+#define MPR_FW_DIAG_NEW (0x806E6577)
+
+#define MPR_FW_DIAG_TYPE_REGISTER (0x00000001)
+#define MPR_FW_DIAG_TYPE_UNREGISTER (0x00000002)
+#define MPR_FW_DIAG_TYPE_QUERY (0x00000003)
+#define MPR_FW_DIAG_TYPE_READ_BUFFER (0x00000004)
+#define MPR_FW_DIAG_TYPE_RELEASE (0x00000005)
+
+#define MPR_FW_DIAG_INVALID_UID (0x00000000)
+
+#define MPR_DIAG_SUCCESS 0
+#define MPR_DIAG_FAILURE 1
+
+#define MPR_FW_DIAG_ERROR_SUCCESS (0x00000000)
+#define MPR_FW_DIAG_ERROR_FAILURE (0x00000001)
+#define MPR_FW_DIAG_ERROR_INVALID_PARAMETER (0x00000002)
+#define MPR_FW_DIAG_ERROR_POST_FAILED (0x00000010)
+#define MPR_FW_DIAG_ERROR_INVALID_UID (0x00000011)
+#define MPR_FW_DIAG_ERROR_RELEASE_FAILED (0x00000012)
+#define MPR_FW_DIAG_ERROR_NO_BUFFER (0x00000013)
+#define MPR_FW_DIAG_ERROR_ALREADY_RELEASED (0x00000014)
+
+
+typedef struct mpr_fw_diag_register
+{
+ uint8_t ExtendedType;
+ uint8_t BufferType;
+ uint16_t ApplicationFlags;
+ uint32_t DiagnosticFlags;
+ uint32_t ProductSpecific[23];
+ uint32_t RequestedBufferSize;
+ uint32_t UniqueId;
+} mpr_fw_diag_register_t;
+
+typedef struct mpr_fw_diag_unregister
+{
+ uint32_t UniqueId;
+} mpr_fw_diag_unregister_t;
+
+#define MPR_FW_DIAG_FLAG_APP_OWNED (0x0001)
+#define MPR_FW_DIAG_FLAG_BUFFER_VALID (0x0002)
+#define MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS (0x0004)
+
+typedef struct mpr_fw_diag_query
+{
+ uint8_t ExtendedType;
+ uint8_t BufferType;
+ uint16_t ApplicationFlags;
+ uint32_t DiagnosticFlags;
+ uint32_t ProductSpecific[23];
+ uint32_t TotalBufferSize;
+ uint32_t DriverAddedBufferSize;
+ uint32_t UniqueId;
+} mpr_fw_diag_query_t;
+
+typedef struct mpr_fw_diag_release
+{
+ uint32_t UniqueId;
+} mpr_fw_diag_release_t;
+
+#define MPR_FW_DIAG_FLAG_REREGISTER (0x0001)
+#define MPR_FW_DIAG_FLAG_FORCE_RELEASE (0x0002)
+
+typedef struct mpr_diag_read_buffer
+{
+ uint8_t Status;
+ uint8_t Reserved;
+ uint16_t Flags;
+ uint32_t StartingOffset;
+ uint32_t BytesToRead;
+ uint32_t UniqueId;
+ uint64_t PtrDataBuffer;
+} mpr_diag_read_buffer_t;
+
+/*
+ * Register Access
+ */
+#define REG_IO_READ 1
+#define REG_IO_WRITE 2
+#define REG_MEM_READ 3
+#define REG_MEM_WRITE 4
+
+typedef struct mpr_reg_access
+{
+ uint32_t Command;
+ uint32_t RegOffset;
+ uint32_t RegData;
+} mpr_reg_access_t;
+
+typedef struct mpr_btdh_mapping
+{
+ uint16_t TargetID;
+ uint16_t Bus;
+ uint16_t DevHandle;
+ uint16_t Reserved;
+} mpr_btdh_mapping_t;
+
+#define MPRIO_MPR_COMMAND_FLAG_VERBOSE 0x01
+#define MPRIO_MPR_COMMAND_FLAG_DEBUG 0x02
+#define MPRIO_READ_CFG_HEADER _IOWR('M', 200, struct mpr_cfg_page_req)
+#define MPRIO_READ_CFG_PAGE _IOWR('M', 201, struct mpr_cfg_page_req)
+#define MPRIO_READ_EXT_CFG_HEADER _IOWR('M', 202, struct mpr_ext_cfg_page_req)
+#define MPRIO_READ_EXT_CFG_PAGE _IOWR('M', 203, struct mpr_ext_cfg_page_req)
+#define MPRIO_WRITE_CFG_PAGE _IOWR('M', 204, struct mpr_cfg_page_req)
+#define MPRIO_RAID_ACTION _IOWR('M', 205, struct mpr_raid_action)
+#define MPRIO_MPR_COMMAND _IOWR('M', 210, struct mpr_usr_command)
+
+#define MPTIOCTL ('I')
+#define MPTIOCTL_GET_ADAPTER_DATA _IOWR(MPTIOCTL, 1,\
+ struct mpr_adapter_data)
+#define MPTIOCTL_UPDATE_FLASH _IOWR(MPTIOCTL, 2,\
+ struct mpr_update_flash)
+#define MPTIOCTL_RESET_ADAPTER _IO(MPTIOCTL, 3)
+#define MPTIOCTL_PASS_THRU _IOWR(MPTIOCTL, 4,\
+ struct mpr_pass_thru)
+#define MPTIOCTL_EVENT_QUERY _IOWR(MPTIOCTL, 5,\
+ struct mpr_event_query)
+#define MPTIOCTL_EVENT_ENABLE _IOWR(MPTIOCTL, 6,\
+ struct mpr_event_enable)
+#define MPTIOCTL_EVENT_REPORT _IOWR(MPTIOCTL, 7,\
+ struct mpr_event_report)
+#define MPTIOCTL_GET_PCI_INFO _IOWR(MPTIOCTL, 8,\
+ struct mpr_pci_info)
+#define MPTIOCTL_DIAG_ACTION _IOWR(MPTIOCTL, 9,\
+ struct mpr_diag_action)
+#define MPTIOCTL_REG_ACCESS _IOWR(MPTIOCTL, 10,\
+ struct mpr_reg_access)
+#define MPTIOCTL_BTDH_MAPPING _IOWR(MPTIOCTL, 11,\
+ struct mpr_btdh_mapping)
+
+#endif /* !_MPR_IOCTL_H_ */
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
new file mode 100644
index 0000000000000..7f0fc00a9c52a
--- /dev/null
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -0,0 +1,2269 @@
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/sysctl.h>
+#include <sys/eventhandler.h>
+#include <sys/uio.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_mapping.h>
+
+/**
+ * _mapping_clear_map_entry - Clear a particular mapping entry.
+ * @map_entry: map table entry
+ *
+ * Resets all fields of a device map table entry to their "unused"
+ * values: indices to the bad-index sentinels, channel/id to -1, and
+ * TLR back to the no-TLR default.
+ *
+ * Returns nothing.
+ */
+static inline void
+_mapping_clear_map_entry(struct dev_mapping_table *map_entry)
+{
+ map_entry->physical_id = 0;
+ map_entry->device_info = 0;
+ map_entry->phy_bits = 0;
+ map_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
+ map_entry->dev_handle = 0;
+ map_entry->channel = -1;
+ map_entry->id = -1;
+ map_entry->missing_count = 0;
+ map_entry->init_complete = 0;
+ map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+}
+
+/**
+ * _mapping_clear_enc_entry - Clear a particular enclosure table entry.
+ * @enc_entry: enclosure table entry
+ *
+ * Resets all fields of an enclosure table entry to their "unused"
+ * values (bad-index sentinels for start_index/dpm_entry_num, zero for
+ * everything else).
+ *
+ * Returns nothing.
+ */
+static inline void
+_mapping_clear_enc_entry(struct enc_mapping_table *enc_entry)
+{
+ enc_entry->enclosure_id = 0;
+ enc_entry->start_index = MPR_MAPTABLE_BAD_IDX;
+ enc_entry->phy_bits = 0;
+ enc_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
+ enc_entry->enc_handle = 0;
+ enc_entry->num_slots = 0;
+ enc_entry->start_slot = 0;
+ enc_entry->missing_count = 0;
+ enc_entry->removal_flag = 0;
+ enc_entry->skip_search = 0;
+ enc_entry->init_complete = 0;
+}
+
+/**
+ * _mapping_commit_enc_entry - write a particular enc entry in DPM page0.
+ * @sc: per adapter object
+ * @et_entry: enclosure table entry
+ *
+ * Packs the enclosure entry (slot count + missing count) into its DPM
+ * page 0 slot and writes the page to the controller.  The in-core DPM
+ * copy is converted to little-endian for the write and converted back
+ * to host order afterwards (on both the success and failure paths).
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_mapping_commit_enc_entry(struct mpr_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ struct dev_mapping_table *mt_entry;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2DriverMappingPage0_t config_page;
+
+ /* Nothing to persist when driver persistent mapping is disabled. */
+ if (!sc->is_dpm_enable)
+ return 0;
+
+ memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
+ memcpy(&config_page.Header, (u8 *) sc->dpm_pg0,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += et_entry->dpm_entry_num;
+ /*
+ * NOTE(review): the PhysicalIdentifier halves are stored in host
+ * order here, unlike the htole16/htole32 fields below — confirm
+ * this matches how readers of dpm_pg0 interpret them.
+ */
+ dpm_entry->PhysicalIdentifier.Low =
+ ( 0xFFFFFFFF & et_entry->enclosure_id);
+ dpm_entry->PhysicalIdentifier.High =
+ ( et_entry->enclosure_id >> 32);
+ mt_entry = &sc->mapping_table[et_entry->start_index];
+ dpm_entry->DeviceIndex = htole16(mt_entry->id);
+ /* MappingInformation = num_slots in the slot field | missing count. */
+ dpm_entry->MappingInformation = et_entry->num_slots;
+ dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
+ dpm_entry->MappingInformation |= et_entry->missing_count;
+ dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation);
+ dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits);
+ dpm_entry->Reserved1 = 0;
+
+ memcpy(&config_page.Entry, (u8 *)dpm_entry,
+ sizeof(Mpi2DriverMap0Entry_t));
+ if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
+ et_entry->dpm_entry_num)) {
+ printf("%s: write of dpm entry %d for enclosure failed\n",
+ __func__, et_entry->dpm_entry_num);
+ /* Restore host byte order in the cached copy before failing. */
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping =
+ le32toh(dpm_entry->PhysicalBitsMapping);
+ return -1;
+ }
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ dpm_entry->PhysicalBitsMapping =
+ le32toh(dpm_entry->PhysicalBitsMapping);
+ return 0;
+}
+
+/**
+ * _mapping_commit_map_entry - write a particular map table entry in DPM page0.
+ * @sc: per adapter object
+ * @mt_entry: device map table entry
+ *
+ * Packs the device entry (WWID, device index, missing count) into its
+ * DPM page 0 slot and writes the page to the controller.  The in-core
+ * DPM copy is converted back to host order afterwards on both paths.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+
+static int
+_mapping_commit_map_entry(struct mpr_softc *sc,
+ struct dev_mapping_table *mt_entry)
+{
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2DriverMappingPage0_t config_page;
+
+ /* Nothing to persist when driver persistent mapping is disabled. */
+ if (!sc->is_dpm_enable)
+ return 0;
+
+ memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
+ memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry = dpm_entry + mt_entry->dpm_entry_num;
+ dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF &
+ mt_entry->physical_id);
+ dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32);
+ dpm_entry->DeviceIndex = htole16(mt_entry->id);
+ /*
+ * Convert MappingInformation to little-endian exactly once.  The
+ * previous code ran it through htole16() a second time, which is a
+ * no-op on little-endian hosts but swaps the bytes back (corrupting
+ * the on-controller value) on big-endian hosts.
+ */
+ dpm_entry->MappingInformation = htole16(mt_entry->missing_count);
+ dpm_entry->PhysicalBitsMapping = 0;
+ dpm_entry->Reserved1 = 0;
+ memcpy(&config_page.Entry, (u8 *)dpm_entry,
+ sizeof(Mpi2DriverMap0Entry_t));
+ if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
+ mt_entry->dpm_entry_num)) {
+ printf("%s: write of dpm entry %d for device failed\n",
+ __func__, mt_entry->dpm_entry_num);
+ /* Restore host byte order in the cached copy before failing. */
+ dpm_entry->MappingInformation = le16toh(dpm_entry->
+ MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ return -1;
+ }
+
+ dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation);
+ dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
+ return 0;
+}
+
+/**
+ * _mapping_get_ir_maprange - get start and end index for IR map range.
+ * @sc: per adapter object
+ * @start_idx: place holder for start index
+ * @end_idx: place holder for end index
+ *
+ * The IR volumes can be mapped either at start or end of the mapping table
+ * this function gets the detail of where IR volume mapping starts and ends
+ * in the device mapping table
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_get_ir_maprange(struct mpr_softc *sc, u32 *start_idx, u32 *end_idx)
+{
+ u16 volume_mapping_flags;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ /* Low mapping: volumes start at 0, or 1 if ID 0 is reserved. */
+ *start_idx = 0;
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ *start_idx = 1;
+ } else
+ /* High mapping: volumes occupy the tail of the table. */
+ *start_idx = sc->max_devices - sc->max_volumes;
+ *end_idx = *start_idx + sc->max_volumes - 1;
+}
+
+/**
+ * _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID
+ * @sc: per adapter object
+ * @enc_id: enclosure logical identifier (little-endian, as read from HW)
+ * @phy_bits: phy bit mask (little-endian); 0 in the table entry matches any
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_enc_idx_from_id(struct mpr_softc *sc, u64 enc_id,
+ u64 phy_bits)
+{
+ struct enc_mapping_table *et_entry;
+ u8 enc_idx = 0;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if ((et_entry->enclosure_id == le64toh(enc_id)) &&
+ (!et_entry->phy_bits || (et_entry->phy_bits &
+ le32toh(phy_bits))))
+ return enc_idx;
+ }
+ return MPR_ENCTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_enc_idx_from_handle - get enclosure index from handle
+ * @sc: per adapter object
+ * @handle: enclosure handle
+ *
+ * Entries with a non-zero missing count are skipped (the enclosure is
+ * currently absent).
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_enc_idx_from_handle(struct mpr_softc *sc, u16 handle)
+{
+ struct enc_mapping_table *et_entry;
+ u8 enc_idx = 0;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if (et_entry->missing_count)
+ continue;
+ if (et_entry->enc_handle == handle)
+ return enc_idx;
+ }
+ return MPR_ENCTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_high_missing_et_idx - get missing enclosure index
+ * @sc: per adapter object
+ *
+ * Search through the enclosure table and identifies the enclosure entry
+ * with high missing count and returns it's index; entries flagged
+ * skip_search are excluded from consideration.
+ *
+ * Returns the index of enclosure entry on success or bad index.
+ */
+static u8
+_mapping_get_high_missing_et_idx(struct mpr_softc *sc)
+{
+ struct enc_mapping_table *et_entry;
+ u8 high_missing_count = 0;
+ u8 enc_idx, high_idx = MPR_ENCTABLE_BAD_IDX;
+
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) {
+ et_entry = &sc->enclosure_table[enc_idx];
+ if ((et_entry->missing_count > high_missing_count) &&
+ !et_entry->skip_search) {
+ high_missing_count = et_entry->missing_count;
+ high_idx = enc_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_high_missing_mt_idx - get missing map table index
+ * @sc: per adapter object
+ *
+ * Search through the map table (excluding the reserved target-ID-0 slot
+ * and the IR volume range, when present) and identifies the device entry
+ * with high missing count and returns it's index
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_high_missing_mt_idx(struct mpr_softc *sc)
+{
+ /*
+ * Use the map-table bad-index sentinel here; the previous code
+ * initialized high_idx with MPR_ENCTABLE_BAD_IDX (the enclosure
+ * table's u8 sentinel), which callers comparing against
+ * MPR_MAPTABLE_BAD_IDX would mistake for a valid map index.
+ */
+ u32 map_idx, high_idx = MPR_MAPTABLE_BAD_IDX;
+ u8 high_missing_count = 0;
+ u32 start_idx, end_idx, start_idx_ir = 0, end_idx_ir;
+ struct dev_mapping_table *mt_entry;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ start_idx = 0;
+ end_idx = sc->max_devices;
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ start_idx = 1;
+ if (sc->ir_firmware) {
+ /* Carve the IR volume range out of the search window. */
+ _mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir);
+ if (start_idx == start_idx_ir)
+ start_idx = end_idx_ir + 1;
+ else
+ end_idx = start_idx_ir;
+ }
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) {
+ if (mt_entry->missing_count > high_missing_count) {
+ high_missing_count = mt_entry->missing_count;
+ high_idx = map_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID
+ * @sc: per adapter object
+ * @wwid: world wide unique ID of the volume
+ *
+ * Searches only the IR volume range of the map table.
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_ir_mt_idx_from_wwid(struct mpr_softc *sc, u64 wwid)
+{
+ u32 start_idx, end_idx, map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (mt_entry->physical_id == wwid)
+ return map_idx;
+
+ return MPR_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_mt_idx_from_id - get map table index from a device ID
+ * @sc: per adapter object
+ * @dev_id: device identifier (SAS Address)
+ *
+ * Linear scan of the whole map table.
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_mt_idx_from_id(struct mpr_softc *sc, u64 dev_id)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->physical_id == dev_id)
+ return map_idx;
+ }
+ return MPR_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle
+ * @sc: per adapter object
+ * @volHandle: volume device handle
+ *
+ * Searches only the IR volume range of the map table.
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_ir_mt_idx_from_handle(struct mpr_softc *sc, u16 volHandle)
+{
+ u32 start_idx, end_idx, map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (mt_entry->dev_handle == volHandle)
+ return map_idx;
+
+ return MPR_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_mt_idx_from_handle - get map table index from handle
+ * @sc: per adapter object
+ * @handle: device handle
+ *
+ * Linear scan of the whole map table.
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_mt_idx_from_handle(struct mpr_softc *sc, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle)
+ return map_idx;
+ }
+ return MPR_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_free_ir_mt_idx - get first free index for a volume
+ * @sc: per adapter object
+ *
+ * Search through mapping table for free index for a volume and if no free
+ * index then looks for a volume with high mapping index
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_free_ir_mt_idx(struct mpr_softc *sc)
+{
+ u8 high_missing_count = 0;
+ u32 start_idx, end_idx, map_idx;
+ u32 high_idx = MPR_MAPTABLE_BAD_IDX;
+ struct dev_mapping_table *mt_entry;
+
+ _mapping_get_ir_maprange(sc, &start_idx, &end_idx);
+
+ /* First pass: any slot in the IR range not marked in-use. */
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++)
+ if (!(mt_entry->device_info & MPR_MAP_IN_USE))
+ return map_idx;
+
+ /* Second pass: evict the volume with the highest missing count. */
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) {
+ if (mt_entry->missing_count > high_missing_count) {
+ high_missing_count = mt_entry->missing_count;
+ high_idx = map_idx;
+ }
+ }
+ return high_idx;
+}
+
+/**
+ * _mapping_get_free_mt_idx - get first free index for a device
+ * @sc: per adapter object
+ * @start_idx: offset in the table to start search
+ *
+ * A slot is free when it is neither in use nor reserved.  When IR
+ * firmware maps volumes at the high end of the table, that tail region
+ * is excluded from the search.
+ *
+ * Returns the index of map table entry on success or bad index.
+ */
+static u32
+_mapping_get_free_mt_idx(struct mpr_softc *sc, u32 start_idx)
+{
+ u32 map_idx, max_idx = sc->max_devices;
+ struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx];
+ u16 volume_mapping_flags;
+
+ volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (sc->ir_firmware && (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING))
+ max_idx -= sc->max_volumes;
+ for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++)
+ if (!(mt_entry->device_info & (MPR_MAP_IN_USE |
+ MPR_DEV_RESERVED)))
+ return map_idx;
+
+ return MPR_MAPTABLE_BAD_IDX;
+}
+
+/**
+ * _mapping_get_dpm_idx_from_id - get DPM index from ID
+ * @sc: per adapter object
+ * @id: volume WWID or enclosure ID or device ID
+ * @phy_bits: phy bit mask; 0 (or a zero mask in the entry) matches any
+ *
+ * Returns the index of DPM entry on success or bad index.
+ */
+static u16
+_mapping_get_dpm_idx_from_id(struct mpr_softc *sc, u64 id, u32 phy_bits)
+{
+ u16 entry_num;
+ uint64_t PhysicalIdentifier;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
+ dpm_entry++) {
+ /*
+ * Rebuild the 64-bit identifier for each entry.  The previous
+ * code computed PhysicalIdentifier only once, from the first
+ * entry, and then compared every subsequent entry against
+ * that stale value.
+ */
+ PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High;
+ PhysicalIdentifier = (PhysicalIdentifier << 32) |
+ dpm_entry->PhysicalIdentifier.Low;
+ if ((id == PhysicalIdentifier) &&
+ (!phy_bits || !dpm_entry->PhysicalBitsMapping ||
+ (phy_bits & dpm_entry->PhysicalBitsMapping)))
+ return entry_num;
+ }
+
+ return MPR_DPM_BAD_IDX;
+}
+
+
+/**
+ * _mapping_get_free_dpm_idx - get first available DPM index
+ * @sc: per adapter object
+ *
+ * Returns the index of DPM entry on success or bad index.
+ */
+static u32
+_mapping_get_free_dpm_idx(struct mpr_softc *sc)
+{
+ u16 entry_num;
+
+ for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
+ if (!sc->dpm_entry_used[entry_num])
+ return entry_num;
+ }
+ return MPR_DPM_BAD_IDX;
+}
+
+/**
+ * _mapping_update_ir_missing_cnt - Updates missing count for a volume
+ * @sc: per adapter object
+ * @map_idx: map table index of the volume
+ * @element: IR configuration change element
+ * @wwid: IR volume ID.
+ *
+ * Updates the missing count in the map table and in the DPM entry for a
+ * volume, allocating a DPM slot for the volume when it does not have one
+ * yet.  Does nothing when driver persistent mapping is disabled.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_update_ir_missing_cnt(struct mpr_softc *sc, u32 map_idx,
+ Mpi2EventIrConfigElement_t *element, u64 wwid)
+{
+ struct dev_mapping_table *mt_entry;
+ u8 missing_cnt, reason = element->ReasonCode;
+ u16 dpm_idx;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ if (!sc->is_dpm_enable)
+ return;
+ mt_entry = &sc->mapping_table[map_idx];
+ if (reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) {
+ mt_entry->missing_count = 0;
+ } else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ mt_entry->missing_count = 0;
+ mt_entry->init_complete = 0;
+ } else if ((reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED) ||
+ (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)) {
+ /* Volume went away: bump missing count (saturating). */
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
+ mt_entry->missing_count++;
+ else
+ mt_entry->init_complete = 1;
+ }
+ if (!mt_entry->missing_count)
+ mt_entry->missing_count++;
+ mt_entry->dev_handle = 0;
+ }
+
+ dpm_idx = mt_entry->dpm_entry_num;
+ if (dpm_idx == MPR_DPM_BAD_IDX) {
+ if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) ||
+ (reason == MPI2_EVENT_IR_CHANGE_RC_REMOVED))
+ dpm_idx = _mapping_get_dpm_idx_from_id(sc,
+ mt_entry->physical_id, 0);
+ else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED)
+ return;
+ }
+ if (dpm_idx != MPR_DPM_BAD_IDX) {
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += dpm_idx;
+ missing_cnt = dpm_entry->MappingInformation &
+ MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
+ /*
+ * Rebuild the 64-bit WWID from the two 32-bit DPM halves.
+ * The previous code OR'ed High and Low together without
+ * shifting High up 32 bits, so High's bits landed in the low
+ * word and the comparison could never match a WWID with a
+ * non-zero high half.
+ */
+ if ((mt_entry->physical_id ==
+ le64toh(((u64)dpm_entry->PhysicalIdentifier.High << 32) |
+ dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt ==
+ mt_entry->missing_count))
+ mt_entry->init_complete = 1;
+ } else {
+ /* Volume has no DPM slot yet; try to allocate one. */
+ dpm_idx = _mapping_get_free_dpm_idx(sc);
+ mt_entry->init_complete = 0;
+ }
+
+ if ((dpm_idx != MPR_DPM_BAD_IDX) && !mt_entry->init_complete) {
+ /* (Re)write the DPM entry and mark it for flushing. */
+ mt_entry->init_complete = 1;
+ mt_entry->dpm_entry_num = dpm_idx;
+ dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += dpm_idx;
+ dpm_entry->PhysicalIdentifier.Low =
+ (0xFFFFFFFF & mt_entry->physical_id);
+ dpm_entry->PhysicalIdentifier.High =
+ (mt_entry->physical_id >> 32);
+ dpm_entry->DeviceIndex = map_idx;
+ dpm_entry->MappingInformation = mt_entry->missing_count;
+ dpm_entry->PhysicalBitsMapping = 0;
+ dpm_entry->Reserved1 = 0;
+ sc->dpm_flush_entry[dpm_idx] = 1;
+ sc->dpm_entry_used[dpm_idx] = 1;
+ } else if (dpm_idx == MPR_DPM_BAD_IDX) {
+ printf("%s: no space to add entry in DPM table\n", __func__);
+ mt_entry->init_complete = 1;
+ }
+}
+
+/**
+ * _mapping_add_to_removal_table - mark an entry for removal
+ * @sc: per adapter object
+ * @handle: Handle of enclosures/device/volume
+ * @dpm_idx: DPM entry number (0 means "none supplied")
+ *
+ * Adds the handle or DPM entry number in the first free slot of the
+ * removal table; in enclosure/slot mapping mode the DPM index takes
+ * precedence over the handle.
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_add_to_removal_table(struct mpr_softc *sc, u16 handle,
+ u16 dpm_idx)
+{
+ struct map_removal_table *remove_entry;
+ u32 i;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ remove_entry = sc->removal_table;
+
+ for (i = 0; i < sc->max_devices; i++, remove_entry++) {
+ /* Skip slots that are already occupied. */
+ if (remove_entry->dev_handle || remove_entry->dpm_entry_num !=
+ MPR_DPM_BAD_IDX)
+ continue;
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ if (dpm_idx)
+ remove_entry->dpm_entry_num = dpm_idx;
+ if (remove_entry->dpm_entry_num == MPR_DPM_BAD_IDX)
+ remove_entry->dev_handle = handle;
+ } else if ((ioc_pg8_flags &
+ MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING)
+ remove_entry->dev_handle = handle;
+ break;
+ }
+
+}
+
+/**
+ * _mapping_update_missing_count - Update missing count for a device
+ * @sc: per adapter object
+ * @topo_change: Topology change event entry
+ *
+ * Search through the topology change list and if any device is found not
+ * responding it's associated map table entry and DPM entry is updated
+ *
+ * Returns nothing.
+ */
+static void
+_mapping_update_missing_count(struct mpr_softc *sc,
+ struct _map_topology_change *topo_change)
+{
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ u8 entry;
+ struct _map_phy_change *phy_change;
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+
+ for (entry = 0; entry < topo_change->num_entries; entry++) {
+ phy_change = &topo_change->phy_details[entry];
+ /* Only "target not responding" events are handled here. */
+ if (!phy_change->dev_handle || (phy_change->reason !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change->
+ dev_handle);
+ phy_change->is_processed = 1;
+ if (map_idx == MPR_MAPTABLE_BAD_IDX) {
+ printf("%s: device is already removed from mapping "
+ "table\n", __func__);
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ /* Bump missing count (saturating at MPR_MAX_MISSING_COUNT). */
+ if (!mt_entry->init_complete) {
+ if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT)
+ mt_entry->missing_count++;
+ else
+ mt_entry->init_complete = 1;
+ }
+ if (!mt_entry->missing_count)
+ mt_entry->missing_count++;
+ _mapping_add_to_removal_table(sc, mt_entry->dev_handle, 0);
+ mt_entry->dev_handle = 0;
+
+ /* In persistence mode, reflect the new count into DPM pg0. */
+ if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) &&
+ sc->is_dpm_enable && !mt_entry->init_complete &&
+ mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
+ dpm_entry =
+ (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += mt_entry->dpm_entry_num;
+ dpm_entry->MappingInformation = mt_entry->missing_count;
+ sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1;
+ }
+ mt_entry->init_complete = 1;
+ }
+}
+
+/**
+ * _mapping_find_enc_map_space -find map table entries for enclosure
+ * @sc: per adapter object
+ * @et_entry: enclosure entry
+ *
+ * Search through the mapping table defragment it and provide contiguous
+ * space in map table for a particular enclosure entry
+ *
+ * Three passes: (1) look for a contiguous run already associated with
+ * this enclosure's ID/phy bits, (2) look for any contiguous unreserved
+ * run, (3) evict the enclosure with the highest missing count and
+ * retry until space is found or no evictable enclosure remains.
+ *
+ * Returns start index in map table or bad index.
+ */
+static u32
+_mapping_find_enc_map_space(struct mpr_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ u16 vol_mapping_flags;
+ u32 skip_count, end_of_table, map_idx, enc_idx;
+ u16 num_found;
+ u32 start_idx = MPR_MAPTABLE_BAD_IDX;
+ struct dev_mapping_table *mt_entry;
+ struct enc_mapping_table *enc_entry;
+ unsigned char done_flag = 0, found_space;
+ u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
+
+ skip_count = sc->num_rsvd_entries;
+ num_found = 0;
+
+ vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+
+ /* Exclude the IR volume tail when volumes map high. */
+ if (!sc->ir_firmware)
+ end_of_table = sc->max_devices;
+ else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)
+ end_of_table = sc->max_devices;
+ else
+ end_of_table = sc->max_devices - sc->max_volumes;
+
+ /* Pass 1: a run already tagged with this enclosure's identity. */
+ for (map_idx = (max_num_phy_ids + skip_count);
+ map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if ((et_entry->enclosure_id == mt_entry->physical_id) &&
+ (!mt_entry->phy_bits || (mt_entry->phy_bits &
+ et_entry->phy_bits))) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ return start_idx;
+ }
+ } else
+ num_found = 0;
+ }
+ /* Pass 2: any contiguous run of unreserved entries. */
+ for (map_idx = (max_num_phy_ids + skip_count);
+ map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ return start_idx;
+ }
+ } else
+ num_found = 0;
+ }
+
+ /* Pass 3: evict high-missing-count enclosures until space opens. */
+ while (!done_flag) {
+ enc_idx = _mapping_get_high_missing_et_idx(sc);
+ if (enc_idx == MPR_ENCTABLE_BAD_IDX)
+ return MPR_MAPTABLE_BAD_IDX;
+ enc_entry = &sc->enclosure_table[enc_idx];
+ /*VSP FIXME*/
+ enc_entry->skip_search = 1;
+ /* Un-reserve the evicted enclosure's map table range. */
+ mt_entry = &sc->mapping_table[enc_entry->start_index];
+ for (map_idx = enc_entry->start_index; map_idx <
+ (enc_entry->start_index + enc_entry->num_slots); map_idx++,
+ mt_entry++)
+ mt_entry->device_info &= ~MPR_DEV_RESERVED;
+ found_space = 0;
+ for (map_idx = (max_num_phy_ids +
+ skip_count); map_idx < end_of_table; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (!(mt_entry->device_info & MPR_DEV_RESERVED)) {
+ num_found += 1;
+ if (num_found == et_entry->num_slots) {
+ start_idx = (map_idx - num_found) + 1;
+ found_space = 1;
+ }
+ } else
+ num_found = 0;
+ }
+
+ if (!found_space)
+ continue;
+ /* Clear out whatever previously occupied the chosen run. */
+ for (map_idx = start_idx; map_idx < (start_idx + num_found);
+ map_idx++) {
+ enc_entry = sc->enclosure_table;
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
+ enc_idx++, enc_entry++) {
+ /*
+ * NOTE(review): valid slots for enc_entry are
+ * start_index .. start_index + num_slots - 1;
+ * the '>' below admits one index past the end
+ * — possible off-by-one, verify.
+ */
+ if (map_idx < enc_entry->start_index ||
+ map_idx > (enc_entry->start_index +
+ enc_entry->num_slots))
+ continue;
+ if (!enc_entry->removal_flag) {
+ enc_entry->removal_flag = 1;
+ _mapping_add_to_removal_table(sc, 0,
+ enc_entry->dpm_entry_num);
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->device_info &
+ MPR_MAP_IN_USE) {
+ _mapping_add_to_removal_table(sc,
+ mt_entry->dev_handle, 0);
+ _mapping_clear_map_entry(mt_entry);
+ }
+ /*
+ * NOTE(review): this clears et_entry (the
+ * caller's enclosure) when the loop reaches
+ * the end of enc_entry's range — enc_entry
+ * may have been intended; verify.
+ */
+ if (map_idx == (enc_entry->start_index +
+ enc_entry->num_slots - 1))
+ _mapping_clear_enc_entry(et_entry);
+ }
+ }
+ /* Re-reserve ranges of enclosures that were not removed. */
+ enc_entry = sc->enclosure_table;
+ for (enc_idx = 0; enc_idx < sc->num_enc_table_entries;
+ enc_idx++, enc_entry++) {
+ if (!enc_entry->removal_flag) {
+ mt_entry = &sc->mapping_table[enc_entry->
+ start_index];
+ for (map_idx = enc_entry->start_index; map_idx <
+ (enc_entry->start_index +
+ enc_entry->num_slots); map_idx++,
+ mt_entry++)
+ mt_entry->device_info |=
+ MPR_DEV_RESERVED;
+ /*
+ * NOTE(review): resets skip_search on
+ * et_entry rather than enc_entry — looks
+ * suspicious; verify intent.
+ */
+ et_entry->skip_search = 0;
+ }
+ }
+ done_flag = 1;
+ }
+ return start_idx;
+}
+
+/**
+ * _mapping_get_dev_info -get information about newly added devices
+ * @sc: per adapter object
+ * @topo_change: Topology change event entry
+ *
+ * Search through the topology change event list and issues sas device pg0
+ * requests for the newly added device and reserved entries in tables
+ *
+ * Returns nothing
+ */
+static void
+_mapping_get_dev_info(struct mpr_softc *sc,
+ struct _map_topology_change *topo_change)
+{
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u8 entry, enc_idx, phy_idx;
+ u32 map_idx, index, device_info;
+ struct _map_phy_change *phy_change, *tmp_phy_change;
+ uint64_t sas_address;
+ struct enc_mapping_table *et_entry;
+ struct dev_mapping_table *mt_entry;
+ u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED;
+ int rc;
+
+ for (entry = 0; entry < topo_change->num_entries; entry++) {
+ phy_change = &topo_change->phy_details[entry];
+ /* Only unprocessed "target added" events are of interest. */
+ if (phy_change->is_processed || !phy_change->dev_handle ||
+ phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED)
+ continue;
+ if (mpr_config_get_sas_device_pg0(sc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ phy_change->dev_handle)) {
+ phy_change->is_processed = 1;
+ continue;
+ }
+
+ device_info = le32toh(sas_device_pg0.DeviceInfo);
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+ if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) {
+ /*
+ * SATA disks have no real SAS address; use a
+ * hashed one so persistence survives reboots,
+ * falling back to the pg0 address on error.
+ */
+ rc = mprsas_get_sas_address_for_sata_disk(sc,
+ &sas_address, phy_change->dev_handle,
+ device_info);
+ if (rc) {
+ printf("%s: failed to compute the "
+ "hashed SAS Address for SATA "
+ "device with handle 0x%04x\n",
+ __func__, phy_change->dev_handle);
+ sas_address =
+ sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ mpr_dprint(sc, MPR_INFO, "SAS Address for SATA "
+ "device = %jx\n", sas_address);
+ } else {
+ sas_address =
+ sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ } else {
+ sas_address = sas_device_pg0.SASAddress.High;
+ sas_address = (sas_address << 32) |
+ sas_device_pg0.SASAddress.Low;
+ }
+ phy_change->physical_id = sas_address;
+ phy_change->slot = le16toh(sas_device_pg0.Slot);
+ phy_change->device_info =
+ le32toh(sas_device_pg0.DeviceInfo);
+
+ /* Enclosure/slot mode: reserve map table space up front. */
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ enc_idx = _mapping_get_enc_idx_from_handle(sc,
+ topo_change->enc_handle);
+ if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
+ phy_change->is_processed = 1;
+ printf("%s: failed to add the device with "
+ "handle 0x%04x because the enclosure is "
+ "not in the mapping table\n", __func__,
+ phy_change->dev_handle);
+ continue;
+ }
+ if (!((phy_change->device_info &
+ MPI2_SAS_DEVICE_INFO_END_DEVICE) &&
+ (phy_change->device_info &
+ (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI2_SAS_DEVICE_INFO_STP_TARGET |
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) {
+ phy_change->is_processed = 1;
+ continue;
+ }
+ et_entry = &sc->enclosure_table[enc_idx];
+ /*
+ * NOTE(review): enclosure already has map space; this
+ * continue leaves is_processed unset for this phy —
+ * presumably handled by a later pass; verify.
+ */
+ if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX)
+ continue;
+ if (!topo_change->exp_handle) {
+ /* Direct-attached: use the reserved region. */
+ map_idx = sc->num_rsvd_entries;
+ et_entry->start_index = map_idx;
+ } else {
+ map_idx = _mapping_find_enc_map_space(sc,
+ et_entry);
+ et_entry->start_index = map_idx;
+ if (et_entry->start_index ==
+ MPR_MAPTABLE_BAD_IDX) {
+ /*
+ * No space: mark every pending add
+ * event processed and give up.
+ */
+ phy_change->is_processed = 1;
+ for (phy_idx = 0; phy_idx <
+ topo_change->num_entries;
+ phy_idx++) {
+ tmp_phy_change =
+ &topo_change->phy_details
+ [phy_idx];
+ if (tmp_phy_change->reason ==
+ add_code)
+ tmp_phy_change->
+ is_processed = 1;
+ }
+ break;
+ }
+ }
+ /* Reserve the enclosure's slots in the map table. */
+ mt_entry = &sc->mapping_table[map_idx];
+ for (index = map_idx; index < (et_entry->num_slots
+ + map_idx); index++, mt_entry++) {
+ mt_entry->device_info = MPR_DEV_RESERVED;
+ mt_entry->physical_id = et_entry->enclosure_id;
+ mt_entry->phy_bits = et_entry->phy_bits;
+ }
+ }
+ }
+}
+
+/**
+ * _mapping_set_mid_to_eid -set map table data from enclosure table
+ * @sc: per adapter object
+ * @et_entry: enclosure entry
+ *
+ * Stamps the enclosure's ID into the physical_id of every map table
+ * slot in the enclosure's range; no-op when the enclosure has no
+ * assigned range.
+ *
+ * Returns nothing
+ */
+static inline void
+_mapping_set_mid_to_eid(struct mpr_softc *sc,
+ struct enc_mapping_table *et_entry)
+{
+ struct dev_mapping_table *mt_entry;
+ u16 slots = et_entry->num_slots, map_idx;
+ u32 start_idx = et_entry->start_index;
+ if (start_idx != MPR_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[start_idx];
+ for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++)
+ mt_entry->physical_id = et_entry->enclosure_id;
+ }
+}
+
+/**
+ * _mapping_clear_removed_entries - mark the entries to be cleared
+ * @sc: per adapter object
+ *
+ * Search through the removal table and mark the entries which needs to be
+ * flushed to DPM and also updates the map table and enclosure table by
+ * clearing the corresponding entries.
+ *
+ * Returns nothing
+ */
+static void
+_mapping_clear_removed_entries(struct mpr_softc *sc)
+{
+ u32 remove_idx;
+ struct map_removal_table *remove_entry;
+ Mpi2DriverMap0Entry_t *dpm_entry;
+ u8 done_flag = 0, num_entries, m, i;
+ struct enc_mapping_table *et_entry, *from, *to;
+ u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ if (sc->is_dpm_enable) {
+ /* Zero each removed entry's DPM slot and queue a flush. */
+ remove_entry = sc->removal_table;
+ for (remove_idx = 0; remove_idx < sc->max_devices;
+ remove_idx++, remove_entry++) {
+ if (remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
+ dpm_entry = (Mpi2DriverMap0Entry_t *)
+ ((u8 *) sc->dpm_pg0 +
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ dpm_entry += remove_entry->dpm_entry_num;
+ dpm_entry->PhysicalIdentifier.Low = 0;
+ dpm_entry->PhysicalIdentifier.High = 0;
+ dpm_entry->DeviceIndex = 0;
+ dpm_entry->MappingInformation = 0;
+ dpm_entry->PhysicalBitsMapping = 0;
+ sc->dpm_flush_entry[remove_entry->
+ dpm_entry_num] = 1;
+ sc->dpm_entry_used[remove_entry->dpm_entry_num]
+ = 0;
+ remove_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
+ }
+ }
+ }
+ /*
+ * In enclosure/slot mode, compact the enclosure table: shift later
+ * entries down over each dead (handle-less, init-complete) entry,
+ * restarting the scan after every removal.
+ */
+ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
+ MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
+ num_entries = sc->num_enc_table_entries;
+ while (!done_flag) {
+ done_flag = 1;
+ et_entry = sc->enclosure_table;
+ for (i = 0; i < num_entries; i++, et_entry++) {
+ if (!et_entry->enc_handle && et_entry->
+ init_complete) {
+ done_flag = 0;
+ if (i != (num_entries - 1)) {
+ from = &sc->enclosure_table
+ [i+1];
+ to = &sc->enclosure_table[i];
+ for (m = i; m < (num_entries -
+ 1); m++, from++, to++) {
+ _mapping_set_mid_to_eid
+ (sc, to);
+ *to = *from;
+ }
+ _mapping_clear_enc_entry(to);
+ sc->num_enc_table_entries--;
+ num_entries =
+ sc->num_enc_table_entries;
+ } else {
+ _mapping_clear_enc_entry
+ (et_entry);
+ sc->num_enc_table_entries--;
+ num_entries =
+ sc->num_enc_table_entries;
+ }
+ }
+ }
+ }
+ }
+}
+
/**
 * _mapping_add_new_device - add the new device into mapping table
 * @sc: per adapter object
 * @topo_change: Topology change event entry
 *
 * Search through the topology change event list and update the map table,
 * enclosure table and DPM pages for the newly added devices.  Handles both
 * enclosure/slot mapping mode and device-persistence mapping mode.  If a
 * device cannot be mapped and this is the first attempt, mt_add_device_failed
 * is set so the caller can trigger a full retry.
 *
 * Returns nothing
 */
static void
_mapping_add_new_device(struct mpr_softc *sc,
    struct _map_topology_change *topo_change)
{
	u8 enc_idx, missing_cnt, is_removed = 0;
	u16 dpm_idx;
	u32 search_idx, map_idx;
	u32 entry;
	struct dev_mapping_table *mt_entry;
	struct enc_mapping_table *et_entry;
	struct _map_phy_change *phy_change;
	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
	Mpi2DriverMap0Entry_t *dpm_entry;
	uint64_t temp64_var;
	u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
	u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER);
	u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);

	for (entry = 0; entry < topo_change->num_entries; entry++) {
		phy_change = &topo_change->phy_details[entry];
		if (phy_change->is_processed)
			continue;
		/* Only target-added events with a valid handle are mapped. */
		if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED ||
		    !phy_change->dev_handle) {
			phy_change->is_processed = 1;
			continue;
		}
		if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
		    MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
			/*
			 * Enclosure/slot mapping: the device's map index is
			 * derived from its enclosure's start index plus its
			 * slot offset within the enclosure.
			 */
			enc_idx = _mapping_get_enc_idx_from_handle
			    (sc, topo_change->enc_handle);
			if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
				phy_change->is_processed = 1;
				printf("%s: failed to add the device with "
				    "handle 0x%04x because the enclosure is "
				    "not in the mapping table\n", __func__,
				    phy_change->dev_handle);
				continue;
			}
			et_entry = &sc->enclosure_table[enc_idx];
			if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) {
				/* Enclosure has no map table space yet; on
				 * the first attempt just flag the failure so
				 * a full retry can be scheduled. */
				phy_change->is_processed = 1;
				if (!sc->mt_full_retry) {
					sc->mt_add_device_failed = 1;
					continue;
				}
				printf("%s: failed to add the device with "
				    "handle 0x%04x because there is no free "
				    "space available in the mapping table\n",
				    __func__, phy_change->dev_handle);
				continue;
			}
			map_idx = et_entry->start_index + phy_change->slot -
			    et_entry->start_slot;
			mt_entry = &sc->mapping_table[map_idx];
			mt_entry->physical_id = phy_change->physical_id;
			mt_entry->channel = 0;
			mt_entry->id = map_idx;
			mt_entry->dev_handle = phy_change->dev_handle;
			mt_entry->missing_count = 0;
			mt_entry->dpm_entry_num = et_entry->dpm_entry_num;
			mt_entry->device_info = phy_change->device_info |
			    (MPR_DEV_RESERVED | MPR_MAP_IN_USE);
			if (sc->is_dpm_enable) {
				/*
				 * Persist the enclosure in DPM: reuse its
				 * existing DPM entry if one exists, otherwise
				 * look it up by ID, otherwise allocate a
				 * fresh entry.
				 */
				dpm_idx = et_entry->dpm_entry_num;
				if (dpm_idx == MPR_DPM_BAD_IDX)
					dpm_idx = _mapping_get_dpm_idx_from_id
					    (sc, et_entry->enclosure_id,
					    et_entry->phy_bits);
				if (dpm_idx == MPR_DPM_BAD_IDX) {
					dpm_idx = _mapping_get_free_dpm_idx(sc);
					if (dpm_idx != MPR_DPM_BAD_IDX) {
						/* Fill the new DPM entry from
						 * the enclosure entry and mark
						 * it dirty for the next flush
						 * to NVRAM. */
						dpm_entry =
						    (Mpi2DriverMap0Entry_t *)
						    ((u8 *) sc->dpm_pg0 +
						    hdr_sz);
						dpm_entry += dpm_idx;
						dpm_entry->
						    PhysicalIdentifier.Low =
						    (0xFFFFFFFF &
						    et_entry->enclosure_id);
						dpm_entry->
						    PhysicalIdentifier.High =
						    ( et_entry->enclosure_id
						    >> 32);
						dpm_entry->DeviceIndex =
						    (U16)et_entry->start_index;
						dpm_entry->MappingInformation =
						    et_entry->num_slots;
						dpm_entry->MappingInformation
						    <<= map_shift;
						dpm_entry->PhysicalBitsMapping
						    = et_entry->phy_bits;
						et_entry->dpm_entry_num =
						    dpm_idx;
						/* FIXME Do I need to set the dpm_idx in mt_entry too */
						sc->dpm_entry_used[dpm_idx] = 1;
						sc->dpm_flush_entry[dpm_idx] =
						    1;
						phy_change->is_processed = 1;
					} else {
						phy_change->is_processed = 1;
						mpr_dprint(sc, MPR_INFO, "%s: "
						    "failed to add the device "
						    "with handle 0x%04x to "
						    "persistent table because "
						    "there is no free space "
						    "available\n", __func__,
						    phy_change->dev_handle);
					}
				} else {
					et_entry->dpm_entry_num = dpm_idx;
					mt_entry->dpm_entry_num = dpm_idx;
				}
			}
			/* FIXME Why not mt_entry too? */
			et_entry->init_complete = 1;
		} else if ((ioc_pg8_flags &
		    MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
		    MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
			/*
			 * Device-persistence mapping: look the device up by
			 * its ID, else take a free entry, else as a last
			 * resort steal the entry with the highest missing
			 * count (removing whatever device occupies it).
			 */
			map_idx = _mapping_get_mt_idx_from_id
			    (sc, phy_change->physical_id);
			if (map_idx == MPR_MAPTABLE_BAD_IDX) {
				search_idx = sc->num_rsvd_entries;
				if (topo_change->exp_handle)
					search_idx += max_num_phy_ids;
				map_idx = _mapping_get_free_mt_idx(sc,
				    search_idx);
			}
			if (map_idx == MPR_MAPTABLE_BAD_IDX) {
				map_idx = _mapping_get_high_missing_mt_idx(sc);
				if (map_idx != MPR_MAPTABLE_BAD_IDX) {
					mt_entry = &sc->mapping_table[map_idx];
					if (mt_entry->dev_handle) {
						/* Evict the stale device so
						 * its slot can be reused. */
						_mapping_add_to_removal_table
						    (sc, mt_entry->dev_handle,
						    0);
						is_removed = 1;
					}
					mt_entry->init_complete = 0;
				}
			}
			if (map_idx != MPR_MAPTABLE_BAD_IDX) {
				mt_entry = &sc->mapping_table[map_idx];
				mt_entry->physical_id = phy_change->physical_id;
				mt_entry->channel = 0;
				mt_entry->id = map_idx;
				mt_entry->dev_handle = phy_change->dev_handle;
				mt_entry->missing_count = 0;
				mt_entry->device_info = phy_change->device_info
				    | (MPR_DEV_RESERVED | MPR_MAP_IN_USE);
			} else {
				phy_change->is_processed = 1;
				if (!sc->mt_full_retry) {
					sc->mt_add_device_failed = 1;
					continue;
				}
				printf("%s: failed to add the device with "
				    "handle 0x%04x because there is no free "
				    "space available in the mapping table\n",
				    __func__, phy_change->dev_handle);
				continue;
			}
			if (sc->is_dpm_enable) {
				if (mt_entry->dpm_entry_num !=
				    MPR_DPM_BAD_IDX) {
					/* Device already has a DPM entry;
					 * treat it as initialized only if the
					 * persisted ID matches and it is not
					 * marked missing. */
					dpm_idx = mt_entry->dpm_entry_num;
					dpm_entry = (Mpi2DriverMap0Entry_t *)
					    ((u8 *)sc->dpm_pg0 + hdr_sz);
					dpm_entry += dpm_idx;
					missing_cnt = dpm_entry->
					    MappingInformation &
					    MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
					temp64_var = dpm_entry->
					    PhysicalIdentifier.High;
					temp64_var = (temp64_var << 32) |
					    dpm_entry->PhysicalIdentifier.Low;
					if ((mt_entry->physical_id ==
					    temp64_var) && !missing_cnt)
						mt_entry->init_complete = 1;
				} else {
					dpm_idx = _mapping_get_free_dpm_idx(sc);
					mt_entry->init_complete = 0;
				}
				if (dpm_idx != MPR_DPM_BAD_IDX &&
				    !mt_entry->init_complete) {
					/* Write (or rewrite) the DPM entry
					 * for this device and schedule a
					 * flush. */
					mt_entry->init_complete = 1;
					mt_entry->dpm_entry_num = dpm_idx;
					dpm_entry = (Mpi2DriverMap0Entry_t *)
					    ((u8 *)sc->dpm_pg0 + hdr_sz);
					dpm_entry += dpm_idx;
					dpm_entry->PhysicalIdentifier.Low =
					    (0xFFFFFFFF &
					    mt_entry->physical_id);
					dpm_entry->PhysicalIdentifier.High =
					    (mt_entry->physical_id >> 32);
					dpm_entry->DeviceIndex = (U16) map_idx;
					dpm_entry->MappingInformation = 0;
					dpm_entry->PhysicalBitsMapping = 0;
					sc->dpm_entry_used[dpm_idx] = 1;
					sc->dpm_flush_entry[dpm_idx] = 1;
					phy_change->is_processed = 1;
				} else if (dpm_idx == MPR_DPM_BAD_IDX) {
					phy_change->is_processed = 1;
					mpr_dprint(sc, MPR_INFO, "%s: "
					    "failed to add the device "
					    "with handle 0x%04x to "
					    "persistent table because "
					    "there is no free space "
					    "available\n", __func__,
					    phy_change->dev_handle);
				}
			}
			mt_entry->init_complete = 1;
		}

		phy_change->is_processed = 1;
	}
	/* Devices evicted above need their old entries cleaned up now. */
	if (is_removed)
		_mapping_clear_removed_entries(sc);
}
+
/**
 * _mapping_flush_dpm_pages - flush the DPM pages to NVRAM
 * @sc: per adapter object
 *
 * Write every cached DPM entry that is marked dirty in dpm_flush_entry[]
 * out to the controller's persistent storage, one entry per config request.
 *
 * Returns nothing
 */
static void
_mapping_flush_dpm_pages(struct mpr_softc *sc)
{
	Mpi2DriverMap0Entry_t *dpm_entry;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2DriverMappingPage0_t config_page;
	u16 entry_num;

	for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) {
		if (!sc->dpm_flush_entry[entry_num])
			continue;
		/* Build a single-entry page: cached header plus this entry. */
		memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t));
		memcpy(&config_page.Header, (u8 *)sc->dpm_pg0,
		    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
		dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 +
		    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
		dpm_entry += entry_num;
		/* Convert the cached entry to wire (little-endian) byte order
		 * in place; it is converted back to host order below. */
		dpm_entry->MappingInformation = htole16(dpm_entry->
		    MappingInformation);
		dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex);
		dpm_entry->PhysicalBitsMapping = htole32(dpm_entry->
		    PhysicalBitsMapping);
		memcpy(&config_page.Entry, (u8 *)dpm_entry,
		    sizeof(Mpi2DriverMap0Entry_t));
		/* TODO-How to handle failed writes?  The entry stays dirty so
		 * a later flush will retry it. */
		if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
		    entry_num)) {
			printf("%s: write of dpm entry %d for device failed\n",
			    __func__, entry_num);
		} else
			sc->dpm_flush_entry[entry_num] = 0;
		/* Restore host byte order in the cached copy. */
		dpm_entry->MappingInformation = le16toh(dpm_entry->
		    MappingInformation);
		dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);
		dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->
		    PhysicalBitsMapping);
	}
}
+
+/**
+ * _mapping_allocate_memory- allocates the memory required for mapping tables
+ * @sc: per adapter object
+ *
+ * Allocates the memory for all the tables required for host mapping
+ *
+ * Return 0 on success or non-zero on failure.
+ */
+int
+mpr_mapping_allocate_memory(struct mpr_softc *sc)
+{
+ uint32_t dpm_pg0_sz;
+
+ sc->mapping_table = malloc((sizeof(struct dev_mapping_table) *
+ sc->max_devices), M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->mapping_table)
+ goto free_resources;
+
+ sc->removal_table = malloc((sizeof(struct map_removal_table) *
+ sc->max_devices), M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->removal_table)
+ goto free_resources;
+
+ sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) *
+ sc->max_enclosures), M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->enclosure_table)
+ goto free_resources;
+
+ sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries),
+ M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_entry_used)
+ goto free_resources;
+
+ sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries),
+ M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_flush_entry)
+ goto free_resources;
+
+ dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
+ (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
+
+ sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPR, M_ZERO|M_NOWAIT);
+ if (!sc->dpm_pg0) {
+ printf("%s: memory alloc failed for dpm page; disabling dpm\n",
+ __func__);
+ sc->is_dpm_enable = 0;
+ }
+
+ return 0;
+
+free_resources:
+ free(sc->mapping_table, M_MPR);
+ free(sc->removal_table, M_MPR);
+ free(sc->enclosure_table, M_MPR);
+ free(sc->dpm_entry_used, M_MPR);
+ free(sc->dpm_flush_entry, M_MPR);
+ free(sc->dpm_pg0, M_MPR);
+ printf("%s: device initialization failed due to failure in mapping "
+ "table memory allocation\n", __func__);
+ return -1;
+}
+
+/**
+ * mpr_mapping_free_memory- frees the memory allocated for mapping tables
+ * @sc: per adapter object
+ *
+ * Returns nothing.
+ */
+void
+mpr_mapping_free_memory(struct mpr_softc *sc)
+{
+ free(sc->mapping_table, M_MPR);
+ free(sc->removal_table, M_MPR);
+ free(sc->enclosure_table, M_MPR);
+ free(sc->dpm_entry_used, M_MPR);
+ free(sc->dpm_flush_entry, M_MPR);
+ free(sc->dpm_pg0, M_MPR);
+}
+
+
/**
 * _mapping_process_dpm_pg0 - rebuild the in-core tables from the DPM page
 * @sc: per adapter object
 *
 * Walk the Driver Mapping Page 0 read from the controller and reconstruct
 * the mapping table (and, in enclosure/slot mode, the enclosure table) from
 * the persisted entries.  Entries with a zero physical identifier are
 * treated as unused.
 *
 * Returns nothing.
 */
static void
_mapping_process_dpm_pg0(struct mpr_softc *sc)
{
	u8 missing_cnt, enc_idx;
	u16 slot_id, entry_num, num_slots;
	u32 map_idx, dev_idx, start_idx, end_idx;
	struct dev_mapping_table *mt_entry;
	Mpi2DriverMap0Entry_t *dpm_entry;
	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
	u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
	struct enc_mapping_table *et_entry;
	u64 physical_id;
	u32 phy_bits = 0;

	/* start_idx/end_idx bound the map table range reserved for RAID
	 * volumes; only valid (and only read) when ir_firmware is set. */
	if (sc->ir_firmware)
		_mapping_get_ir_maprange(sc, &start_idx, &end_idx);

	dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 +
	    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
	for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++,
	    dpm_entry++) {
		/* NOTE(review): PhysicalIdentifier words are used without
		 * le32toh here, unlike the other fields below — presumably
		 * benign on little-endian hosts; confirm for big-endian. */
		physical_id = dpm_entry->PhysicalIdentifier.High;
		physical_id = (physical_id << 32) |
		    dpm_entry->PhysicalIdentifier.Low;
		if (!physical_id) {
			sc->dpm_entry_used[entry_num] = 0;
			continue;
		}
		sc->dpm_entry_used[entry_num] = 1;
		/* Convert the cached entry to host byte order in place. */
		dpm_entry->MappingInformation = le16toh(dpm_entry->
		    MappingInformation);
		missing_cnt = dpm_entry->MappingInformation &
		    MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
		dev_idx = le16toh(dpm_entry->DeviceIndex);
		phy_bits = le32toh(dpm_entry->PhysicalBitsMapping);
		/* Entries inside the IR range describe RAID volumes. */
		if (sc->ir_firmware && (dev_idx >= start_idx) &&
		    (dev_idx <= end_idx)) {
			mt_entry = &sc->mapping_table[dev_idx];
			mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High;
			mt_entry->physical_id = (mt_entry->physical_id << 32) |
			    dpm_entry->PhysicalIdentifier.Low;
			mt_entry->channel = MPR_RAID_CHANNEL;
			mt_entry->id = dev_idx;
			mt_entry->missing_count = missing_cnt;
			mt_entry->dpm_entry_num = entry_num;
			mt_entry->device_info = MPR_DEV_RESERVED;
			continue;
		}
		if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
		    MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
			/*
			 * Enclosure/slot mode: each DPM entry describes an
			 * enclosure; recreate its enclosure table entry and
			 * reserve its whole slot range in the map table.
			 */
			if (dev_idx < (sc->num_rsvd_entries +
			    max_num_phy_ids)) {
				/* Direct-attached range. */
				slot_id = 0;
				if (ioc_pg8_flags &
				    MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1)
					slot_id = 1;
				num_slots = max_num_phy_ids;
			} else {
				/* Expander-attached: slot count is encoded
				 * in MappingInformation. */
				slot_id = 0;
				num_slots = dpm_entry->MappingInformation &
				    MPI2_DRVMAP0_MAPINFO_SLOT_MASK;
				num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
			}
			enc_idx = sc->num_enc_table_entries;
			if (enc_idx >= sc->max_enclosures) {
				printf("%s: enclosure entries exceed max "
				    "enclosures of %d\n", __func__,
				    sc->max_enclosures);
				break;
			}
			sc->num_enc_table_entries++;
			et_entry = &sc->enclosure_table[enc_idx];
			physical_id = dpm_entry->PhysicalIdentifier.High;
			et_entry->enclosure_id = (physical_id << 32) |
			    dpm_entry->PhysicalIdentifier.Low;
			et_entry->start_index = dev_idx;
			et_entry->dpm_entry_num = entry_num;
			et_entry->num_slots = num_slots;
			et_entry->start_slot = slot_id;
			et_entry->missing_count = missing_cnt;
			et_entry->phy_bits = phy_bits;

			mt_entry = &sc->mapping_table[dev_idx];
			for (map_idx = dev_idx; map_idx < (dev_idx + num_slots);
			    map_idx++, mt_entry++) {
				/* A slot already claimed by another DPM entry
				 * indicates overlapping persisted ranges. */
				if (mt_entry->dpm_entry_num !=
				    MPR_DPM_BAD_IDX) {
					printf("%s: conflict in mapping table "
					    "for enclosure %d\n", __func__,
					    enc_idx);
					break;
				}
				physical_id = dpm_entry->PhysicalIdentifier.High;
				mt_entry->physical_id = (physical_id << 32) |
				    dpm_entry->PhysicalIdentifier.Low;
				mt_entry->phy_bits = phy_bits;
				mt_entry->channel = 0;
				mt_entry->id = dev_idx;
				mt_entry->dpm_entry_num = entry_num;
				mt_entry->missing_count = missing_cnt;
				mt_entry->device_info = MPR_DEV_RESERVED;
			}
		} else if ((ioc_pg8_flags &
		    MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
		    MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
			/* Device-persistence mode: one DPM entry maps one
			 * device directly to one map table slot. */
			map_idx = dev_idx;
			mt_entry = &sc->mapping_table[map_idx];
			if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
				printf("%s: conflict in mapping table for "
				    "device %d\n", __func__, map_idx);
				break;
			}
			physical_id = dpm_entry->PhysicalIdentifier.High;
			mt_entry->physical_id = (physical_id << 32) |
			    dpm_entry->PhysicalIdentifier.Low;
			mt_entry->phy_bits = phy_bits;
			mt_entry->channel = 0;
			mt_entry->id = dev_idx;
			mt_entry->missing_count = missing_cnt;
			mt_entry->dpm_entry_num = entry_num;
			mt_entry->device_info = MPR_DEV_RESERVED;
		}
	} /*close the loop for DPM table */
}
+
/**
 * mpr_mapping_check_devices - start of the day check for device availability
 * @sc: per adapter object
 * @sleep_flag: Flag indicating whether this function can sleep or not
 *              (currently only consumed by the disabled wait loop below)
 *
 * Bump the missing count of every reserved entry whose device did not show
 * up during discovery, commit the updated counts to DPM, and mark the
 * entries init-complete.
 *
 * Returns nothing.
 */
void
mpr_mapping_check_devices(struct mpr_softc *sc, int sleep_flag)
{
	u32 i;
/*	u32 cntdn, i;
	u32 timeout = 60;*/
	struct dev_mapping_table *mt_entry;
	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
	struct enc_mapping_table *et_entry;
	u32 start_idx, end_idx;

	/* We need to uncomment this when this function is called
	 * from the port enable complete */
#if 0
	sc->track_mapping_events = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		if (!sc->pending_map_events)
			break;
		if (sleep_flag == CAN_SLEEP)
			pause("mpr_pause", (hz/1000));/* 1msec sleep */
		else
			DELAY(500); /* 500 useconds delay */
	} while (--cntdn);


	if (!cntdn)
		printf("%s: there are %d"
		    " pending events after %d seconds of delay\n",
		    __func__, sc->pending_map_events, timeout);
#endif
	sc->pending_map_events = 0;

	if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
	    MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) {
		/* Enclosures that never completed init are missing; record
		 * that in their (possibly persisted) entries. */
		et_entry = sc->enclosure_table;
		for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) {
			if (!et_entry->init_complete) {
				if (et_entry->missing_count <
				    MPR_MAX_MISSING_COUNT) {
					et_entry->missing_count++;
					if (et_entry->dpm_entry_num !=
					    MPR_DPM_BAD_IDX)
						_mapping_commit_enc_entry(sc,
						    et_entry);
				}
				et_entry->init_complete = 1;
			}
		}
		if (!sc->ir_firmware)
			return;
		/* Also walk the RAID-volume range of the map table. */
		_mapping_get_ir_maprange(sc, &start_idx, &end_idx);
		mt_entry = &sc->mapping_table[start_idx];
		for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) {
			if (mt_entry->device_info & MPR_DEV_RESERVED
			    && !mt_entry->physical_id)
				mt_entry->init_complete = 1;
			else if (mt_entry->device_info & MPR_DEV_RESERVED) {
				if (!mt_entry->init_complete) {
					if (mt_entry->missing_count <
					    MPR_MAX_MISSING_COUNT) {
						mt_entry->missing_count++;
						if (mt_entry->dpm_entry_num !=
						    MPR_DPM_BAD_IDX)
							_mapping_commit_map_entry(sc,
							    mt_entry);
					}
					mt_entry->init_complete = 1;
				}
			}
		}
	} else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
	    MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
		/* Device-persistence mode: same missing-count accounting,
		 * applied to the whole map table. */
		mt_entry = sc->mapping_table;
		for (i = 0; i < sc->max_devices; i++, mt_entry++) {
			if (mt_entry->device_info & MPR_DEV_RESERVED
			    && !mt_entry->physical_id)
				mt_entry->init_complete = 1;
			else if (mt_entry->device_info & MPR_DEV_RESERVED) {
				if (!mt_entry->init_complete) {
					if (mt_entry->missing_count <
					    MPR_MAX_MISSING_COUNT) {
						mt_entry->missing_count++;
						if (mt_entry->dpm_entry_num !=
						    MPR_DPM_BAD_IDX)
							_mapping_commit_map_entry(sc,
							    mt_entry);
					}
					mt_entry->init_complete = 1;
				}
			}
		}
	}
}
+
+
+/**
+ * mpr_mapping_is_reinit_required - check whether event replay required
+ * @sc: per adapter object
+ *
+ * Checks the per ioc flags and decide whether reinit of events required
+ *
+ * Returns 1 for reinit of ioc 0 for not.
+ */
+int mpr_mapping_is_reinit_required(struct mpr_softc *sc)
+{
+ if (!sc->mt_full_retry && sc->mt_add_device_failed) {
+ sc->mt_full_retry = 1;
+ sc->mt_add_device_failed = 0;
+ _mapping_flush_dpm_pages(sc);
+ return 1;
+ }
+ sc->mt_full_retry = 1;
+ return 0;
+}
+
+/**
+ * mpr_mapping_initialize - initialize mapping tables
+ * @sc: per adapter object
+ *
+ * Read controller persitant mapping tables into internal data area.
+ *
+ * Return 0 for success or non-zero for failure.
+ */
+int
+mpr_mapping_initialize(struct mpr_softc *sc)
+{
+ uint16_t volume_mapping_flags, dpm_pg0_sz;
+ uint32_t i;
+ Mpi2ConfigReply_t mpi_reply;
+ int error;
+ uint8_t retry_count;
+ uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+
+ /* The additional 1 accounts for the virtual enclosure
+ * created for the controller
+ */
+ sc->max_enclosures = sc->facts->MaxEnclosures + 1;
+ sc->max_expanders = sc->facts->MaxSasExpanders;
+ sc->max_volumes = sc->facts->MaxVolumes;
+ sc->max_devices = sc->facts->MaxTargets + sc->max_volumes;
+ sc->pending_map_events = 0;
+ sc->num_enc_table_entries = 0;
+ sc->num_rsvd_entries = 0;
+ sc->num_channels = 1;
+ sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries;
+ sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0;
+ sc->track_mapping_events = 0;
+
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING)
+ sc->is_dpm_enable = 0;
+
+ if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+ sc->num_rsvd_entries = 1;
+
+ volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (sc->ir_firmware && (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING))
+ sc->num_rsvd_entries += sc->max_volumes;
+
+ error = mpr_mapping_allocate_memory(sc);
+ if (error)
+ return (error);
+
+ for (i = 0; i < sc->max_devices; i++)
+ _mapping_clear_map_entry(sc->mapping_table + i);
+
+ for (i = 0; i < sc->max_enclosures; i++)
+ _mapping_clear_enc_entry(sc->enclosure_table + i);
+
+ for (i = 0; i < sc->max_devices; i++) {
+ sc->removal_table[i].dev_handle = 0;
+ sc->removal_table[i].dpm_entry_num = MPR_DPM_BAD_IDX;
+ }
+
+ memset(sc->dpm_entry_used, 0, sc->max_dpm_entries);
+ memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries);
+
+ if (sc->is_dpm_enable) {
+ dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) +
+ (sc->max_dpm_entries *
+ sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY));
+ retry_count = 0;
+
+retry_read_dpm:
+ if (mpr_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0,
+ dpm_pg0_sz)) {
+ printf("%s: dpm page read failed; disabling dpm\n",
+ __func__);
+ if (retry_count < 3) {
+ retry_count++;
+ goto retry_read_dpm;
+ }
+ sc->is_dpm_enable = 0;
+ }
+ }
+
+ if (sc->is_dpm_enable)
+ _mapping_process_dpm_pg0(sc);
+
+ sc->track_mapping_events = 1;
+ return 0;
+}
+
/**
 * mpr_mapping_exit - clear mapping table and associated memory
 * @sc: per adapter object
 *
 * Push any still-dirty DPM entries out to NVRAM, then release every
 * mapping-related table.
 *
 * Returns nothing.
 */
void
mpr_mapping_exit(struct mpr_softc *sc)
{
	/* Flush before freeing: the flush reads the cached dpm_pg0 buffer. */
	_mapping_flush_dpm_pages(sc);
	mpr_mapping_free_memory(sc);
}
+
+/**
+ * mpr_mapping_get_sas_id - assign a target id for sas device
+ * @sc: per adapter object
+ * @sas_address: sas address of the device
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mpr_mapping_get_sas_id(struct mpr_softc *sc, uint64_t sas_address, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
+ sas_address)
+ return mt_entry->id;
+ }
+
+ return MPR_MAP_BAD_ID;
+}
+
+/**
+ * mpr_mapping_get_sas_id_from_handle - find a target id in mapping table using
+ * only the dev handle. This is just a wrapper function for the local function
+ * _mapping_get_mt_idx_from_handle.
+ * @sc: per adapter object
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mpr_mapping_get_sas_id_from_handle(struct mpr_softc *sc, u16 handle)
+{
+ return (_mapping_get_mt_idx_from_handle(sc, handle));
+}
+
+/**
+ * mpr_mapping_get_raid_id - assign a target id for raid device
+ * @sc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mpr_mapping_get_raid_id(struct mpr_softc *sc, u64 wwid, u16 handle)
+{
+ u32 map_idx;
+ struct dev_mapping_table *mt_entry;
+
+ for (map_idx = 0; map_idx < sc->max_devices; map_idx++) {
+ mt_entry = &sc->mapping_table[map_idx];
+ if (mt_entry->dev_handle == handle && mt_entry->physical_id ==
+ wwid)
+ return mt_entry->id;
+ }
+
+ return MPR_MAP_BAD_ID;
+}
+
+/**
+ * mpr_mapping_get_raid_id_from_handle - find raid device in mapping table
+ * using only the volume dev handle. This is just a wrapper function for the
+ * local function _mapping_get_ir_mt_idx_from_handle.
+ * @sc: per adapter object
+ * @volHandle: volume device handle
+ *
+ * Returns valid ID on success or BAD_ID.
+ */
+unsigned int
+mpr_mapping_get_raid_id_from_handle(struct mpr_softc *sc, u16 volHandle)
+{
+ return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle));
+}
+
/**
 * mpr_mapping_enclosure_dev_status_change_event - handle enclosure events
 * @sc: per adapter object
 * @event_data: event data payload
 *
 * Process enclosure add/not-responding events: create or refresh the
 * enclosure table entry (and its DPM entry when persistent mapping is
 * enabled), or bump the missing count of a departed enclosure.  Only
 * relevant in enclosure/slot mapping mode.  Always ends by flushing dirty
 * DPM entries and consuming one pending map event.
 *
 * Return nothing.
 */
void
mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
    Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	u8 enc_idx, missing_count;
	struct enc_mapping_table *et_entry;
	Mpi2DriverMap0Entry_t *dpm_entry;
	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
	u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
	u8 update_phy_bits = 0;
	u32 saved_phy_bits;
	uint64_t temp64_var;

	if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) !=
	    MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING)
		goto out;

	/* Points at the first DPM entry, past the extended page header. */
	dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 +
	    sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));

	if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) {
		if (!event_data->NumSlots) {
			printf("%s: enclosure with handle = 0x%x reported 0 "
			    "slots\n", __func__,
			    le16toh(event_data->EnclosureHandle));
			goto out;
		}
		temp64_var = event_data->EnclosureLogicalID.High;
		temp64_var = (temp64_var << 32) |
		    event_data->EnclosureLogicalID.Low;
		enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var,
		    event_data->PhyBits);
		if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
			/* Known enclosure: refresh handle/slot/phy info and,
			 * if it was missing or its phy bits changed, update
			 * the persisted DPM entry as well. */
			et_entry = &sc->enclosure_table[enc_idx];
			if (et_entry->init_complete &&
			    !et_entry->missing_count) {
				printf("%s: enclosure %d is already present "
				    "with handle = 0x%x\n",__func__, enc_idx,
				    et_entry->enc_handle);
				goto out;
			}
			et_entry->enc_handle = le16toh(event_data->
			    EnclosureHandle);
			et_entry->start_slot = le16toh(event_data->StartSlot);
			saved_phy_bits = et_entry->phy_bits;
			et_entry->phy_bits |= le32toh(event_data->PhyBits);
			if (saved_phy_bits != et_entry->phy_bits)
				update_phy_bits = 1;
			if (et_entry->missing_count || update_phy_bits) {
				et_entry->missing_count = 0;
				if (sc->is_dpm_enable &&
				    et_entry->dpm_entry_num !=
				    MPR_DPM_BAD_IDX) {
					dpm_entry += et_entry->dpm_entry_num;
					missing_count =
					    (u8)(dpm_entry->MappingInformation &
					    MPI2_DRVMAP0_MAPINFO_MISSING_MASK);
					if (!et_entry->init_complete && (
					    missing_count || update_phy_bits)) {
						/* Rewrite slot count / phy
						 * bits and mark the DPM entry
						 * dirty for the flush below. */
						dpm_entry->MappingInformation
						    = et_entry->num_slots;
						dpm_entry->MappingInformation
						    <<= map_shift;
						dpm_entry->PhysicalBitsMapping
						    = et_entry->phy_bits;
						sc->dpm_flush_entry[et_entry->
						    dpm_entry_num] = 1;
					}
				}
			}
		} else {
			/* New enclosure: append an enclosure table entry; the
			 * map table range is assigned later. */
			enc_idx = sc->num_enc_table_entries;
			if (enc_idx >= sc->max_enclosures) {
				printf("%s: enclosure can not be added; "
				    "mapping table is full\n", __func__);
				goto out;
			}
			sc->num_enc_table_entries++;
			et_entry = &sc->enclosure_table[enc_idx];
			et_entry->enc_handle = le16toh(event_data->
			    EnclosureHandle);
			et_entry->enclosure_id = event_data->
			    EnclosureLogicalID.High;
			et_entry->enclosure_id = ( et_entry->enclosure_id <<
			    32) | event_data->EnclosureLogicalID.Low;
			et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
			et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
			et_entry->num_slots = le16toh(event_data->NumSlots);
			et_entry->start_slot = le16toh(event_data->StartSlot);
			et_entry->phy_bits = le32toh(event_data->PhyBits);
		}
		et_entry->init_complete = 1;
	} else if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) {
		/* Enclosure went away: bump the missing count and persist it
		 * into the DPM entry's MappingInformation field. */
		enc_idx = _mapping_get_enc_idx_from_handle(sc,
		    le16toh(event_data->EnclosureHandle));
		if (enc_idx == MPR_ENCTABLE_BAD_IDX) {
			printf("%s: cannot unmap enclosure %d because it has "
			    "already been deleted", __func__, enc_idx);
			goto out;
		}
		et_entry = &sc->enclosure_table[enc_idx];
		if (!et_entry->init_complete) {
			if (et_entry->missing_count < MPR_MAX_MISSING_COUNT)
				et_entry->missing_count++;
			else
				et_entry->init_complete = 1;
		}
		if (!et_entry->missing_count)
			et_entry->missing_count++;
		if (sc->is_dpm_enable && !et_entry->init_complete &&
		    et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) {
			dpm_entry += et_entry->dpm_entry_num;
			dpm_entry->MappingInformation = et_entry->num_slots;
			dpm_entry->MappingInformation <<= map_shift;
			dpm_entry->MappingInformation |=
			    et_entry->missing_count;
			sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1;
		}
		et_entry->init_complete = 1;
	}

out:
	_mapping_flush_dpm_pages(sc);
	if (sc->pending_map_events)
		sc->pending_map_events--;
}
+
+/**
+ * mpr_mapping_topology_change_event - handle topology change events
+ * @sc: per adapter object
+ * @event_data: event data payload
+ *
+ * Returns nothing.
+ */
+void
+mpr_mapping_topology_change_event(struct mpr_softc *sc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct _map_topology_change topo_change;
+ struct _map_phy_change *phy_change;
+ Mpi2EventSasTopoPhyEntry_t *event_phy_change;
+ u8 i, num_entries;
+
+ topo_change.enc_handle = le16toh(event_data->EnclosureHandle);
+ topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle);
+ num_entries = event_data->NumEntries;
+ topo_change.num_entries = num_entries;
+ topo_change.start_phy_num = event_data->StartPhyNum;
+ topo_change.num_phys = event_data->NumPhys;
+ topo_change.exp_status = event_data->ExpStatus;
+ event_phy_change = event_data->PHY;
+ topo_change.phy_details = NULL;
+
+ if (!num_entries)
+ goto out;
+ phy_change = malloc(sizeof(struct _map_phy_change) * num_entries,
+ M_MPR, M_NOWAIT|M_ZERO);
+ topo_change.phy_details = phy_change;
+ if (!phy_change)
+ goto out;
+ for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) {
+ phy_change->dev_handle = le16toh(event_phy_change->
+ AttachedDevHandle);
+ phy_change->reason = event_phy_change->PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ }
+ _mapping_update_missing_count(sc, &topo_change);
+ _mapping_get_dev_info(sc, &topo_change);
+ _mapping_clear_removed_entries(sc);
+ _mapping_add_new_device(sc, &topo_change);
+
+out:
+ free(topo_change.phy_details, M_MPR);
+ _mapping_flush_dpm_pages(sc);
+ if (sc->pending_map_events)
+ sc->pending_map_events--;
+}
+
+/**
+ * _mapping_check_update_ir_mt_idx - Check and update IR map table index
+ * @sc: per adapter object
+ * @event_data: event data payload
+ * @evt_idx: current event index
+ * @map_idx: current index and the place holder for new map table index
+ * @wwid_table: world wide name for volumes in the element table
+ *
+ * pass through IR events and find whether any events matches and if so
+ * tries to find new index if not returns failure
+ *
+ * Returns 0 on success and 1 on failure
+ */
+static int
+_mapping_check_update_ir_mt_idx(struct mpr_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data, int evt_idx, u32 *map_idx,
+ u64 *wwid_table)
+{
+ struct dev_mapping_table *mt_entry;
+ u32 st_idx, end_idx, mt_idx = *map_idx;
+ u8 match = 0;
+ Mpi2EventIrConfigElement_t *element;
+ u16 element_flags;
+ int i;
+
+ /* Start from the caller's candidate slot, bounded by the IR region. */
+ mt_entry = &sc->mapping_table[mt_idx];
+ _mapping_get_ir_maprange(sc, &st_idx, &end_idx);
+search_again:
+ match = 0;
+ /*
+ * Scan the events AFTER evt_idx for a volume add/create whose WWID
+ * already owns the candidate entry; using that slot would collide.
+ */
+ for (i = evt_idx + 1; i < event_data->NumElements; i++) {
+ element = (Mpi2EventIrConfigElement_t *)
+ &event_data->ConfigElement[i];
+ element_flags = le16toh(element->ElementFlags);
+ if ((element_flags &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) !=
+ MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ if (mt_entry->physical_id == wwid_table[i]) {
+ match = 1;
+ break;
+ }
+ }
+ }
+
+ if (match) {
+ /*
+ * Collision: advance to the next free (not IN_USE) slot and
+ * re-run the scan against it. Fail once the IR range is
+ * exhausted.
+ */
+ do {
+ mt_idx++;
+ if (mt_idx > end_idx)
+ return 1;
+ mt_entry = &sc->mapping_table[mt_idx];
+ } while (mt_entry->device_info & MPR_MAP_IN_USE);
+ goto search_again;
+ }
+ *map_idx = mt_idx;
+ return 0;
+}
+
+/**
+ * mpr_mapping_ir_config_change_event - handle IR config change list events
+ * @sc: per adapter object
+ * @event_data: event data payload
+ *
+ * Returns nothing.
+ */
+void
+mpr_mapping_ir_config_change_event(struct mpr_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u64 *wwid_table;
+ u32 map_idx, flags;
+ struct dev_mapping_table *mt_entry;
+ u16 element_flags;
+ u8 log_full_error = 0;
+
+ /*
+ * One WWID per event element. NOTE(review): if NumElements is 0 this
+ * is a zero-byte allocation -- presumably the firmware never sends an
+ * empty list; confirm before relying on it.
+ */
+ wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPR,
+ M_NOWAIT | M_ZERO);
+ if (!wwid_table)
+ goto out;
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ flags = le32toh(event_data->Flags);
+ /*
+ * Pass 1: resolve each volume element's WWID and mark any existing
+ * map table entry as IN_USE so pass 2 won't hand its slot to another
+ * volume. (ReasonCode is a single byte, so no byte swap is needed.)
+ */
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ element_flags = le16toh(element->ElementFlags);
+ if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) &&
+ (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) &&
+ (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE)
+ && (element->ReasonCode !=
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED))
+ continue;
+ if ((element_flags &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) ==
+ MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) {
+ mpr_config_get_volume_wwid(sc,
+ le16toh(element->VolDevHandle), &wwid_table[i]);
+ map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
+ wwid_table[i]);
+ if (map_idx != MPR_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->device_info |= MPR_MAP_IN_USE;
+ }
+ }
+ }
+ /* Foreign configurations are not mapped; skip pass 2 entirely. */
+ if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ goto out;
+ else {
+ /* Pass 2: apply each add/create/remove/delete to the table. */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->
+ ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_ADDED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) {
+ /* Re-add of a known WWID: refresh in place. */
+ map_idx = _mapping_get_ir_mt_idx_from_wwid
+ (sc, wwid_table[i]);
+ if (map_idx != MPR_MAPTABLE_BAD_IDX) {
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->channel = MPR_RAID_CHANNEL;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = le16toh
+ (element->VolDevHandle);
+ mt_entry->device_info =
+ MPR_DEV_RESERVED | MPR_MAP_IN_USE;
+ _mapping_update_ir_missing_cnt(sc,
+ map_idx, element, wwid_table[i]);
+ continue;
+ }
+ /*
+ * New volume: take a free slot, but verify it
+ * doesn't collide with a later event in this
+ * same list.
+ */
+ map_idx = _mapping_get_free_ir_mt_idx(sc);
+ if (map_idx == MPR_MAPTABLE_BAD_IDX)
+ log_full_error = 1;
+ else if (i < (event_data->NumElements - 1)) {
+ log_full_error =
+ _mapping_check_update_ir_mt_idx
+ (sc, event_data, i, &map_idx,
+ wwid_table);
+ }
+ if (log_full_error) {
+ printf("%s: no space to add the RAID "
+ "volume with handle 0x%04x in "
+ "mapping table\n", __func__, le16toh
+ (element->VolDevHandle));
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ mt_entry->physical_id = wwid_table[i];
+ mt_entry->channel = MPR_RAID_CHANNEL;
+ mt_entry->id = map_idx;
+ mt_entry->dev_handle = le16toh(element->
+ VolDevHandle);
+ mt_entry->device_info = MPR_DEV_RESERVED |
+ MPR_MAP_IN_USE;
+ mt_entry->init_complete = 0;
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, wwid_table[i]);
+ } else if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ /* Removal is looked up by WWID from pass 1. */
+ map_idx = _mapping_get_ir_mt_idx_from_wwid(sc,
+ wwid_table[i]);
+ if (map_idx == MPR_MAPTABLE_BAD_IDX) {
+ printf("%s: failed to remove a volume "
+ "because it has already been "
+ "removed\n", __func__);
+ continue;
+ }
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, wwid_table[i]);
+ } else if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) {
+ /* Deletion is looked up by device handle. */
+ map_idx = _mapping_get_mt_idx_from_handle(sc,
+ le16toh(element->VolDevHandle));
+ if (map_idx == MPR_MAPTABLE_BAD_IDX) {
+ printf("%s: failed to remove volume "
+ "with handle 0x%04x because it has "
+ "already been removed\n", __func__,
+ le16toh(element->VolDevHandle));
+ continue;
+ }
+ mt_entry = &sc->mapping_table[map_idx];
+ _mapping_update_ir_missing_cnt(sc, map_idx,
+ element, mt_entry->physical_id);
+ }
+ }
+ }
+
+out:
+ /* Always persist DPM pages and retire the pending event. */
+ _mapping_flush_dpm_pages(sc);
+ free(wwid_table, M_MPR);
+ if (sc->pending_map_events)
+ sc->pending_map_events--;
+}
diff --git a/sys/dev/lindev/lindev.c b/sys/dev/mpr/mpr_mapping.h
index cf876042ae954..3250c424e2a4f 100644
--- a/sys/dev/lindev/lindev.c
+++ b/sys/dev/mpr/mpr_mapping.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009 "Bjoern A. Zeeb" <bz@FreeBSD.org>
+ * Copyright (c) 2011-2014 LSI Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -22,52 +22,50 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
*/
-/*
- * "lindev" is supposed to be a collection of linux-specific devices
- * that we also support, just not by default.
- * While currently there is only "/dev/full", we are planning to see
- * more in the future.
- * This file is only the container to load/unload all supported devices;
- * the implementation of each should go into its own file.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
+#ifndef _MPR_MAPPING_H
+#define _MPR_MAPPING_H
-#include <dev/lindev/lindev.h>
-
-/* ARGSUSED */
-static int
-lindev_modevent(module_t mod, int type, void *data)
-{
- int error;
-
- switch(type) {
- case MOD_LOAD:
- error = lindev_modevent_full(mod, type, data);
- break;
-
- case MOD_UNLOAD:
- error = lindev_modevent_full(mod, type, data);
- break;
+/**
+ * struct _map_phy_change - PHY entries received in Topology change list
+ * @physical_id: SAS address of the device attached with the associate PHY
+ * @device_info: bitfield provides detailed info about the device
+ * @dev_handle: device handle for the device pointed by this entry
+ * @slot: slot ID
+ * @reason: PHY status (reason code) from the topology event
+ * @is_processed: Flag to indicate whether this entry is processed or not
+ */
+struct _map_phy_change {
+ uint64_t physical_id; /* SAS address attached to this PHY */
+ uint32_t device_info; /* device capability/type bits */
+ uint16_t dev_handle; /* firmware device handle */
+ uint16_t slot; /* enclosure slot ID */
+ uint8_t reason; /* masked PhyStatus reason code */
+ uint8_t is_processed; /* set once handled by the mapper */
+};
- case MOD_SHUTDOWN:
- error = lindev_modevent_full(mod, type, data);
- break;
+/**
+ * struct _map_topology_change - decoded SAS topology change list event
+ * @enc_handle: enclosure handle the change applies to
+ * @exp_handle: expander handle (0 for direct-attached devices)
+ * @num_entries: number of PHY entries in @phy_details
+ * @start_phy_num: first PHY number covered by the event
+ * @num_phys: total number of PHYs on the expander/enclosure
+ * @exp_status: expander status reported by the firmware
+ * @phy_details: per-PHY change entries
+ */
+struct _map_topology_change {
+ uint16_t enc_handle; /* enclosure handle */
+ uint16_t exp_handle; /* expander handle, 0 if direct-attached */
+ uint8_t num_entries; /* entries in phy_details[] */
+ uint8_t start_phy_num; /* first PHY covered by the event */
+ uint8_t num_phys; /* total PHY count */
+ uint8_t exp_status; /* expander status from the event */
+ struct _map_phy_change *phy_details; /* per-PHY entries (malloc'd) */
+};
- default:
- return (EOPNOTSUPP);
- }
- return (error);
-}
+extern int
+mprsas_get_sas_address_for_sata_disk(struct mpr_softc *ioc,
+ u64 *sas_address, u16 handle, u32 device_info);
-DEV_MODULE(lindev, lindev_modevent, NULL);
-MODULE_VERSION(lindev, 1);
+#endif
diff --git a/sys/dev/mpr/mpr_pci.c b/sys/dev/mpr/mpr_pci.c
new file mode 100644
index 0000000000000..e19f33adceb4c
--- /dev/null
+++ b/sys/dev/mpr/mpr_pci.c
@@ -0,0 +1,350 @@
+/*-
+ * Copyright (c) 2009 Yahoo! Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* PCI/PCI-X/PCIe bus interface for the LSI MPT2 controllers */
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+
+static int mpr_pci_probe(device_t);
+static int mpr_pci_attach(device_t);
+static int mpr_pci_detach(device_t);
+static int mpr_pci_suspend(device_t);
+static int mpr_pci_resume(device_t);
+static void mpr_pci_free(struct mpr_softc *);
+static int mpr_alloc_msix(struct mpr_softc *sc, int msgs);
+static int mpr_alloc_msi(struct mpr_softc *sc, int msgs);
+
+/* newbus glue: device methods, driver descriptor and module registration. */
+static device_method_t mpr_methods[] = {
+ DEVMETHOD(device_probe, mpr_pci_probe),
+ DEVMETHOD(device_attach, mpr_pci_attach),
+ DEVMETHOD(device_detach, mpr_pci_detach),
+ DEVMETHOD(device_suspend, mpr_pci_suspend),
+ DEVMETHOD(device_resume, mpr_pci_resume),
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+ { 0, 0 }
+};
+
+static driver_t mpr_pci_driver = {
+ "mpr",
+ mpr_methods,
+ sizeof(struct mpr_softc)
+};
+
+static devclass_t mpr_devclass;
+/* Attach to the PCI bus; the driver depends on CAM for SCSI transport. */
+DRIVER_MODULE(mpr, pci, mpr_pci_driver, mpr_devclass, 0, 0);
+MODULE_DEPEND(mpr, cam, 1, 1, 1);
+
+/*
+ * PCI ID table for supported SAS3 controllers. A subvendor/subdevice of
+ * 0xffff acts as a wildcard in mpr_find_ident(). Terminated by an
+ * all-zero sentinel entry.
+ */
+struct mpr_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ u_int flags;
+ const char *desc;
+} mpr_identifiers[] = {
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ 0xffff, 0xffff, 0, "LSI SAS3004" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ 0xffff, 0xffff, 0, "LSI SAS3008" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ 0xffff, 0xffff, 0, "LSI SAS3108_1" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ 0xffff, 0xffff, 0, "LSI SAS3108_2" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ 0xffff, 0xffff, 0, "LSI SAS3108_5" },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ 0xffff, 0xffff, 0, "LSI SAS3108_6" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+/*
+ * Match a PCI device against mpr_identifiers[]. Returns the matching
+ * table entry, or NULL if the device is not one of ours. Subvendor and
+ * subdevice only participate in the match when not the 0xffff wildcard.
+ */
+static struct mpr_ident *
+mpr_find_ident(device_t dev)
+{
+ struct mpr_ident *m;
+
+ for (m = mpr_identifiers; m->vendor != 0; m++) {
+ if (m->vendor != pci_get_vendor(dev))
+ continue;
+ if (m->device != pci_get_device(dev))
+ continue;
+ if ((m->subvendor != 0xffff) &&
+ (m->subvendor != pci_get_subvendor(dev)))
+ continue;
+ if ((m->subdevice != 0xffff) &&
+ (m->subdevice != pci_get_subdevice(dev)))
+ continue;
+ return (m);
+ }
+
+ return (NULL);
+}
+
+/*
+ * newbus probe: claim the device if it appears in our ID table and set
+ * its description. Returns BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
+ */
+static int
+mpr_pci_probe(device_t dev)
+{
+ struct mpr_ident *id;
+
+ if ((id = mpr_find_ident(dev)) != NULL) {
+ device_set_desc(dev, id->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ return (ENXIO);
+}
+
+/*
+ * newbus attach: map BAR1 registers, create the parent DMA tag, and hand
+ * off to the chip-independent mpr_attach(). On any failure the partially
+ * acquired PCI resources are released via mpr_pci_free().
+ */
+static int
+mpr_pci_attach(device_t dev)
+{
+ struct mpr_softc *sc;
+ struct mpr_ident *m;
+ int error;
+
+ sc = device_get_softc(dev);
+ bzero(sc, sizeof(*sc));
+ sc->mpr_dev = dev;
+ /*
+ * NOTE(review): result not NULL-checked; presumably safe because
+ * attach only runs after mpr_pci_probe matched -- confirm.
+ */
+ m = mpr_find_ident(dev);
+ sc->mpr_flags = m->flags;
+
+ /* Twiddle basic PCI config bits for a sanity check */
+ pci_enable_busmaster(dev);
+
+ /* Allocate the System Interface Register Set */
+ sc->mpr_regs_rid = PCIR_BAR(1);
+ if ((sc->mpr_regs_resource = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &sc->mpr_regs_rid, RF_ACTIVE)) == NULL) {
+ mpr_printf(sc, "Cannot allocate PCI registers\n");
+ return (ENXIO);
+ }
+ sc->mpr_btag = rman_get_bustag(sc->mpr_regs_resource);
+ sc->mpr_bhandle = rman_get_bushandle(sc->mpr_regs_resource);
+
+ /* Allocate the parent DMA tag */
+ if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* algnmnt, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
+ BUS_SPACE_UNRESTRICTED, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mpr_parent_dmat)) {
+ mpr_printf(sc, "Cannot allocate parent DMA tag\n");
+ mpr_pci_free(sc);
+ return (ENOMEM);
+ }
+
+ if ((error = mpr_attach(sc)) != 0)
+ mpr_pci_free(sc);
+
+ return (error);
+}
+
+/*
+ * Allocate and wire up interrupts, preferring MSI-X, then MSI, then a
+ * shared INTx line. Sets MPR_FLAGS_MSI or MPR_FLAGS_INTX accordingly so
+ * mpr_pci_free() knows what to tear down. Returns 0 on success or an
+ * errno value if no interrupt could be set up.
+ */
+int
+mpr_pci_setup_interrupts(struct mpr_softc *sc)
+{
+ device_t dev;
+ int i, error, msgs;
+
+ dev = sc->mpr_dev;
+ error = ENXIO;
+ /* Try MSI-X first unless disabled by the corresponding tunable. */
+ if ((sc->disable_msix == 0) &&
+ ((msgs = pci_msix_count(dev)) >= MPR_MSI_COUNT))
+ error = mpr_alloc_msix(sc, MPR_MSI_COUNT);
+ if ((error != 0) && (sc->disable_msi == 0) &&
+ ((msgs = pci_msi_count(dev)) >= MPR_MSI_COUNT))
+ error = mpr_alloc_msi(sc, MPR_MSI_COUNT);
+
+ if (error != 0) {
+ /* Fall back to legacy INTx (rid 0, shareable). */
+ sc->mpr_flags |= MPR_FLAGS_INTX;
+ sc->mpr_irq_rid[0] = 0;
+ sc->mpr_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &sc->mpr_irq_rid[0], RF_SHAREABLE | RF_ACTIVE);
+ if (sc->mpr_irq[0] == NULL) {
+ mpr_printf(sc, "Cannot allocate INTx interrupt\n");
+ return (ENXIO);
+ }
+ error = bus_setup_intr(dev, sc->mpr_irq[0],
+ INTR_TYPE_BIO | INTR_MPSAFE, NULL, mpr_intr, sc,
+ &sc->mpr_intrhand[0]);
+ if (error)
+ mpr_printf(sc, "Cannot setup INTx interrupt\n");
+ } else {
+ /* MSI/MSI-X vectors use 1-based rids. */
+ sc->mpr_flags |= MPR_FLAGS_MSI;
+ for (i = 0; i < MPR_MSI_COUNT; i++) {
+ sc->mpr_irq_rid[i] = i + 1;
+ sc->mpr_irq[i] = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &sc->mpr_irq_rid[i], RF_ACTIVE);
+ if (sc->mpr_irq[i] == NULL) {
+ mpr_printf(sc,
+ "Cannot allocate MSI interrupt\n");
+ return (ENXIO);
+ }
+ error = bus_setup_intr(dev, sc->mpr_irq[i],
+ INTR_TYPE_BIO | INTR_MPSAFE, NULL, mpr_intr_msi,
+ sc, &sc->mpr_intrhand[i]);
+ if (error) {
+ mpr_printf(sc,
+ "Cannot setup MSI interrupt %d\n", i);
+ break;
+ }
+ }
+ }
+
+ return (error);
+}
+
+/*
+ * newbus detach: shut down the core driver first; only release the PCI
+ * resources if that succeeds (mpr_free() may refuse while busy).
+ */
+static int
+mpr_pci_detach(device_t dev)
+{
+ struct mpr_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ if ((error = mpr_free(sc)) != 0)
+ return (error);
+
+ mpr_pci_free(sc);
+ return (0);
+}
+
+/*
+ * Release every PCI-level resource acquired during attach/interrupt
+ * setup: the parent DMA tag, MSI/MSI-X or INTx interrupts, and the
+ * register BAR. Safe to call on a partially attached softc; each
+ * resource is checked before it is torn down.
+ */
+static void
+mpr_pci_free(struct mpr_softc *sc)
+{
+ int i;
+
+ if (sc->mpr_parent_dmat != NULL) {
+ bus_dma_tag_destroy(sc->mpr_parent_dmat);
+ }
+
+ if (sc->mpr_flags & MPR_FLAGS_MSI) {
+ for (i = 0; i < MPR_MSI_COUNT; i++) {
+ if (sc->mpr_irq[i] != NULL) {
+ bus_teardown_intr(sc->mpr_dev, sc->mpr_irq[i],
+ sc->mpr_intrhand[i]);
+ bus_release_resource(sc->mpr_dev, SYS_RES_IRQ,
+ sc->mpr_irq_rid[i], sc->mpr_irq[i]);
+ }
+ }
+ pci_release_msi(sc->mpr_dev);
+ }
+
+ /*
+ * Guard against a failed INTx allocation (MPR_FLAGS_INTX is set
+ * before bus_alloc_resource_any() in mpr_pci_setup_interrupts(), so
+ * the flag alone does not prove the IRQ exists) -- mirrors the NULL
+ * check in the MSI loop above.
+ */
+ if ((sc->mpr_flags & MPR_FLAGS_INTX) && (sc->mpr_irq[0] != NULL)) {
+ bus_teardown_intr(sc->mpr_dev, sc->mpr_irq[0],
+ sc->mpr_intrhand[0]);
+ bus_release_resource(sc->mpr_dev, SYS_RES_IRQ,
+ sc->mpr_irq_rid[0], sc->mpr_irq[0]);
+ }
+
+ if (sc->mpr_regs_resource != NULL) {
+ bus_release_resource(sc->mpr_dev, SYS_RES_MEMORY,
+ sc->mpr_regs_rid, sc->mpr_regs_resource);
+ }
+
+ return;
+}
+
+/* Suspend is not implemented; reject power-state transitions. */
+static int
+mpr_pci_suspend(device_t dev)
+{
+ return (EINVAL);
+}
+
+/* Resume is not implemented; reject power-state transitions. */
+static int
+mpr_pci_resume(device_t dev)
+{
+ return (EINVAL);
+}
+
+/*
+ * Thin wrapper around pci_alloc_msix(). Note that pci_alloc_msix() may
+ * grant fewer vectors than requested via the by-reference msgs argument.
+ */
+static int
+mpr_alloc_msix(struct mpr_softc *sc, int msgs)
+{
+ int error;
+
+ error = pci_alloc_msix(sc->mpr_dev, &msgs);
+ return (error);
+}
+
+/*
+ * Thin wrapper around pci_alloc_msi(). As with MSI-X, the granted vector
+ * count is returned through msgs by the PCI layer.
+ */
+static int
+mpr_alloc_msi(struct mpr_softc *sc, int msgs)
+{
+ int error;
+
+ error = pci_alloc_msi(sc->mpr_dev, &msgs);
+ return (error);
+}
+
+/*
+ * Restore the device's saved PCI config space (used after a hard reset).
+ * Returns 0 on success or EINVAL if the device ivars are unavailable.
+ */
+int
+mpr_pci_restore(struct mpr_softc *sc)
+{
+ struct pci_devinfo *dinfo;
+
+ mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+ dinfo = device_get_ivars(sc->mpr_dev);
+ if (dinfo == NULL) {
+ mpr_dprint(sc, MPR_FAULT, "%s: NULL dinfo\n", __func__);
+ return (EINVAL);
+ }
+
+ pci_cfg_restore(sc->mpr_dev, dinfo);
+ return (0);
+}
+
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
new file mode 100644
index 0000000000000..1f95358c877e7
--- /dev/null
+++ b/sys/dev/mpr/mpr_sas.c
@@ -0,0 +1,3485 @@
+/*-
+ * Copyright (c) 2009 Yahoo! Inc.
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Communications core for LSI MPT2 */
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/sbuf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <machine/stdarg.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#if __FreeBSD_version >= 900026
+#include <cam/scsi/smp_all.h>
+#endif
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_table.h>
+#include <dev/mpr/mpr_sas.h>
+
+#define MPRSAS_DISCOVERY_TIMEOUT 20
+#define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
+
+/*
+ * static array to check SCSI OpCode for EEDP protection bits
+ */
+#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
+#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
+#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
+/*
+ * Indexed by SCSI opcode (16 per row); a non-zero entry selects the EEDP
+ * flags (PRO_R/PRO_W/PRO_V) to apply for that command.
+ */
+static uint8_t op_code_prot[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
+
+static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
+static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
+static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
+static void mprsas_poll(struct cam_sim *sim);
+static void mprsas_scsiio_timeout(void *data);
+static void mprsas_abort_complete(struct mpr_softc *sc,
+ struct mpr_command *cm);
+static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
+static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
+static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
+static void mprsas_resetdev_complete(struct mpr_softc *,
+ struct mpr_command *);
+static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
+ struct mpr_command *cm);
+static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
+ uint8_t type);
+static void mprsas_async(void *callback_arg, uint32_t code,
+ struct cam_path *path, void *arg);
+static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
+ struct ccb_getdev *cgd);
+#if (__FreeBSD_version < 901503) || \
+ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
+static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
+ struct ccb_getdev *cgd);
+static void mprsas_read_cap_done(struct cam_periph *periph,
+ union ccb *done_ccb);
+#endif
+static int mprsas_send_portenable(struct mpr_softc *sc);
+static void mprsas_portenable_complete(struct mpr_softc *sc,
+ struct mpr_command *cm);
+
+#if __FreeBSD_version >= 900026
+static void
+mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
+static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
+ union ccb *ccb, uint64_t sasaddr);
+static void
+mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
+#endif
+
+/*
+ * Linear search of the target array, beginning at 'start', for the entry
+ * whose firmware device handle matches. Returns NULL if not found.
+ */
+struct mprsas_target *
+mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
+ uint16_t handle)
+{
+ struct mprsas_target *target;
+ int i;
+
+ for (i = start; i < sassc->maxtargets; i++) {
+ target = &sassc->targets[i];
+ if (target->handle == handle)
+ return (target);
+ }
+
+ return (NULL);
+}
+
+/* we need to freeze the simq during attach and diag reset, to avoid failing
+ * commands before device handles have been found by discovery. Since
+ * discovery involves reading config pages and possibly sending commands,
+ * discovery actions may continue even after we receive the end of discovery
+ * event, so refcount discovery actions instead of assuming we can unfreeze
+ * the simq when we get the event.
+ */
+/*
+ * Take a discovery reference during startup. The first reference freezes
+ * the SIM queue (and holds boot on CAM versions that support it) so no
+ * commands are issued before device handles exist; see the comment above.
+ */
+void
+mprsas_startup_increment(struct mprsas_softc *sassc)
+{
+ MPR_FUNCTRACE(sassc->sc);
+
+ if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
+ if (sassc->startup_refcount++ == 0) {
+ /* just starting, freeze the simq */
+ mpr_dprint(sassc->sc, MPR_INIT,
+ "%s freezing simq\n", __func__);
+#if (__FreeBSD_version >= 1000039) || \
+ ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
+ xpt_hold_boot();
+#endif
+ xpt_freeze_simq(sassc->sim, 1);
+ }
+ mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
+ sassc->startup_refcount);
+ }
+}
+
+/*
+ * If the SIM queue was frozen by this driver (MPRSAS_QUEUE_FROZEN), clear
+ * the flag and release it. Used when re-initializing after a reset.
+ */
+void
+mprsas_release_simq_reinit(struct mprsas_softc *sassc)
+{
+ if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
+ sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
+ xpt_release_simq(sassc->sim, 1);
+ mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
+ }
+}
+
+/*
+ * Drop a discovery reference taken by mprsas_startup_increment(). When
+ * the count reaches zero, startup is complete: clear MPRSAS_IN_STARTUP,
+ * release the SIM queue, and either release boot or trigger a full rescan
+ * depending on the CAM version.
+ */
+void
+mprsas_startup_decrement(struct mprsas_softc *sassc)
+{
+ MPR_FUNCTRACE(sassc->sc);
+
+ if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
+ if (--sassc->startup_refcount == 0) {
+ /* finished all discovery-related actions, release
+ * the simq and rescan for the latest topology.
+ */
+ mpr_dprint(sassc->sc, MPR_INIT,
+ "%s releasing simq\n", __func__);
+ sassc->flags &= ~MPRSAS_IN_STARTUP;
+ xpt_release_simq(sassc->sim, 1);
+#if (__FreeBSD_version >= 1000039) || \
+ ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
+ xpt_release_boot();
+#else
+ mprsas_rescan_target(sassc->sc, NULL);
+#endif
+ }
+ mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
+ sassc->startup_refcount);
+ }
+}
+
+/* LSI's firmware requires us to stop sending commands when we're doing task
+ * management, so refcount the TMs and keep the simq frozen when any are in
+ * use.
+ */
+/*
+ * Allocate a high-priority command for task management. The first
+ * outstanding TM freezes the SIM queue (firmware requires no concurrent
+ * I/O during TM); tm_count tracks how many are in flight. Returns NULL
+ * if no high-priority command is available.
+ */
+struct mpr_command *
+mprsas_alloc_tm(struct mpr_softc *sc)
+{
+ struct mpr_command *tm;
+
+ MPR_FUNCTRACE(sc);
+ tm = mpr_alloc_high_priority_command(sc);
+ if (tm != NULL) {
+ if (sc->sassc->tm_count++ == 0) {
+ mpr_dprint(sc, MPR_RECOVERY,
+ "%s freezing simq\n", __func__);
+ xpt_freeze_simq(sc->sassc->sim, 1);
+ }
+ mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
+ sc->sassc->tm_count);
+ }
+ return tm;
+}
+
+/*
+ * Return a task-management command allocated by mprsas_alloc_tm(). The
+ * last outstanding TM releases the SIM queue frozen at allocation time.
+ * A NULL tm is ignored.
+ */
+void
+mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
+{
+ mpr_dprint(sc, MPR_TRACE, "%s", __func__);
+ if (tm == NULL)
+ return;
+
+ /* if there are no TMs in use, we can release the simq. We use our
+ * own refcount so that it's easier for a diag reset to cleanup and
+ * release the simq.
+ */
+ if (--sc->sassc->tm_count == 0) {
+ mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
+ xpt_release_simq(sc->sassc->sim, 1);
+ }
+ mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
+ sc->sassc->tm_count);
+
+ mpr_free_high_priority_command(sc, tm);
+}
+
+/*
+ * Schedule an asynchronous CAM rescan of one target, or of the entire bus
+ * when targ is NULL (wildcard). Allocation failures are logged and the
+ * rescan silently dropped; CAM frees the CCB when the rescan completes.
+ */
+void
+mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
+{
+ struct mprsas_softc *sassc = sc->sassc;
+ path_id_t pathid;
+ target_id_t targetid;
+ union ccb *ccb;
+
+ MPR_FUNCTRACE(sc);
+ pathid = cam_sim_path(sassc->sim);
+ if (targ == NULL)
+ targetid = CAM_TARGET_WILDCARD;
+ else
+ /* Target ID is the entry's index within the targets array. */
+ targetid = targ - sassc->targets;
+
+ /*
+ * Allocate a CCB and schedule a rescan.
+ */
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
+ return;
+ }
+
+ if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
+ targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
+ xpt_free_ccb(ccb);
+ return;
+ }
+
+ if (targetid == CAM_TARGET_WILDCARD)
+ ccb->ccb_h.func_code = XPT_SCAN_BUS;
+ else
+ ccb->ccb_h.func_code = XPT_SCAN_TGT;
+
+ mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
+ xpt_rescan(ccb);
+}
+
+/*
+ * printf-style debug logger for a command. Prefixes the message with the
+ * CAM path and SCSI CDB when a CCB is attached, otherwise with a
+ * "(noperiph:...)" SIM/target/LUN tuple, plus the request SMID. Emits
+ * nothing unless the requested debug level is enabled.
+ */
+static void
+mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
+{
+ struct sbuf sb;
+ va_list ap;
+ char str[192];
+ char path_str[64];
+
+ if (cm == NULL)
+ return;
+
+ /* No need to be in here if debugging isn't enabled */
+ if ((cm->cm_sc->mpr_debug & level) == 0)
+ return;
+
+ /* Fixed-size sbuf: output beyond sizeof(str) is truncated. */
+ sbuf_new(&sb, str, sizeof(str), 0);
+
+ va_start(ap, fmt);
+
+ if (cm->cm_ccb != NULL) {
+ xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
+ sizeof(path_str));
+ sbuf_cat(&sb, path_str);
+ if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
+ scsi_command_string(&cm->cm_ccb->csio, &sb);
+ sbuf_printf(&sb, "length %d ",
+ cm->cm_ccb->csio.dxfer_len);
+ }
+ } else {
+ sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
+ cam_sim_name(cm->cm_sc->sassc->sim),
+ cam_sim_unit(cm->cm_sc->sassc->sim),
+ cam_sim_bus(cm->cm_sc->sassc->sim),
+ cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
+ cm->cm_lun);
+ }
+
+ sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
+ sbuf_vprintf(&sb, fmt, ap);
+ sbuf_finish(&sb);
+ mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
+
+ va_end(ap);
+}
+
+/*
+ * Completion handler for the target-reset TM issued by
+ * mprsas_prepare_volume_remove(). On success, clears the target's
+ * identity fields so its ID can be reused; on failure the target is left
+ * intact so the same ID is preserved if the volume returns.
+ */
+static void
+mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
+{
+ MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+ struct mprsas_target *targ;
+ uint16_t handle;
+
+ MPR_FUNCTRACE(sc);
+
+ reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+ /* The handle was stashed as the completion cookie at issue time. */
+ handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
+ targ = tm->cm_targ;
+
+ if (reply == NULL) {
+ /* XXX retry the remove after the diag reset completes? */
+ mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
+ "0x%04x\n", __func__, handle);
+ mprsas_free_tm(sc, tm);
+ return;
+ }
+
+ /*
+ * NOTE(review): IOCStatus is compared raw -- no le16toh() and no
+ * MPI2_IOCSTATUS_MASK, unlike other fields here; verify on
+ * big-endian hosts.
+ */
+ if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
+ mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
+ "device 0x%x\n", reply->IOCStatus, handle);
+ mprsas_free_tm(sc, tm);
+ return;
+ }
+
+ mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
+ reply->TerminationCount);
+ mpr_free_reply(sc, tm->cm_reply_data);
+ tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
+
+ mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
+ targ->tid, handle);
+
+ /*
+ * Don't clear target if remove fails because things will get confusing.
+ * Leave the devname and sasaddr intact so that we know to avoid reusing
+ * this target id if possible, and so we can assign the same target id
+ * to this device if it comes back in the future.
+ */
+ /* NOTE(review): always true here -- non-SUCCESS returned above. */
+ if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
+ targ = tm->cm_targ;
+ targ->handle = 0x0;
+ targ->encl_handle = 0x0;
+ targ->encl_level_valid = 0x0;
+ targ->encl_level = 0x0;
+ targ->connector_name[0] = ' ';
+ targ->connector_name[1] = ' ';
+ targ->connector_name[2] = ' ';
+ targ->connector_name[3] = ' ';
+ targ->encl_slot = 0x0;
+ targ->exp_dev_handle = 0x0;
+ targ->phy_num = 0x0;
+ targ->linkrate = 0x0;
+ targ->devinfo = 0x0;
+ targ->flags = 0x0;
+ targ->scsi_req_desc_type = 0;
+ }
+
+ mprsas_free_tm(sc, tm);
+}
+
+
+/*
+ * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
+ * Otherwise Volume Delete is same as Bare Drive Removal.
+ */
+/*
+ * Begin removal of a RAID volume: mark the target INREMOVAL and issue a
+ * target-reset TM (no MPI2_SAS_OP_REMOVE_DEVICE is needed for volumes).
+ * Completion is handled by mprsas_remove_volume(), which receives the
+ * handle as the completion cookie.
+ */
+void
+mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
+{
+ MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+ struct mpr_softc *sc;
+ struct mpr_command *cm;
+ struct mprsas_target *targ = NULL;
+
+ MPR_FUNCTRACE(sassc->sc);
+ sc = sassc->sc;
+
+ targ = mprsas_find_target_by_handle(sassc, 0, handle);
+ if (targ == NULL) {
+ /* FIXME: what is the action? */
+ /* We don't know about this device? */
+ mpr_dprint(sc, MPR_ERROR,
+ "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
+ return;
+ }
+
+ targ->flags |= MPRSAS_TARGET_INREMOVAL;
+
+ cm = mprsas_alloc_tm(sc);
+ if (cm == NULL) {
+ mpr_dprint(sc, MPR_ERROR,
+ "%s: command alloc failure\n", __func__);
+ return;
+ }
+
+ mprsas_rescan_target(sc, targ);
+
+ req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+ /*
+ * Zero the (possibly recycled) request frame and byte-swap the
+ * handle, matching mprsas_prepare_remove(); the original omitted
+ * both, leaving stale fields and a host-endian handle.
+ */
+ memset(req, 0, sizeof(*req));
+ req->DevHandle = htole16(targ->handle);
+ req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+
+ /* SAS Hard Link Reset / SATA Link Reset */
+ req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
+ cm->cm_targ = targ;
+ cm->cm_data = NULL;
+ cm->cm_desc.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ cm->cm_complete = mprsas_remove_volume;
+ cm->cm_complete_data = (void *)(uintptr_t)handle;
+ mpr_map_command(sc, cm);
+}
+
+/*
+ * The MPT2 firmware performs debounce on the link to avoid transient link
+ * errors and false removals. When it does decide that link has been lost
+ * and a device needs to go away, it expects that the host will perform a
+ * target reset and then an op remove. The reset has the side-effect of
+ * aborting any outstanding requests for the device, which is required for
+ * the op-remove to succeed. It's not clear if the host should check for
+ * the device coming back alive after the reset.
+ */
+void
+mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
+{
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mpr_softc *sc;
+	struct mpr_command *cm;
+	struct mprsas_target *targ = NULL;
+
+	MPR_FUNCTRACE(sassc->sc);
+
+	sc = sassc->sc;
+
+	targ = mprsas_find_target_by_handle(sassc, 0, handle);
+	if (targ == NULL) {
+		/* FIXME: what is the action? */
+		/* We don't know about this device? */
+		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
+		    __func__, handle);
+		return;
+	}
+
+	/* Reject new I/O for this target while the removal is in flight. */
+	targ->flags |= MPRSAS_TARGET_INREMOVAL;
+
+	cm = mprsas_alloc_tm(sc);
+	if (cm == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
+		    __func__);
+		return;
+	}
+
+	mprsas_rescan_target(sc, targ);
+
+	/* Step 1 of the removal protocol: a target reset, which aborts all
+	 * outstanding I/O so that the subsequent op-remove can succeed. */
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+	memset(req, 0, sizeof(*req));
+	req->DevHandle = htole16(targ->handle);
+	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+
+	/* SAS Hard Link Reset / SATA Link Reset */
+	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
+	cm->cm_targ = targ;
+	cm->cm_data = NULL;
+	cm->cm_desc.HighPriority.RequestFlags =
+	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	/* Step 2 happens in the completion handler: op-remove the device. */
+	cm->cm_complete = mprsas_remove_device;
+	cm->cm_complete_data = (void *)(uintptr_t)handle;
+	mpr_map_command(sc, cm);
+}
+
+/*
+ * Completion handler for the target reset issued by mprsas_prepare_remove().
+ * On success, reuses the same command frame to send the
+ * MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request, then fails any commands
+ * still queued on the target back to CAM.
+ */
+static void
+mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
+{
+	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
+	struct mprsas_target *targ;
+	struct mpr_command *next_cm;
+	uint16_t handle;
+
+	MPR_FUNCTRACE(sc);
+
+	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
+	targ = tm->cm_targ;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
+		    "handle %#04x! This should not happen!\n", __func__,
+		    tm->cm_flags, handle);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (reply == NULL) {
+		/* XXX retry the remove after the diag reset completes? */
+		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
+		    "0x%04x\n", __func__, handle);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
+		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
+		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
+	    le32toh(reply->TerminationCount));
+	mpr_free_reply(sc, tm->cm_reply_data);
+	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
+
+	/* Reuse the existing command */
+	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
+	memset(req, 0, sizeof(*req));
+	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+	req->DevHandle = htole16(handle);
+	tm->cm_data = NULL;
+	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	tm->cm_complete = mprsas_remove_complete;
+	tm->cm_complete_data = (void *)(uintptr_t)handle;
+
+	mpr_map_command(sc, tm);
+
+	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
+	    targ->tid, handle);
+	if (targ->encl_level_valid) {
+		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
+		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
+		    targ->connector_name);
+	}
+	/*
+	 * Fail back any commands the reset left on the target queue.  Note
+	 * that `tm' is deliberately reused as the loop variable here; the
+	 * task-management command itself was already handed back to the
+	 * firmware by mpr_map_command() above.
+	 */
+	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
+		union ccb *ccb;
+
+		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
+		ccb = tm->cm_complete_data;
+		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+		mprsas_scsiio_complete(sc, tm);
+	}
+}
+
+/*
+ * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request sent by
+ * mprsas_remove_device().  On success, clears the target's handle and
+ * topology state and frees its LUN list so the slot can be reused.
+ */
+static void
+mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
+{
+	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
+	uint16_t handle;
+	struct mprsas_target *targ;
+	struct mprsas_lun *lun;
+
+	MPR_FUNCTRACE(sc);
+
+	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
+	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
+		    "handle %#04x! This should not happen!\n", __func__,
+		    tm->cm_flags, handle);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (reply == NULL) {
+		/* most likely a chip reset */
+		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
+		    "0x%04x\n", __func__, handle);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
+	    __func__, handle, le16toh(reply->IOCStatus));
+
+	/*
+	 * Don't clear target if remove fails because things will get confusing.
+	 * Leave the devname and sasaddr intact so that we know to avoid reusing
+	 * this target id if possible, and so we can assign the same target id
+	 * to this device if it comes back in the future.
+	 */
+	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
+		targ = tm->cm_targ;
+		targ->handle = 0x0;
+		targ->encl_handle = 0x0;
+		targ->encl_level_valid = 0x0;
+		targ->encl_level = 0x0;
+		/* Blank (not zero) the connector name; it is printed as %4s. */
+		targ->connector_name[0] = ' ';
+		targ->connector_name[1] = ' ';
+		targ->connector_name[2] = ' ';
+		targ->connector_name[3] = ' ';
+		targ->encl_slot = 0x0;
+		targ->exp_dev_handle = 0x0;
+		targ->phy_num = 0x0;
+		targ->linkrate = 0x0;
+		targ->devinfo = 0x0;
+		targ->flags = 0x0;
+		targ->scsi_req_desc_type = 0;
+
+		/* Release the per-LUN records attached to this target. */
+		while (!SLIST_EMPTY(&targ->luns)) {
+			lun = SLIST_FIRST(&targ->luns);
+			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
+			free(lun, M_MPR);
+		}
+	}
+
+	mprsas_free_tm(sc, tm);
+}
+
+/*
+ * Subscribe to the set of firmware event notifications this driver
+ * consumes (topology/device status changes, discovery, IR state, and
+ * temperature threshold events).  Always returns 0.
+ */
+static int
+mprsas_register_events(struct mpr_softc *sc)
+{
+	static const uint8_t wanted_events[] = {
+		MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE,
+		MPI2_EVENT_SAS_DISCOVERY,
+		MPI2_EVENT_SAS_BROADCAST_PRIMITIVE,
+		MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE,
+		MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW,
+		MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST,
+		MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE,
+		MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST,
+		MPI2_EVENT_IR_VOLUME,
+		MPI2_EVENT_IR_PHYSICAL_DISK,
+		MPI2_EVENT_IR_OPERATION_STATUS,
+		MPI2_EVENT_TEMP_THRESHOLD,
+	};
+	uint8_t events[16];
+	unsigned int i;
+
+	/* Build the 128-bit event mask from the table above. */
+	bzero(events, sizeof(events));
+	for (i = 0; i < sizeof(wanted_events) / sizeof(wanted_events[0]); i++)
+		setbit(events, wanted_events[i]);
+
+	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
+	    &sc->sassc->mprsas_eh);
+
+	return (0);
+}
+
+/*
+ * Attach the CAM/SAS layer of the driver: allocate the per-controller SAS
+ * softc and target array, create the SIM and its queue, start the event
+ * taskqueue, register the bus with CAM, and subscribe to async and
+ * firmware events.  Returns 0 on success or an errno; on failure all
+ * partial state is torn down via mpr_detach_sas().
+ */
+int
+mpr_attach_sas(struct mpr_softc *sc)
+{
+	struct mprsas_softc *sassc;
+	cam_status status;
+	int unit, error = 0;
+
+	MPR_FUNCTRACE(sc);
+
+	/* NOTE(review): malloc(M_WAITOK) cannot return NULL per malloc(9);
+	 * these NULL checks are kept as belt-and-suspenders only. */
+	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
+	if (!sassc) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		return (ENOMEM);
+	}
+
+	/*
+	 * XXX MaxTargets could change during a reinit.  since we don't
+	 * resize the targets[] array during such an event, cache the value
+	 * of MaxTargets here so that we don't get into trouble later.  This
+	 * should move into the reinit logic.
+	 */
+	sassc->maxtargets = sc->facts->MaxTargets;
+	sassc->targets = malloc(sizeof(struct mprsas_target) *
+	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
+	if (!sassc->targets) {
+		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
+		    __func__, __LINE__);
+		free(sassc, M_MPR);
+		return (ENOMEM);
+	}
+	sc->sassc = sassc;
+	sassc->sc = sc;
+
+	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
+		error = ENOMEM;
+		goto out;
+	}
+
+	unit = device_get_unit(sc->mpr_dev);
+	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
+	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
+	if (sassc->sim == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
+		error = EINVAL;
+		goto out;
+	}
+
+	TAILQ_INIT(&sassc->ev_queue);
+
+	/* Initialize taskqueue for Event Handling */
+	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
+	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
+	    taskqueue_thread_enqueue, &sassc->ev_tq);
+	if (sassc->ev_tq == NULL) {
+		/*
+		 * taskqueue_create() with M_NOWAIT can fail; without this
+		 * check the taskqueue_start_threads() call below would
+		 * dereference NULL.
+		 */
+		mpr_dprint(sc, MPR_ERROR, "Cannot allocate taskqueue\n");
+		error = ENOMEM;
+		goto out;
+	}
+
+	/* Run the task queue with lowest priority */
+	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
+	    device_get_nameunit(sc->mpr_dev));
+
+	mpr_lock(sc);
+
+	/*
+	 * XXX There should be a bus for every port on the adapter, but since
+	 * we're just going to fake the topology for now, we'll pretend that
+	 * everything is just a target on a single bus.
+	 */
+	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
+		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
+		    error);
+		mpr_unlock(sc);
+		goto out;
+	}
+
+	/*
+	 * Assume that discovery events will start right away.
+	 *
+	 * Hold off boot until discovery is complete.
+	 */
+	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
+	sc->sassc->startup_refcount = 0;
+	mprsas_startup_increment(sassc);
+
+	callout_init(&sassc->discovery_callout, 1 /* mpsafe */);
+
+	sassc->tm_count = 0;
+
+	/*
+	 * Register for async events so we can determine the EEDP
+	 * capabilities of devices.
+	 */
+	status = xpt_create_path(&sassc->path, /*periph*/NULL,
+	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
+	    CAM_LUN_WILDCARD);
+	if (status != CAM_REQ_CMP) {
+		mpr_printf(sc, "Error %#x creating sim path\n", status);
+		sassc->path = NULL;
+	} else {
+		int event;
+
+#if (__FreeBSD_version >= 1000006) || \
+    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
+		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
+#else
+		event = AC_FOUND_DEVICE;
+#endif
+
+		/*
+		 * Prior to the CAM locking improvements, we can't call
+		 * xpt_register_async() with a particular path specified.
+		 *
+		 * If a path isn't specified, xpt_register_async() will
+		 * generate a wildcard path and acquire the XPT lock while
+		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
+		 * It will then drop the XPT lock once that is done.
+		 *
+		 * If a path is specified for xpt_register_async(), it will
+		 * not acquire and drop the XPT lock around the call to
+		 * xpt_action().  xpt_action() asserts that the caller
+		 * holds the SIM lock, so the SIM lock has to be held when
+		 * calling xpt_register_async() when the path is specified.
+		 *
+		 * But xpt_register_async calls xpt_for_all_devices(),
+		 * which calls xptbustraverse(), which will acquire each
+		 * SIM lock.  When it traverses our particular bus, it will
+		 * necessarily acquire the SIM lock, which will lead to a
+		 * recursive lock acquisition.
+		 *
+		 * The CAM locking changes fix this problem by acquiring
+		 * the XPT topology lock around bus traversal in
+		 * xptbustraverse(), so the caller can hold the SIM lock
+		 * and it does not cause a recursive lock acquisition.
+		 *
+		 * These __FreeBSD_version values are approximate, especially
+		 * for stable/10, which is two months later than the actual
+		 * change.
+		 */
+
+#if (__FreeBSD_version < 1000703) || \
+    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
+		mpr_unlock(sc);
+		status = xpt_register_async(event, mprsas_async, sc,
+		    NULL);
+		mpr_lock(sc);
+#else
+		status = xpt_register_async(event, mprsas_async, sc,
+		    sassc->path);
+#endif
+
+		if (status != CAM_REQ_CMP) {
+			mpr_dprint(sc, MPR_ERROR,
+			    "Error %#x registering async handler for "
+			    "AC_ADVINFO_CHANGED events\n", status);
+			xpt_free_path(sassc->path);
+			sassc->path = NULL;
+		}
+	}
+	if (status != CAM_REQ_CMP) {
+		/*
+		 * EEDP use is the exception, not the rule.
+		 * Warn the user, but do not fail to attach.
+		 */
+		mpr_printf(sc, "EEDP capabilities disabled.\n");
+	}
+
+	mpr_unlock(sc);
+
+	mprsas_register_events(sc);
+out:
+	if (error)
+		mpr_detach_sas(sc);
+	return (error);
+}
+
+/*
+ * Tear down the CAM/SAS layer: deregister events, drain and free the
+ * event taskqueue, unhook from CAM (async handler, simq, bus, SIM), and
+ * release the target array and softc.  Safe to call on a partially
+ * attached softc; every pointer is NULL-checked.  Always returns 0.
+ */
+int
+mpr_detach_sas(struct mpr_softc *sc)
+{
+	struct mprsas_softc *sassc;
+	struct mprsas_lun *lun, *lun_tmp;
+	struct mprsas_target *targ;
+	int i;
+
+	MPR_FUNCTRACE(sc);
+
+	if (sc->sassc == NULL)
+		return (0);
+
+	sassc = sc->sassc;
+	mpr_deregister_events(sc, sassc->mprsas_eh);
+
+	/*
+	 * Drain and free the event handling taskqueue with the lock
+	 * unheld so that any parallel processing tasks drain properly
+	 * without deadlocking.
+	 */
+	if (sassc->ev_tq != NULL)
+		taskqueue_free(sassc->ev_tq);
+
+	/* Make sure CAM doesn't wedge if we had to bail out early. */
+	mpr_lock(sc);
+
+	/* Deregister our async handler */
+	if (sassc->path != NULL) {
+		/* Passing event code 0 unregisters the callback. */
+		xpt_register_async(0, mprsas_async, sc, sassc->path);
+		xpt_free_path(sassc->path);
+		sassc->path = NULL;
+	}
+
+	/* Release the simq frozen during startup, if still frozen. */
+	if (sassc->flags & MPRSAS_IN_STARTUP)
+		xpt_release_simq(sassc->sim, 1);
+
+	if (sassc->sim != NULL) {
+		xpt_bus_deregister(cam_sim_path(sassc->sim));
+		cam_sim_free(sassc->sim, FALSE);
+	}
+
+	sassc->flags |= MPRSAS_SHUTDOWN;
+	mpr_unlock(sc);
+
+	if (sassc->devq != NULL)
+		cam_simq_free(sassc->devq);
+
+	/* Free any per-LUN records still attached to the targets. */
+	for (i = 0; i < sassc->maxtargets; i++) {
+		targ = &sassc->targets[i];
+		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
+			free(lun, M_MPR);
+		}
+	}
+	free(sassc->targets, M_MPR);
+	free(sassc, M_MPR);
+	sc->sassc = NULL;
+
+	return (0);
+}
+
+/*
+ * Called when firmware discovery finishes; cancel the discovery timeout
+ * callout if one is still pending.
+ */
+void
+mprsas_discovery_end(struct mprsas_softc *sassc)
+{
+	struct mpr_softc *sc = sassc->sc;
+
+	MPR_FUNCTRACE(sc);
+
+	if ((sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING) != 0)
+		callout_stop(&sassc->discovery_callout);
+}
+
+/*
+ * CAM action entry point for the SIM.  Dispatches on the CCB function
+ * code.  Cases that complete synchronously set ccb_h.status and fall
+ * through to the common xpt_done() at the bottom; the SCSI I/O, SMP I/O,
+ * and device-reset cases return early because they complete the CCB
+ * asynchronously.  Called with the SIM lock held (asserted below).
+ */
+static void
+mprsas_action(struct cam_sim *sim, union ccb *ccb)
+{
+	struct mprsas_softc *sassc;
+
+	sassc = cam_sim_softc(sim);
+
+	MPR_FUNCTRACE(sassc->sc);
+	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
+	    ccb->ccb_h.func_code);
+	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
+
+	switch (ccb->ccb_h.func_code) {
+	case XPT_PATH_INQ:
+	{
+		struct ccb_pathinq *cpi = &ccb->cpi;
+
+		/* Describe this HBA's capabilities to CAM. */
+		cpi->version_num = 1;
+		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
+		cpi->target_sprt = 0;
+#if (__FreeBSD_version >= 1000039) || \
+    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
+		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
+#else
+		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
+#endif
+		cpi->hba_eng_cnt = 0;
+		cpi->max_target = sassc->maxtargets - 1;
+		cpi->max_lun = 255;
+		cpi->initiator_id = sassc->maxtargets - 1;
+		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
+		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+		cpi->unit_number = cam_sim_unit(sim);
+		cpi->bus_id = cam_sim_bus(sim);
+		/*
+		 * XXXSLM-I think this needs to change based on config page or
+		 * something instead of hardcoded to 150000.
+		 */
+		cpi->base_transfer_speed = 150000;
+		cpi->transport = XPORT_SAS;
+		cpi->transport_version = 0;
+		cpi->protocol = PROTO_SCSI;
+		cpi->protocol_version = SCSI_REV_SPC;
+#if __FreeBSD_version >= 800001
+		/*
+		 * XXXSLM-probably need to base this number on max SGL's and
+		 * page size.
+		 */
+		cpi->maxio = 256 * 1024;
+#endif
+		cpi->ccb_h.status = CAM_REQ_CMP;
+		break;
+	}
+	case XPT_GET_TRAN_SETTINGS:
+	{
+		struct ccb_trans_settings *cts;
+		struct ccb_trans_settings_sas *sas;
+		struct ccb_trans_settings_scsi *scsi;
+		struct mprsas_target *targ;
+
+		cts = &ccb->cts;
+		sas = &cts->xport_specific.sas;
+		scsi = &cts->proto_specific.scsi;
+
+		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
+		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
+		    cts->ccb_h.target_id));
+		targ = &sassc->targets[cts->ccb_h.target_id];
+		if (targ->handle == 0x0) {
+			cts->ccb_h.status = CAM_DEV_NOT_THERE;
+			break;
+		}
+
+		cts->protocol_version = SCSI_REV_SPC2;
+		cts->transport = XPORT_SAS;
+		cts->transport_version = 0;
+
+		/* Map the firmware link rate code to a CAM bitrate. */
+		sas->valid = CTS_SAS_VALID_SPEED;
+		switch (targ->linkrate) {
+		case 0x08:
+			sas->bitrate = 150000;
+			break;
+		case 0x09:
+			sas->bitrate = 300000;
+			break;
+		case 0x0a:
+			sas->bitrate = 600000;
+			break;
+		default:
+			sas->valid = 0;
+		}
+
+		cts->protocol = PROTO_SCSI;
+		scsi->valid = CTS_SCSI_VALID_TQ;
+		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+
+		cts->ccb_h.status = CAM_REQ_CMP;
+		break;
+	}
+	case XPT_CALC_GEOMETRY:
+		cam_calc_geometry(&ccb->ccg, /*extended*/1);
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		break;
+	case XPT_RESET_DEV:
+		mpr_dprint(sassc->sc, MPR_XINFO,
+		    "mprsas_action XPT_RESET_DEV\n");
+		mprsas_action_resetdev(sassc, ccb);
+		return;
+	case XPT_RESET_BUS:
+	case XPT_ABORT:
+	case XPT_TERM_IO:
+		mpr_dprint(sassc->sc, MPR_XINFO,
+		    "mprsas_action faking success for abort or reset\n");
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		break;
+	case XPT_SCSI_IO:
+		mprsas_action_scsiio(sassc, ccb);
+		return;
+#if __FreeBSD_version >= 900026
+	case XPT_SMP_IO:
+		mprsas_action_smpio(sassc, ccb);
+		return;
+#endif
+	default:
+		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+		break;
+	}
+	xpt_done(ccb);
+
+}
+
+/*
+ * Broadcast a reset notification (e.g. AC_SENT_BDR, AC_BUS_RESET) to CAM
+ * for the given target/LUN on this controller's bus.
+ */
+static void
+mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
+    target_id_t target_id, lun_id_t lun_id)
+{
+	struct cam_path *reset_path;
+	path_id_t bus_path_id;
+
+	bus_path_id = cam_sim_path(sc->sassc->sim);
+	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
+	    ac_code, target_id, (uintmax_t)lun_id);
+
+	/* Build a path to the affected device, then fire the async event. */
+	if (xpt_create_path(&reset_path, NULL, bus_path_id, target_id,
+	    lun_id) != CAM_REQ_CMP) {
+		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
+		    "notification\n");
+		return;
+	}
+
+	xpt_async(ac_code, reset_path, NULL);
+	xpt_free_path(reset_path);
+}
+
+/*
+ * Force-complete every outstanding command with a NULL reply after a
+ * diagnostic reset.  Polled commands are flagged complete, commands with
+ * a completion callback are completed, and sleeping waiters are woken.
+ * Anything still not FREE afterwards is logged as leaked state.
+ */
+static void
+mprsas_complete_all_commands(struct mpr_softc *sc)
+{
+	struct mpr_command *cm;
+	int i;
+	int completed;
+
+	MPR_FUNCTRACE(sc);
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	/* complete all commands with a NULL reply */
+	for (i = 1; i < sc->num_reqs; i++) {
+		cm = &sc->commands[i];
+		cm->cm_reply = NULL;
+		completed = 0;
+
+		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
+			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
+
+		if (cm->cm_complete != NULL) {
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "completing cm %p state %x ccb %p for diag reset\n",
+			    cm, cm->cm_state, cm->cm_ccb);
+			cm->cm_complete(sc, cm);
+			completed = 1;
+		}
+
+		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "waking up cm %p state %x ccb %p for diag reset\n",
+			    cm, cm->cm_state, cm->cm_ccb);
+			wakeup(cm);
+			completed = 1;
+		}
+
+		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
+			/* this should never happen, but if it does, log */
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "cm %p state %x flags 0x%x ccb %p during diag "
+			    "reset\n", cm, cm->cm_state, cm->cm_flags,
+			    cm->cm_ccb);
+		}
+	}
+}
+
+/*
+ * Post-diag-reset recovery: re-enter startup/discovery mode, announce a
+ * bus reset to CAM, flush all outstanding commands, and invalidate every
+ * target's firmware handle so rediscovery assigns fresh ones.
+ */
+void
+mprsas_handle_reinit(struct mpr_softc *sc)
+{
+	int i;
+
+	/* Go back into startup mode and freeze the simq, so that CAM
+	 * doesn't send any commands until after we've rediscovered all
+	 * targets and found the proper device handles for them.
+	 *
+	 * After the reset, portenable will trigger discovery, and after all
+	 * discovery-related activities have finished, the simq will be
+	 * released.
+	 */
+	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
+	sc->sassc->flags |= MPRSAS_IN_STARTUP;
+	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
+	mprsas_startup_increment(sc->sassc);
+
+	/* notify CAM of a bus reset */
+	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
+	    CAM_LUN_WILDCARD);
+
+	/* complete and cleanup after all outstanding commands */
+	mprsas_complete_all_commands(sc);
+
+	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
+	    "completion\n", __func__, sc->sassc->startup_refcount,
+	    sc->sassc->tm_count);
+
+	/* zero all the target handles, since they may change after the
+	 * reset, and we have to rediscover all the targets and use the new
+	 * handles.
+	 */
+	for (i = 0; i < sc->sassc->maxtargets; i++) {
+		if (sc->sassc->targets[i].outstanding != 0)
+			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
+			    i, sc->sassc->targets[i].outstanding);
+		sc->sassc->targets[i].handle = 0x0;
+		sc->sassc->targets[i].exp_dev_handle = 0x0;
+		sc->sassc->targets[i].outstanding = 0;
+		/* Note: plain assignment deliberately clears all other flags. */
+		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
+	}
+}
+/*
+ * Callout handler fired when a task-management request itself times out.
+ * There is no further escalation available at that point, so force a
+ * full controller diagnostic reset.
+ */
+static void
+mprsas_tm_timeout(void *data)
+{
+	struct mpr_command *tm;
+	struct mpr_softc *sc;
+
+	tm = data;
+	sc = tm->cm_sc;
+
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
+	    "task mgmt %p timed out\n", tm);
+	mpr_reinit(sc);
+}
+
+/*
+ * Completion handler for a logical-unit reset task-management request.
+ * If no commands remain outstanding on the LUN, recovery for it is done
+ * and we either move on to the next timed-out command or free the TM;
+ * if commands remain, the LU reset effectively failed and we escalate
+ * to a target reset.
+ */
+static void
+mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
+    struct mpr_command *tm)
+{
+	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+	unsigned int cm_count = 0;
+	struct mpr_command *cm;
+	struct mprsas_target *targ;
+
+	callout_stop(&tm->cm_callout);
+
+	/* The request frame was never consulted here; only the reply is. */
+	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+	targ = tm->cm_targ;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
+		    "This should not happen!\n", __func__, tm->cm_flags);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (reply == NULL) {
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "NULL reset reply for tm %p\n", tm);
+		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
+			/* this completion was due to a reset, just cleanup */
+			targ->flags &= ~MPRSAS_TARGET_INRESET;
+			targ->tm = NULL;
+			mprsas_free_tm(sc, tm);
+		}
+		else {
+			/* we should have gotten a reply. */
+			mpr_reinit(sc);
+		}
+		return;
+	}
+
+	mprsas_log_command(tm, MPR_RECOVERY,
+	    "logical unit reset status 0x%x code 0x%x count %u\n",
+	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
+	    le32toh(reply->TerminationCount));
+
+	/* See if there are any outstanding commands for this LUN.
+	 * This could be made more efficient by using a per-LU data
+	 * structure of some sort.
+	 */
+	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
+		if (cm->cm_lun == tm->cm_lun)
+			cm_count++;
+	}
+
+	if (cm_count == 0) {
+		/*
+		 * Fixed a format/argument mismatch here: the format string
+		 * has one conversion but a stray extra `tm' argument was
+		 * being passed.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
+		    "logical unit %u finished recovery after reset\n",
+		    tm->cm_lun);
+
+		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+		    tm->cm_lun);
+
+		/* we've finished recovery for this logical unit.  check and
+		 * see if some other logical unit has a timedout command
+		 * that needs to be processed.
+		 */
+		cm = TAILQ_FIRST(&targ->timedout_commands);
+		if (cm) {
+			mprsas_send_abort(sc, tm, cm);
+		}
+		else {
+			targ->tm = NULL;
+			mprsas_free_tm(sc, tm);
+		}
+	}
+	else {
+		/* if we still have commands for this LUN, the reset
+		 * effectively failed, regardless of the status reported.
+		 * Escalate to a target reset.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "logical unit reset complete for tm %p, but still have %u "
+		    "command(s)\n", tm, cm_count);
+		mprsas_send_reset(sc, tm,
+		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+	}
+}
+
+/*
+ * Completion handler for a target reset task-management request.  If the
+ * target has no outstanding commands left, recovery is complete; if it
+ * still does, the reset effectively failed and the only remaining
+ * escalation is a full controller diagnostic reset.
+ */
+static void
+mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
+{
+	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mprsas_target *targ;
+
+	callout_stop(&tm->cm_callout);
+
+	/* NOTE(review): req is assigned but never read in this function. */
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+	targ = tm->cm_targ;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
+		    "This should not happen!\n", __func__, tm->cm_flags);
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (reply == NULL) {
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "NULL reset reply for tm %p\n", tm);
+		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
+			/* this completion was due to a reset, just cleanup */
+			targ->flags &= ~MPRSAS_TARGET_INRESET;
+			targ->tm = NULL;
+			mprsas_free_tm(sc, tm);
+		}
+		else {
+			/* we should have gotten a reply. */
+			mpr_reinit(sc);
+		}
+		return;
+	}
+
+	mprsas_log_command(tm, MPR_RECOVERY,
+	    "target reset status 0x%x code 0x%x count %u\n",
+	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
+	    le32toh(reply->TerminationCount));
+
+	targ->flags &= ~MPRSAS_TARGET_INRESET;
+
+	if (targ->outstanding == 0) {
+		/* we've finished recovery for this target and all
+		 * of its logical units.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
+		    "recovery finished after target reset\n");
+
+		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+		    CAM_LUN_WILDCARD);
+
+		targ->tm = NULL;
+		mprsas_free_tm(sc, tm);
+	}
+	else {
+		/* after a target reset, if this target still has
+		 * outstanding commands, the reset effectively failed,
+		 * regardless of the status reported.  escalate.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "target reset complete for tm %p, but still have %u "
+		    "command(s)\n", tm, targ->outstanding);
+		mpr_reinit(sc);
+	}
+}
+
+#define MPR_RESET_TIMEOUT 30
+
+/*
+ * Build and send a task-management reset of the given type (logical-unit
+ * or target reset) using the pre-allocated TM command.  Arms a 30-second
+ * timeout that escalates to a diag reset if no reply arrives.  Returns
+ * the mpr_map_command() result, or -1 on an invalid handle/type.
+ */
+static int
+mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
+{
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mprsas_target *target;
+	int err;
+
+	target = tm->cm_targ;
+	if (target->handle == 0) {
+		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
+		    __func__, target->tid);
+		return -1;
+	}
+
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+	req->DevHandle = htole16(target->handle);
+	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+	req->TaskType = type;
+
+	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
+		/* XXX Need to handle invalid LUNs */
+		MPR_SET_LUN(req->LUN, tm->cm_lun);
+		tm->cm_targ->logical_unit_resets++;
+		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
+		    "sending logical unit reset\n");
+		tm->cm_complete = mprsas_logical_unit_reset_complete;
+	}
+	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
+		/*
+		 * Target reset method =
+		 *	SAS Hard Link Reset / SATA Link Reset
+		 */
+		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+		tm->cm_targ->target_resets++;
+		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
+		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
+		    "sending target reset\n");
+		tm->cm_complete = mprsas_target_reset_complete;
+	}
+	else {
+		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
+		return -1;
+	}
+
+	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
+	    target->handle);
+	if (target->encl_level_valid) {
+		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
+		    "connector name (%4s)\n", target->encl_level,
+		    target->encl_slot, target->connector_name);
+	}
+
+	tm->cm_data = NULL;
+	tm->cm_desc.HighPriority.RequestFlags =
+	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	tm->cm_complete_data = (void *)tm;
+
+	/* If the reset itself times out, mprsas_tm_timeout() reinits. */
+	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
+	    mprsas_tm_timeout, tm);
+
+	err = mpr_map_command(sc, tm);
+	if (err)
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "error %d sending reset type %u\n",
+		    err, type);
+
+	return err;
+}
+
+
+/*
+ * Completion handler for an ABORT_TASK task-management request.  Walks
+ * the target's timed-out command list: if it is empty, recovery is done;
+ * if the head changed (the abort worked), abort the next one; if the
+ * head is still the command we just tried to abort, escalate to a
+ * logical-unit reset.
+ */
+static void
+mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
+{
+	struct mpr_command *cm;
+	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mprsas_target *targ;
+
+	callout_stop(&tm->cm_callout);
+
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+	targ = tm->cm_targ;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "cm_flags = %#x for abort %p TaskMID %u!\n",
+		    tm->cm_flags, tm, le16toh(req->TaskMID));
+		mprsas_free_tm(sc, tm);
+		return;
+	}
+
+	if (reply == NULL) {
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "NULL abort reply for tm %p TaskMID %u\n",
+		    tm, le16toh(req->TaskMID));
+		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
+			/* this completion was due to a reset, just cleanup */
+			targ->tm = NULL;
+			mprsas_free_tm(sc, tm);
+		}
+		else {
+			/* we should have gotten a reply. */
+			mpr_reinit(sc);
+		}
+		return;
+	}
+
+	mprsas_log_command(tm, MPR_RECOVERY,
+	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
+	    le16toh(req->TaskMID),
+	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
+	    le32toh(reply->TerminationCount));
+
+	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
+	if (cm == NULL) {
+		/* if there are no more timedout commands, we're done with
+		 * error recovery for this target.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "finished recovery after aborting TaskMID %u\n",
+		    le16toh(req->TaskMID));
+
+		targ->tm = NULL;
+		mprsas_free_tm(sc, tm);
+	}
+	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
+		/* abort success, but we have more timedout commands to abort */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "continuing recovery after aborting TaskMID %u\n",
+		    le16toh(req->TaskMID));
+
+		mprsas_send_abort(sc, tm, cm);
+	}
+	else {
+		/* we didn't get a command completion, so the abort
+		 * failed as far as we're concerned.  escalate.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "abort failed for TaskMID %u tm %p\n",
+		    le16toh(req->TaskMID), tm);
+
+		mprsas_send_reset(sc, tm,
+		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
+	}
+}
+
+#define MPR_ABORT_TIMEOUT 5
+
+/*
+ * Build and send an ABORT_TASK task-management request for the timed-out
+ * command `cm' using the pre-allocated TM command `tm'.  Arms a 5-second
+ * timeout that escalates to a diag reset if no reply arrives.  Returns
+ * the mpr_map_command() result, or -1 on a stale (zero) device handle.
+ */
+static int
+mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
+    struct mpr_command *cm)
+{
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mprsas_target *targ;
+	int err;
+
+	targ = cm->cm_targ;
+	if (targ->handle == 0) {
+		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
+		    __func__, cm->cm_ccb->ccb_h.target_id);
+		return -1;
+	}
+
+	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
+	    "Aborting command %p\n", cm);
+
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+	req->DevHandle = htole16(targ->handle);
+	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+
+	/* XXX Need to handle invalid LUNs */
+	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
+
+	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
+
+	tm->cm_data = NULL;
+	tm->cm_desc.HighPriority.RequestFlags =
+	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	tm->cm_complete = mprsas_abort_complete;
+	tm->cm_complete_data = (void *)tm;
+	tm->cm_targ = cm->cm_targ;
+	tm->cm_lun = cm->cm_lun;
+
+	/* If the abort itself times out, mprsas_tm_timeout() reinits. */
+	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
+	    mprsas_tm_timeout, tm);
+
+	targ->aborts++;
+
+	err = mpr_map_command(sc, tm);
+	if (err)
+		/*
+		 * TaskMID was stored with htole16() above; swap it back to
+		 * host order for logging, as every other log of TaskMID in
+		 * this file does.
+		 */
+		mprsas_log_command(tm, MPR_RECOVERY,
+		    "error %d sending abort for cm %p SMID %u\n",
+		    err, cm, le16toh(req->TaskMID));
+	return err;
+}
+
+
+/*
+ * Callout handler for a timed-out SCSI I/O.  First re-runs the interrupt
+ * handler in case the completion is merely pending; otherwise marks the
+ * command timed-out, queues it on the target's recovery list, and starts
+ * recovery by sending an abort (or defers to an already-active TM).
+ */
+static void
+mprsas_scsiio_timeout(void *data)
+{
+	struct mpr_softc *sc;
+	struct mpr_command *cm;
+	struct mprsas_target *targ;
+
+	cm = (struct mpr_command *)data;
+	sc = cm->cm_sc;
+
+	MPR_FUNCTRACE(sc);
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
+
+	/*
+	 * Run the interrupt handler to make sure it's not pending.  This
+	 * isn't perfect because the command could have already completed
+	 * and been re-used, though this is unlikely.
+	 */
+	mpr_intr_locked(sc);
+	if (cm->cm_state == MPR_CM_STATE_FREE) {
+		mprsas_log_command(cm, MPR_XINFO,
+		    "SCSI command %p almost timed out\n", cm);
+		return;
+	}
+
+	if (cm->cm_ccb == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
+		return;
+	}
+
+	targ = cm->cm_targ;
+	targ->timeouts++;
+
+	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
+	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
+	    targ->handle);
+	if (targ->encl_level_valid) {
+		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
+		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
+		    targ->connector_name);
+	}
+
+	/* XXX first, check the firmware state, to see if it's still
+	 * operational.  if not, do a diag reset.
+	 */
+
+	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
+	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
+	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
+
+	if (targ->tm != NULL) {
+		/* target already in recovery, just queue up another
+		 * timedout command to be processed later.
+		 */
+		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
+		    "processing by tm %p\n", cm, targ->tm);
+	}
+	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
+		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
+		    cm, targ->tm);
+
+		/* start recovery by aborting the first timedout command */
+		mprsas_send_abort(sc, targ->tm, cm);
+	}
+	else {
+		/* XXX queue this target up for recovery once a TM becomes
+		 * available.  The firmware only has a limited number of
+		 * HighPriority credits for the high priority requests used
+		 * for task management, and we ran out.
+		 *
+		 * Isilon: don't worry about this for now, since we have
+		 * more credits than disks in an enclosure, and limit
+		 * ourselves to one TM per target for recovery.
+		 */
+		mpr_dprint(sc, MPR_RECOVERY,
+		    "timedout cm %p failed to allocate a tm\n", cm);
+	}
+}
+
+/*
+ * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI_IO request and hand it to
+ * the controller.
+ *
+ * Called with the softc mutex held.  On validation failures the CCB is
+ * completed immediately via xpt_done() with an appropriate status; when no
+ * command frame is available (or a diag reset is in progress) the SIM queue
+ * is frozen and the CCB is requeued.  Otherwise the command is queued to
+ * the hardware and completes through mprsas_scsiio_complete().
+ */
+static void
+mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
+{
+	MPI2_SCSI_IO_REQUEST *req;
+	struct ccb_scsiio *csio;
+	struct mpr_softc *sc;
+	struct mprsas_target *targ;
+	struct mprsas_lun *lun;
+	struct mpr_command *cm;
+	uint8_t i, lba_byte, *ref_tag_addr;
+	uint16_t eedp_flags;
+	uint32_t mpi_control;
+
+	sc = sassc->sc;
+	MPR_FUNCTRACE(sc);
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	csio = &ccb->csio;
+	targ = &sassc->targets[csio->ccb_h.target_id];
+	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
+	/* A zero device handle means the target is not (or no longer) mapped. */
+	if (targ->handle == 0x0) {
+		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 
+		    __func__, csio->ccb_h.target_id);
+		csio->ccb_h.status = CAM_DEV_NOT_THERE;
+		xpt_done(ccb);
+		return;
+	}
+	/* RAID component members are hidden from direct SCSI I/O. */
+	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
+		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
+		    "supported %u\n", __func__, csio->ccb_h.target_id);
+		csio->ccb_h.status = CAM_DEV_NOT_THERE;
+		xpt_done(ccb);
+		return;
+	}
+	/*
+	 * Sometimes, it is possible to get a command that is not "In
+	 * Progress" and was actually aborted by the upper layer.  Check for
+	 * this here and complete the command without error.
+	 */
+	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
+		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
+		    "target %u\n", __func__, csio->ccb_h.target_id);
+		xpt_done(ccb);
+		return;
+	}
+	/*
+	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
+	 * that the volume has timed out.  We want volumes to be enumerated
+	 * until they are deleted/removed, not just failed.
+	 */
+	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
+		if (targ->devinfo == 0)
+			csio->ccb_h.status = CAM_REQ_CMP;
+		else
+			csio->ccb_h.status = CAM_SEL_TIMEOUT;
+		xpt_done(ccb);
+		return;
+	}
+
+	/* Refuse new I/O while the driver is shutting down. */
+	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
+		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
+		csio->ccb_h.status = CAM_DEV_NOT_THERE;
+		xpt_done(ccb);
+		return;
+	}
+
+	/*
+	 * Out of command frames (or a diag reset is pending): freeze the SIM
+	 * queue and ask CAM to requeue this CCB later.
+	 */
+	cm = mpr_alloc_command(sc);
+	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
+		if (cm != NULL) {
+			mpr_free_command(sc, cm);
+		}
+		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
+			xpt_freeze_simq(sassc->sim, 1);
+			sassc->flags |= MPRSAS_QUEUE_FROZEN;
+		}
+		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+		xpt_done(ccb);
+		return;
+	}
+
+	/* Build the MPI2 SCSI_IO request frame. */
+	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
+	bzero(req, sizeof(*req));
+	req->DevHandle = htole16(targ->handle);
+	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+	req->MsgFlags = 0;
+	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
+	req->SenseBufferLength = MPR_SENSE_LEN;
+	req->SGLFlags = 0;
+	req->ChainOffset = 0;
+	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
+	req->SGLOffset1= 0;
+	req->SGLOffset2= 0;
+	req->SGLOffset3= 0;
+	req->SkipCount = 0;
+	req->DataLength = htole32(csio->dxfer_len);
+	req->BidirectionalDataLength = 0;
+	/* NOTE(review): IoFlags is assigned again below after the CDB copy. */
+	req->IoFlags = htole16(csio->cdb_len);
+	req->EEDPFlags = 0;
+
+	/* Note: BiDirectional transfers are not supported */
+	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
+	case CAM_DIR_IN:
+		mpi_control = MPI2_SCSIIO_CONTROL_READ;
+		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
+		break;
+	case CAM_DIR_OUT:
+		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
+		break;
+	case CAM_DIR_NONE:
+	default:
+		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+		break;
+	}
+
+	/* 32-byte CDBs carry 4 additional CDB length units in Control. */
+	if (csio->cdb_len == 32)
+		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+	/*
+	 * It looks like the hardware doesn't require an explicit tag
+	 * number for each transaction.  SAM Task Management not supported
+	 * at the moment.
+	 */
+	switch (csio->tag_action) {
+	case MSG_HEAD_OF_Q_TAG:
+		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
+		break;
+	case MSG_ORDERED_Q_TAG:
+		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
+		break;
+	case MSG_ACA_TASK:
+		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
+		break;
+	case CAM_TAG_ACTION_NONE:
+	case MSG_SIMPLE_Q_TAG:
+	default:
+		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+		break;
+	}
+	/* Apply per-target Transport Layer Retries setting. */
+	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
+	req->Control = htole32(mpi_control);
+
+	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
+		mpr_free_command(sc, cm);
+		ccb->ccb_h.status = CAM_LUN_INVALID;
+		xpt_done(ccb);
+		return;
+	}
+
+	/* Copy the CDB in, honoring CAM's pointer-vs-inline storage flag. */
+	if (csio->ccb_h.flags & CAM_CDB_POINTER)
+		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
+	else
+		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
+	req->IoFlags = htole16(csio->cdb_len);
+
+	/*
+	 * Check if EEDP is supported and enabled.  If it is then check if the
+	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
+	 * is formatted for EEDP support.  If all of this is true, set CDB up
+	 * for EEDP transfer.
+	 */
+	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
+	if (sc->eedp_enabled && eedp_flags) {
+		SLIST_FOREACH(lun, &targ->luns, lun_link) {
+			if (lun->lun_id == csio->ccb_h.target_lun) {
+				break;
+			}
+		}
+
+		if ((lun != NULL) && (lun->eedp_formatted)) {
+			req->EEDPBlockSize = htole16(lun->eedp_block_size);
+			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+			req->EEDPFlags = htole16(eedp_flags);
+
+			/*
+			 * If CDB less than 32, fill in Primary Ref Tag with
+			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
+			 * already there.  Also, set protection bit.  FreeBSD
+			 * currently does not support CDBs bigger than 16, but
+			 * the code doesn't hurt, and will be here for the
+			 * future.
+			 */
+			if (csio->cdb_len != 32) {
+				/* LBA starts at byte 6 for 16-byte CDBs, byte 2 otherwise. */
+				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
+				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
+				    PrimaryReferenceTag;
+				for (i = 0; i < 4; i++) {
+					*ref_tag_addr =
+					    req->CDB.CDB32[lba_byte + i];
+					ref_tag_addr++;
+				}
+				req->CDB.EEDP32.PrimaryReferenceTag =
+				    htole32(req->
+				    CDB.EEDP32.PrimaryReferenceTag);
+				req->CDB.EEDP32.PrimaryApplicationTagMask =
+				    0xFFFF;
+				/* Set the RDPROTECT/WRPROTECT field to 001b. */
+				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
+				    0x20;
+			} else {
+				eedp_flags |=
+				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
+				req->EEDPFlags = htole16(eedp_flags);
+				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
+				    0x1F) | 0x20;
+			}
+		}
+	}
+
+	cm->cm_length = csio->dxfer_len;
+	if (cm->cm_length != 0) {
+		cm->cm_data = ccb;
+		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
+	} else {
+		cm->cm_data = NULL;
+	}
+	cm->cm_sge = &req->SGL;
+	/* Bytes remaining in the request frame after the 24-word header. */
+	cm->cm_sglsize = (32 - 24) * 4;
+	cm->cm_complete = mprsas_scsiio_complete;
+	cm->cm_complete_data = ccb;
+	cm->cm_targ = targ;
+	cm->cm_lun = csio->ccb_h.target_lun;
+	cm->cm_ccb = ccb;
+	/*
+	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
+	 * and set descriptor type.
+	 */
+	if (targ->scsi_req_desc_type ==
+	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
+		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
+		cm->cm_desc.FastPathSCSIIO.RequestFlags =
+		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
+	} else {
+		cm->cm_desc.SCSIIO.RequestFlags =
+		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
+	}
+
+	/* CCB timeout is in milliseconds; convert to ticks. */
+	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
+	    mprsas_scsiio_timeout, cm);
+
+	targ->issued++;
+	targ->outstanding++;
+	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
+	ccb->ccb_h.status |= CAM_SIM_QUEUED;
+
+	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
+	    __func__, cm, ccb, targ->outstanding);
+
+	mpr_map_command(sc, cm);
+	return;
+}
+
+/*
+ * Decode a task-management response code into a human-readable string and
+ * log it at XINFO level.  Purely diagnostic; no state is changed.
+ */
+static void
+mpr_response_code(struct mpr_softc *sc, u8 response_code)
+{
+	char *desc;
+ 
+	switch (response_code) {
+	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+		desc = "task management request completed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+		desc = "invalid frame";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+		desc = "task management request not supported";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+		desc = "task management request failed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+		desc = "task management request succeeded";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+		desc = "invalid lun";
+		break;
+	/* 0xA: literal used here -- presumably no MPI2 symbolic name exists
+	 * for "overlapped tag attempted"; verify against mpi2.h. */
+	case 0xA:
+		desc = "overlapped tag attempted";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+		desc = "task queued, however not sent to target";
+		break;
+	default:
+		desc = "unknown";
+		break;
+	}
+	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
+	    desc);
+}
+
+/**
+ * mpr_sc_failed_io_info - log details of a non-successful SCSI_IO request
+ *
+ * Translates the reply's IOC status, SCSI status and SCSI state fields into
+ * human-readable strings and prints them at XINFO debug level, including
+ * enclosure/slot information and (when valid) the autosense data and SAS
+ * response code.  Diagnostic only; modifies no command or target state.
+ */
+static void
+mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
+    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
+{
+	u32 response_info;
+	u8 *response_bytes;
+	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	u8 scsi_state = mpi_reply->SCSIState;
+	u8 scsi_status = mpi_reply->SCSIStatus;
+	/* scsi_state string is built in the shared sc->tmp_string scratch
+	 * buffer -- presumably safe because the softc lock serializes
+	 * completions; verify against other tmp_string users. */
+	char *desc_ioc_state = NULL;
+	char *desc_scsi_status = NULL;
+	char *desc_scsi_state = sc->tmp_string;
+	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
+
+	/* Suppress this firmware log-info code entirely -- TODO confirm
+	 * which condition 0x31170000 denotes (likely too noisy to log). */
+	if (log_info == 0x31170000)
+		return;
+
+	switch (ioc_status) {
+	case MPI2_IOCSTATUS_SUCCESS:
+		desc_ioc_state = "success";
+		break;
+	case MPI2_IOCSTATUS_INVALID_FUNCTION:
+		desc_ioc_state = "invalid function";
+		break;
+	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+		desc_ioc_state = "scsi recovered error";
+		break;
+	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+		desc_ioc_state = "scsi invalid dev handle";
+		break;
+	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+		desc_ioc_state = "scsi device not there";
+		break;
+	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+		desc_ioc_state = "scsi data overrun";
+		break;
+	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+		desc_ioc_state = "scsi data underrun";
+		break;
+	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+		desc_ioc_state = "scsi io data error";
+		break;
+	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+		desc_ioc_state = "scsi protocol error";
+		break;
+	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+		desc_ioc_state = "scsi task terminated";
+		break;
+	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+		desc_ioc_state = "scsi residual mismatch";
+		break;
+	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+		desc_ioc_state = "scsi task mgmt failed";
+		break;
+	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+		desc_ioc_state = "scsi ioc terminated";
+		break;
+	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+		desc_ioc_state = "scsi ext terminated";
+		break;
+	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+		desc_ioc_state = "eedp guard error";
+		break;
+	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+		desc_ioc_state = "eedp ref tag error";
+		break;
+	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+		desc_ioc_state = "eedp app tag error";
+		break;
+	default:
+		desc_ioc_state = "unknown";
+		break;
+	}
+
+	switch (scsi_status) {
+	case MPI2_SCSI_STATUS_GOOD:
+		desc_scsi_status = "good";
+		break;
+	case MPI2_SCSI_STATUS_CHECK_CONDITION:
+		desc_scsi_status = "check condition";
+		break;
+	case MPI2_SCSI_STATUS_CONDITION_MET:
+		desc_scsi_status = "condition met";
+		break;
+	case MPI2_SCSI_STATUS_BUSY:
+		desc_scsi_status = "busy";
+		break;
+	case MPI2_SCSI_STATUS_INTERMEDIATE:
+		desc_scsi_status = "intermediate";
+		break;
+	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+		desc_scsi_status = "intermediate condmet";
+		break;
+	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+		desc_scsi_status = "reservation conflict";
+		break;
+	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+		desc_scsi_status = "command terminated";
+		break;
+	case MPI2_SCSI_STATUS_TASK_SET_FULL:
+		desc_scsi_status = "task set full";
+		break;
+	case MPI2_SCSI_STATUS_ACA_ACTIVE:
+		desc_scsi_status = "aca active";
+		break;
+	case MPI2_SCSI_STATUS_TASK_ABORTED:
+		desc_scsi_status = "task aborted";
+		break;
+	default:
+		desc_scsi_status = "unknown";
+		break;
+	}
+
+	/* Build the scsi_state string from the individual state bits.  When
+	 * scsi_state is 0 the pointer is redirected to a literal; no strcat
+	 * runs in that case since every strcat below is bit-guarded. */
+	desc_scsi_state[0] = '\0';
+	if (!scsi_state)
+		desc_scsi_state = " ";
+	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+		strcat(desc_scsi_state, "response info ");
+	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+		strcat(desc_scsi_state, "state terminated ");
+	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+		strcat(desc_scsi_state, "no status ");
+	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+		strcat(desc_scsi_state, "autosense failed ");
+	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+		strcat(desc_scsi_state, "autosense valid ");
+
+	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
+	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
+	if (targ->encl_level_valid) {
+		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
+		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
+		    targ->connector_name);
+	}
+	/* We can add more detail about underflow data here
+	 * TO-DO
+	 * */
+	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
+	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
+	    desc_scsi_state, scsi_state);
+
+	if (sc->mpr_debug & MPR_XINFO &&
+	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
+		scsi_sense_print(csio);
+		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
+	}
+
+	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+		response_info = le32toh(mpi_reply->ResponseInfo);
+		response_bytes = (u8 *)&response_info;
+		mpr_response_code(sc,response_bytes[0]);
+	}
+}
+
+/*
+ * Completion handler for SCSI I/O commands queued by mprsas_action_scsiio().
+ *
+ * Runs with the softc mutex held.  Stops the timeout callout, syncs and
+ * unloads the data DMA map, removes the command from the target's lists,
+ * translates the MPI2 reply (or the lack of one) into a CAM CCB status,
+ * manages SIM-queue freeze/unfreeze, frees the command and completes the
+ * CCB with xpt_done().
+ */
+static void
+mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	MPI2_SCSI_IO_REPLY *rep;
+	union ccb *ccb;
+	struct ccb_scsiio *csio;
+	struct mprsas_softc *sassc;
+	struct scsi_vpd_supported_page_list *vpd_list = NULL;
+	u8 *TLR_bits, TLR_on;
+	int dir = 0, i;
+	u16 alloc_len;
+
+	MPR_FUNCTRACE(sc);
+	mpr_dprint(sc, MPR_TRACE,
+	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
+	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
+	    cm->cm_targ->outstanding);
+
+	callout_stop(&cm->cm_callout);
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	sassc = sc->sassc;
+	ccb = cm->cm_complete_data;
+	csio = &ccb->csio;
+	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
+	/*
+	 * XXX KDM if the chain allocation fails, does it matter if we do
+	 * the sync and unload here?  It is simpler to do it in every case,
+	 * assuming it doesn't cause problems.
+	 */
+	if (cm->cm_data != NULL) {
+		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
+			dir = BUS_DMASYNC_POSTREAD;
+		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
+			dir = BUS_DMASYNC_POSTWRITE;
+		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
+		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
+	}
+
+	cm->cm_targ->completed++;
+	cm->cm_targ->outstanding--;
+	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
+	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
+
+	/* Recovery bookkeeping/logging for commands that timed out or whose
+	 * target is (or just was) under task-management recovery. */
+	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
+		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
+		if (cm->cm_reply != NULL)
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "completed timedout cm %p ccb %p during recovery "
+			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
+			    le16toh(rep->IOCStatus), rep->SCSIStatus,
+			    rep->SCSIState, le32toh(rep->TransferCount));
+		else
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "completed timedout cm %p ccb %p during recovery\n",
+			    cm, cm->cm_ccb);
+	} else if (cm->cm_targ->tm != NULL) {
+		if (cm->cm_reply != NULL)
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "completed cm %p ccb %p during recovery "
+			    "ioc %x scsi %x state %x xfer %u\n",
+			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
+			    rep->SCSIStatus, rep->SCSIState,
+			    le32toh(rep->TransferCount));
+		else
+			mprsas_log_command(cm, MPR_RECOVERY,
+			    "completed cm %p ccb %p during recovery\n",
+			    cm, cm->cm_ccb);
+	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
+		mprsas_log_command(cm, MPR_RECOVERY,
+		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
+	}
+
+	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		/*
+		 * We ran into an error after we tried to map the command,
+		 * so we're getting a callback without queueing the command
+		 * to the hardware.  So we set the status here, and it will
+		 * be retained below.  We'll go through the "fast path",
+		 * because there can be no reply when we haven't actually
+		 * gone out to the hardware.
+		 */
+		ccb->ccb_h.status = CAM_REQUEUE_REQ;
+
+		/*
+		 * Currently the only error included in the mask is
+		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
+		 * chain frames.  We need to freeze the queue until we get
+		 * a command that completed without this error, which will
+		 * hopefully have some chain frames attached that we can
+		 * use.  If we wanted to get smarter about it, we would
+		 * only unfreeze the queue in this condition when we're
+		 * sure that we're getting some chain frames back.  That's
+		 * probably unnecessary.
+		 */
+		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
+			xpt_freeze_simq(sassc->sim, 1);
+			sassc->flags |= MPRSAS_QUEUE_FROZEN;
+			mpr_dprint(sc, MPR_INFO, "Error sending command, "
+			    "freezing SIM queue\n");
+		}
+	}
+
+	/*
+	 * If this is a Start Stop Unit command and it was issued by the driver
+	 * during shutdown, decrement the refcount to account for all of the
+	 * commands that were sent.  All SSU commands should be completed before
+	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
+	 * is TRUE.
+	 */
+	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
+		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
+		sc->SSU_refcount--;
+	}
+
+	/* Take the fast path to completion */
+	if (cm->cm_reply == NULL) {
+		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
+			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
+				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+			else {
+				ccb->ccb_h.status = CAM_REQ_CMP;
+				ccb->csio.scsi_status = SCSI_STATUS_OK;
+			}
+			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
+				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
+				mpr_dprint(sc, MPR_XINFO,
+				    "Unfreezing SIM queue\n");
+			}
+		} 
+
+		/*
+		 * There are two scenarios where the status won't be
+		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
+		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
+		 */
+		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+			/*
+			 * Freeze the dev queue so that commands are
+			 * executed in the correct order with after error
+			 * recovery.
+			 */
+			ccb->ccb_h.status |= CAM_DEV_QFRZN;
+			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
+		}
+		mpr_free_command(sc, cm);
+		xpt_done(ccb);
+		return;
+	}
+
+	mprsas_log_command(cm, MPR_XINFO,
+	    "ioc %x scsi %x state %x xfer %u\n",
+	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
+	    le32toh(rep->TransferCount));
+
+	/* Map the MPI2 IOC status from the reply onto a CAM CCB status. */
+	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
+	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
+		/* FALLTHROUGH */
+	case MPI2_IOCSTATUS_SUCCESS:
+	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+
+		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
+		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
+			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
+
+		/* Completion failed at the transport level. */
+		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
+		    MPI2_SCSI_STATE_TERMINATED)) {
+			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+			break;
+		}
+
+		/* In a modern packetized environment, an autosense failure
+		 * implies that there's not much else that can be done to
+		 * recover the command.
+		 */
+		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
+			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
+			break;
+		}
+
+		/*
+		 * CAM doesn't care about SAS Response Info data, but if this is
+		 * the state check if TLR should be done.  If not, clear the
+		 * TLR_bits for the target.
+		 */
+		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
+		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
+		    == MPR_SCSI_RI_INVALID_FRAME)) {
+			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
+			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+		}
+
+		/*
+		 * Intentionally override the normal SCSI status reporting
+		 * for these two cases.  These are likely to happen in a
+		 * multi-initiator environment, and we want to make sure that
+		 * CAM retries these commands rather than fail them.
+		 */
+		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
+		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
+			ccb->ccb_h.status = CAM_REQ_ABORTED;
+			break;
+		}
+
+		/* Handle normal status and sense */
+		csio->scsi_status = rep->SCSIStatus;
+		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
+			ccb->ccb_h.status = CAM_REQ_CMP;
+		else
+			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+
+		/* Copy autosense data, clamped to both what the firmware
+		 * returned and what the CCB's sense buffer can hold. */
+		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+			int sense_len, returned_sense_len;
+
+			returned_sense_len = min(le32toh(rep->SenseCount),
+			    sizeof(struct scsi_sense_data));
+			if (returned_sense_len < csio->sense_len)
+				csio->sense_resid = csio->sense_len -
+				    returned_sense_len;
+			else
+				csio->sense_resid = 0;
+
+			sense_len = min(returned_sense_len,
+			    csio->sense_len - csio->sense_resid);
+			bzero(&csio->sense_data, sizeof(csio->sense_data));
+			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
+			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+		}
+
+		/*
+		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
+		 * and it's page code 0 (Supported Page List), and there is
+		 * inquiry data, and this is for a sequential access device, and
+		 * the device is an SSP target, and TLR is supported by the
+		 * controller, turn the TLR_bits value ON if page 0x90 is
+		 * supported.
+		 */
+		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
+		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
+		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
+		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
+		    (csio->data_ptr != NULL) &&
+		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
+		    (sc->control_TLR) &&
+		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
+		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
+			vpd_list = (struct scsi_vpd_supported_page_list *)
+			    csio->data_ptr;
+			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
+			    TLR_bits;
+			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
+			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
+			/* Allocation length comes from CDB bytes 3-4 of the
+			 * 6-byte INQUIRY, less any residual. */
+			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
+			    csio->cdb_io.cdb_bytes[4];
+			alloc_len -= csio->resid;
+			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
+				if (vpd_list->list[i] == 0x90) {
+					*TLR_bits = TLR_on;
+					break;
+				}
+			}
+		}
+		break;
+	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+		/*
+		 * If devinfo is 0 this will be a volume.  In that case don't
+		 * tell CAM that the volume is not there.  We want volumes to
+		 * be enumerated until they are deleted/removed, not just
+		 * failed.
+		 */
+		if (cm->cm_targ->devinfo == 0)
+			ccb->ccb_h.status = CAM_REQ_CMP;
+		else
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+		break;
+	case MPI2_IOCSTATUS_INVALID_SGL:
+		mpr_print_scsiio_cmd(sc, cm);
+		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
+		break;
+	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+		/*
+		 * This is one of the responses that comes back when an I/O
+		 * has been aborted.  If it is because of a timeout that we
+		 * initiated, just set the status to CAM_CMD_TIMEOUT.
+		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
+		 * command is the same (it gets retried, subject to the
+		 * retry counter), the only difference is what gets printed
+		 * on the console.
+		 */
+		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
+			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
+		else
+			ccb->ccb_h.status = CAM_REQ_ABORTED;
+		break;
+	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+		/* resid is ignored for this condition */
+		csio->resid = 0;
+		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
+		break;
+	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+		/*
+		 * Since these are generally external (i.e. hopefully
+		 * transient transport-related) errors, retry these without
+		 * decrementing the retry count.
+		 */
+		ccb->ccb_h.status = CAM_REQUEUE_REQ;
+		mprsas_log_command(cm, MPR_INFO,
+		    "terminated ioc %x scsi %x state %x xfer %u\n",
+		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
+		    le32toh(rep->TransferCount));
+		break;
+	case MPI2_IOCSTATUS_INVALID_FUNCTION:
+	case MPI2_IOCSTATUS_INTERNAL_ERROR:
+	case MPI2_IOCSTATUS_INVALID_VPID:
+	case MPI2_IOCSTATUS_INVALID_FIELD:
+	case MPI2_IOCSTATUS_INVALID_STATE:
+	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+	default:
+		mprsas_log_command(cm, MPR_XINFO,
+		    "completed ioc %x scsi %x state %x xfer %u\n",
+		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
+		    le32toh(rep->TransferCount));
+		csio->resid = cm->cm_length;
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		break;
+	}
+	
+	/* NOTE(review): invoked for every reply-carrying completion,
+	 * including successes; it only emits output under XINFO debug --
+	 * confirm this unconditional call is intended. */
+	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
+
+	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
+		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
+		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
+		    "queue\n");
+	}
+
+	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		ccb->ccb_h.status |= CAM_DEV_QFRZN;
+		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
+	}
+
+	mpr_free_command(sc, cm);
+	xpt_done(ccb);
+}
+
+#if __FreeBSD_version >= 900026
+/*
+ * Completion handler for SMP passthrough requests sent by
+ * mprsas_send_smpcmd().
+ *
+ * Validates the command flags and the MPI2 reply, sets the CCB status
+ * accordingly, then (in all cases) syncs/unloads the DMA map, frees the
+ * command and completes the CCB.
+ */
+static void
+mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
+	MPI2_SMP_PASSTHROUGH_REQUEST *req;
+	uint64_t sasaddr;
+	union ccb *ccb;
+
+	ccb = cm->cm_complete_data;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and SMP
+	 * commands require two S/G elements only.  That should be handled
+	 * in the standard request size.
+	 */
+	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
+		    __func__, cm->cm_flags);
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		goto bailout;
+        }
+
+	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
+	if (rpl == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		goto bailout;
+	}
+
+	/* Reassemble the 64-bit SAS address from the request frame, for
+	 * logging only. */
+	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
+	sasaddr = le32toh(req->SASAddress.Low);
+	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
+
+	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
+	    MPI2_IOCSTATUS_SUCCESS ||
+	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
+		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
+		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		goto bailout;
+	}
+
+	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
+	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
+
+	/* Byte 2 of the SMP response frame is the function result code. */
+	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
+		ccb->ccb_h.status = CAM_REQ_CMP;
+	else
+		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
+
+bailout:
+	/*
+	 * We sync in both directions because we had DMAs in the S/G list
+	 * in both directions.
+	 */
+	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
+			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
+	mpr_free_command(sc, cm);
+	xpt_done(ccb);
+}
+
+static void
+mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
+ uint64_t sasaddr)
+{
+ struct mpr_command *cm;
+ uint8_t *request, *response;
+ MPI2_SMP_PASSTHROUGH_REQUEST *req;
+ struct mpr_softc *sc;
+ struct sglist *sg;
+ int error;
+
+ sc = sassc->sc;
+ sg = NULL;
+ error = 0;
+
+#if (__FreeBSD_version >= 1000028) || \
+ ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
+ switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
+ case CAM_DATA_PADDR:
+ case CAM_DATA_SG_PADDR:
+ /*
+ * XXX We don't yet support physical addresses here.
+ */
+ mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
+ "supported\n", __func__);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ case CAM_DATA_SG:
+ /*
+ * The chip does not support more than one buffer for the
+ * request or response.
+ */
+ if ((ccb->smpio.smp_request_sglist_cnt > 1)
+ || (ccb->smpio.smp_response_sglist_cnt > 1)) {
+ mpr_dprint(sc, MPR_ERROR,
+ "%s: multiple request or response buffer segments "
+ "not supported for SMP\n", __func__);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+
+ /*
+ * The CAM_SCATTER_VALID flag was originally implemented
+ * for the XPT_SCSI_IO CCB, which only has one data pointer.
+ * We have two. So, just take that flag to mean that we
+ * might have S/G lists, and look at the S/G segment count
+ * to figure out whether that is the case for each individual
+ * buffer.
+ */
+ if (ccb->smpio.smp_request_sglist_cnt != 0) {
+ bus_dma_segment_t *req_sg;
+
+ req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
+ request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
+ } else
+ request = ccb->smpio.smp_request;
+
+ if (ccb->smpio.smp_response_sglist_cnt != 0) {
+ bus_dma_segment_t *rsp_sg;
+
+ rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
+ response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
+ } else
+ response = ccb->smpio.smp_response;
+ break;
+ case CAM_DATA_VADDR:
+ request = ccb->smpio.smp_request;
+ response = ccb->smpio.smp_response;
+ break;
+ default:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+#else /* __FreeBSD_version < 1000028 */
+ /*
+ * XXX We don't yet support physical addresses here.
+ */
+ if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
+ mpr_printf(sc, "%s: physical addresses not supported\n",
+ __func__);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+
+ /*
+ * If the user wants to send an S/G list, check to make sure they
+ * have single buffers.
+ */
+ if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
+ /*
+ * The chip does not support more than one buffer for the
+ * request or response.
+ */
+ if ((ccb->smpio.smp_request_sglist_cnt > 1)
+ || (ccb->smpio.smp_response_sglist_cnt > 1)) {
+ mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
+ "response buffer segments not supported for SMP\n",
+ __func__);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+
+ /*
+ * The CAM_SCATTER_VALID flag was originally implemented
+ * for the XPT_SCSI_IO CCB, which only has one data pointer.
+ * We have two. So, just take that flag to mean that we
+ * might have S/G lists, and look at the S/G segment count
+ * to figure out whether that is the case for each individual
+ * buffer.
+ */
+ if (ccb->smpio.smp_request_sglist_cnt != 0) {
+ bus_dma_segment_t *req_sg;
+
+ req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
+ request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
+ } else
+ request = ccb->smpio.smp_request;
+
+ if (ccb->smpio.smp_response_sglist_cnt != 0) {
+ bus_dma_segment_t *rsp_sg;
+
+ rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
+ response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
+ } else
+ response = ccb->smpio.smp_response;
+ } else {
+ request = ccb->smpio.smp_request;
+ response = ccb->smpio.smp_response;
+ }
+#endif /* __FreeBSD_version < 1000028 */
+
+ cm = mpr_alloc_command(sc);
+ if (cm == NULL) {
+ mpr_dprint(sc, MPR_ERROR,
+ "%s: cannot allocate command\n", __func__);
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(ccb);
+ return;
+ }
+
+ req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
+ bzero(req, sizeof(*req));
+ req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+
+ /* Allow the chip to use any route to this SAS address. */
+ req->PhysicalPort = 0xff;
+
+ req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
+ req->SGLFlags =
+ MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
+
+ mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
+ "%#jx\n", __func__, (uintmax_t)sasaddr);
+
+ mpr_init_sge(cm, req, &req->SGL);
+
+ /*
+ * Set up a uio to pass into mpr_map_command(). This allows us to
+ * do one map command, and one busdma call in there.
+ */
+ cm->cm_uio.uio_iov = cm->cm_iovec;
+ cm->cm_uio.uio_iovcnt = 2;
+ cm->cm_uio.uio_segflg = UIO_SYSSPACE;
+
+ /*
+ * The read/write flag isn't used by busdma, but set it just in
+ * case. This isn't exactly accurate, either, since we're going in
+ * both directions.
+ */
+ cm->cm_uio.uio_rw = UIO_WRITE;
+
+ cm->cm_iovec[0].iov_base = request;
+ cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
+ cm->cm_iovec[1].iov_base = response;
+ cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
+
+ cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
+ cm->cm_iovec[1].iov_len;
+
+ /*
+ * Trigger a warning message in mpr_data_cb() for the user if we
+ * wind up exceeding two S/G segments. The chip expects one
+ * segment for the request and another for the response.
+ */
+ cm->cm_max_segs = 2;
+
+ cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ cm->cm_complete = mprsas_smpio_complete;
+ cm->cm_complete_data = ccb;
+
+ /*
+ * Tell the mapping code that we're using a uio, and that this is
+ * an SMP passthrough request. There is a little special-case
+ * logic there (in mpr_data_cb()) to handle the bidirectional
+ * transfer.
+ */
+ cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
+ MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
+
+ /* The chip data format is little endian. */
+ req->SASAddress.High = htole32(sasaddr >> 32);
+ req->SASAddress.Low = htole32(sasaddr);
+
+ /*
+ * XXX Note that we don't have a timeout/abort mechanism here.
+ * From the manual, it looks like task management requests only
+ * work for SCSI IO and SATA passthrough requests. We may need to
+ * have a mechanism to retry requests in the event of a chip reset
+ * at least. Hopefully the chip will insure that any errors short
+ * of that are relayed back to the driver.
+ */
+ error = mpr_map_command(sc, cm);
+ if ((error != 0) && (error != EINPROGRESS)) {
+ mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
+ "mpr_map_command()\n", __func__, error);
+ goto bailout_error;
+ }
+
+ return;
+
+bailout_error:
+ mpr_free_command(sc, cm);
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(ccb);
+ return;
+}
+
+/*
+ * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
+ * (either the device itself, if it embeds an SMP target, or its parent
+ * expander) and hand the CCB to mprsas_send_smpcmd() for transmission.
+ * Completes the CCB with an error status if no usable address is found.
+ */
+static void
+mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
+{
+	struct mpr_softc *sc;
+	struct mprsas_target *targ;
+	uint64_t sasaddr = 0;
+
+	sc = sassc->sc;
+
+	/*
+	 * Make sure the target exists.
+	 */
+	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
+	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
+	targ = &sassc->targets[ccb->ccb_h.target_id];
+	if (targ->handle == 0x0) {
+		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
+		    __func__, ccb->ccb_h.target_id);
+		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
+		xpt_done(ccb);
+		return;
+	}
+
+	/*
+	 * If this device has an embedded SMP target, we'll talk to it
+	 * directly.  Otherwise we need to figure out what the expander's
+	 * address is below.
+	 */
+	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
+		sasaddr = targ->sasaddr;
+
+	/*
+	 * If we don't have a SAS address for the expander yet, try
+	 * grabbing it from the page 0x83 information cached in the
+	 * transport layer for this target.  LSI expanders report the
+	 * expander SAS address as the port-associated SAS address in
+	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
+	 * 0x83.
+	 *
+	 * XXX KDM disable this for now, but leave it commented out so that
+	 * it is obvious that this is another possible way to get the SAS
+	 * address.
+	 *
+	 * The parent handle method below is a little more reliable, and
+	 * the other benefit is that it works for devices other than SES
+	 * devices.  So you can send a SMP request to a da(4) device and it
+	 * will get routed to the expander that device is attached to.
+	 * (Assuming the da(4) device doesn't contain an SMP target...)
+	 */
+#if 0
+	if (sasaddr == 0)
+		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
+#endif
+
+	/*
+	 * If we still don't have a SAS address for the expander, look for
+	 * the parent device of this device, which is probably the expander.
+	 */
+	if (sasaddr == 0) {
+#ifdef OLD_MPR_PROBE
+		struct mprsas_target *parent_target;
+#endif
+
+		if (targ->parent_handle == 0x0) {
+			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
+			    "a valid parent handle!\n", __func__, targ->handle);
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			goto bailout;
+		}
+#ifdef OLD_MPR_PROBE
+		/* Legacy probe: look the parent up in our own target table. */
+		parent_target = mprsas_find_target_by_handle(sassc, 0,
+		    targ->parent_handle);
+
+		if (parent_target == NULL) {
+			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
+			    "a valid parent target!\n", __func__, targ->handle);
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			goto bailout;
+		}
+
+		if ((parent_target->devinfo &
+		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
+			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
+			    "does not have an SMP target!\n", __func__,
+			    targ->handle, parent_target->handle);
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			goto bailout;
+
+		}
+
+		sasaddr = parent_target->sasaddr;
+#else /* OLD_MPR_PROBE */
+		/*
+		 * Current probe caches the parent's devinfo/address on the
+		 * target itself, so no table lookup is needed.
+		 */
+		if ((targ->parent_devinfo &
+		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
+			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
+			    "does not have an SMP target!\n", __func__,
+			    targ->handle, targ->parent_handle);
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			goto bailout;
+
+		}
+		if (targ->parent_sasaddr == 0x0) {
+			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
+			    "%d does not have a valid SAS address!\n", __func__,
+			    targ->handle, targ->parent_handle);
+			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+			goto bailout;
+		}
+
+		sasaddr = targ->parent_sasaddr;
+#endif /* OLD_MPR_PROBE */
+
+	}
+
+	if (sasaddr == 0) {
+		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
+		    "handle %d\n", __func__, targ->handle);
+		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+		goto bailout;
+	}
+	mprsas_send_smpcmd(sassc, ccb, sasaddr);
+
+	return;
+
+bailout:
+	xpt_done(ccb);
+
+}
+#endif //__FreeBSD_version >= 900026
+
+/*
+ * Handle an XPT_RESET_DEV CCB by issuing a SCSI task management target
+ * reset (SAS hard link reset / SATA link reset) to the device's handle.
+ * Completion is handled by mprsas_resetdev_complete().
+ */
+static void
+mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
+{
+	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+	struct mprsas_target *target;
+	struct mpr_command *cm;
+	struct mpr_softc *sc;
+
+	MPR_FUNCTRACE(sassc->sc);
+	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
+
+	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
+	    ("Target %d out of bounds in XPT_RESET_DEV\n",
+	    ccb->ccb_h.target_id));
+
+	sc = sassc->sc;
+	cm = mpr_alloc_command(sc);
+	if (cm == NULL) {
+		mpr_dprint(sc, MPR_ERROR,
+		    "command alloc failure in mprsas_action_resetdev\n");
+		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+		xpt_done(ccb);
+		return;
+	}
+
+	target = &sassc->targets[ccb->ccb_h.target_id];
+
+	/* Build the task management request in the command's frame. */
+	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+	req->DevHandle = htole16(target->handle);
+	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+
+	/* SAS Hard Link Reset / SATA Link Reset */
+	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
+	cm->cm_data = NULL;
+	cm->cm_desc.HighPriority.RequestFlags =
+	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	cm->cm_complete = mprsas_resetdev_complete;
+	cm->cm_complete_data = ccb;
+	cm->cm_targ = target;
+	mpr_map_command(sc, cm);
+}
+
+/*
+ * Completion handler for the target reset issued by
+ * mprsas_action_resetdev().  Translates the task management response
+ * into a CAM status, announces the reset to CAM if it succeeded, frees
+ * the command, and completes the original CCB.
+ */
+static void
+mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
+{
+	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
+	union ccb *ccb;
+
+	MPR_FUNCTRACE(sc);
+	mtx_assert(&sc->mpr_mtx, MA_OWNED);
+
+	/*
+	 * NOTE(review): cm_reply could conceivably be NULL (e.g. after a
+	 * controller reset) and resp is dereferenced below without a NULL
+	 * check -- confirm that path cannot be reached here.
+	 */
+	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
+	ccb = tm->cm_complete_data;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * task management commands don't have S/G lists.
+	 */
+	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
+
+		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
+
+		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
+		    "handle %#04x! This should not happen!\n", __func__,
+		    tm->cm_flags, req->DevHandle);
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+		goto bailout;
+	}
+
+	mpr_dprint(sc, MPR_XINFO,
+	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
+	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
+
+	/* Success: tell CAM a bus device reset was delivered to the target. */
+	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
+		ccb->ccb_h.status = CAM_REQ_CMP;
+		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
+		    CAM_LUN_WILDCARD);
+	}
+	else
+		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+
+bailout:
+
+	mprsas_free_tm(sc, tm);
+	xpt_done(ccb);
+}
+
+/*
+ * CAM polling entry point, used when interrupts are unavailable (for
+ * example while crash-dumping after a panic).  Services the IOC by
+ * calling the interrupt handler directly.
+ */
+static void
+mprsas_poll(struct cam_sim *sim)
+{
+	struct mprsas_softc *sassc = cam_sim_softc(sim);
+
+	if (sassc->sc->mpr_debug & MPR_TRACE) {
+		/*
+		 * Frequent debug messages during a panic just slow
+		 * everything down too much, so turn tracing off.
+		 */
+		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
+		sassc->sc->mpr_debug &= ~MPR_TRACE;
+	}
+
+	mpr_intr_locked(sassc->sc);
+}
+
+/*
+ * CAM async event callback.  Two events are of interest: AC_ADVINFO_CHANGED
+ * (on newer CAM versions), used to refresh a LUN's EEDP state when its
+ * cached READ CAPACITY data changes, and AC_FOUND_DEVICE, used to mark
+ * devices for spin-down at shutdown (and, on older CAM versions, to probe
+ * EEDP support directly).
+ */
+static void
+mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
+    void *arg)
+{
+	struct mpr_softc *sc;
+
+	sc = (struct mpr_softc *)callback_arg;
+
+	switch (code) {
+#if (__FreeBSD_version >= 1000006) || \
+    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
+	case AC_ADVINFO_CHANGED: {
+		struct mprsas_target *target;
+		struct mprsas_softc *sassc;
+		struct scsi_read_capacity_data_long rcap_buf;
+		struct ccb_dev_advinfo cdai;
+		struct mprsas_lun *lun;
+		lun_id_t lunid;
+		int found_lun;
+		uintptr_t buftype;
+
+		buftype = (uintptr_t)arg;
+
+		found_lun = 0;
+		sassc = sc->sassc;
+
+		/*
+		 * We're only interested in read capacity data changes.
+		 */
+		if (buftype != CDAI_TYPE_RCAPLONG)
+			break;
+
+		/*
+		 * See the comment in mpr_attach_sas() for a detailed
+		 * explanation.  In these versions of FreeBSD we register
+		 * for all events and filter out the events that don't
+		 * apply to us.
+		 */
+#if (__FreeBSD_version < 1000703) || \
+    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
+		if (xpt_path_path_id(path) != sassc->sim->path_id)
+			break;
+#endif
+
+		/*
+		 * We should have a handle for this, but check to make sure.
+		 */
+		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
+		    ("Target %d out of bounds in mprsas_async\n",
+		    xpt_path_target_id(path)));
+		target = &sassc->targets[xpt_path_target_id(path)];
+		if (target->handle == 0)
+			break;
+
+		lunid = xpt_path_lun_id(path);
+
+		/* Find the LUN record for this path, creating it if needed. */
+		SLIST_FOREACH(lun, &target->luns, lun_link) {
+			if (lun->lun_id == lunid) {
+				found_lun = 1;
+				break;
+			}
+		}
+
+		if (found_lun == 0) {
+			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
+			    M_NOWAIT | M_ZERO);
+			if (lun == NULL) {
+				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
+				    "LUN for EEDP support.\n");
+				break;
+			}
+			lun->lun_id = lunid;
+			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
+		}
+
+		/*
+		 * Fetch the cached long read capacity data via an
+		 * XPT_DEV_ADVINFO CCB and use the protection bit to decide
+		 * whether the LUN is formatted for EEDP.
+		 */
+		bzero(&rcap_buf, sizeof(rcap_buf));
+		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
+		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
+		cdai.ccb_h.flags = CAM_DIR_IN;
+		cdai.buftype = CDAI_TYPE_RCAPLONG;
+		cdai.flags = 0;
+		cdai.bufsiz = sizeof(rcap_buf);
+		cdai.buf = (uint8_t *)&rcap_buf;
+		xpt_action((union ccb *)&cdai);
+		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
+			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
+
+		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
+		    && (rcap_buf.prot & SRC16_PROT_EN)) {
+			lun->eedp_formatted = TRUE;
+			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
+		} else {
+			lun->eedp_formatted = FALSE;
+			lun->eedp_block_size = 0;
+		}
+		break;
+	}
+#endif
+	case AC_FOUND_DEVICE: {
+		struct ccb_getdev *cgd;
+
+		/*
+		 * See the comment in mpr_attach_sas() for a detailed
+		 * explanation.  In these versions of FreeBSD we register
+		 * for all events and filter out the events that don't
+		 * apply to us.
+		 */
+#if (__FreeBSD_version < 1000703) || \
+    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
+		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
+			break;
+#endif
+
+		cgd = arg;
+		mprsas_prepare_ssu(sc, path, cgd);
+
+		/* Older CAM lacks AC_ADVINFO_CHANGED; probe EEDP directly. */
+#if (__FreeBSD_version < 901503) || \
+    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
+		mprsas_check_eedp(sc, path, cgd);
+#endif
+		break;
+	}
+	default:
+		break;
+	}
+}
+
+/*
+ * Note SATA direct-access end devices so that a SCSI START STOP UNIT
+ * (spin down) can be sent to them when the driver shuts down.  Creates
+ * a LUN record for the path if one does not already exist.
+ *
+ * Changes from the original: removed the unused 'pathid' local and the
+ * redundant reassignment of 'sassc' (it is already initialized at its
+ * declaration).  No behavioral change.
+ */
+static void
+mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
+    struct ccb_getdev *cgd)
+{
+	struct mprsas_softc *sassc = sc->sassc;
+	target_id_t targetid;
+	lun_id_t lunid;
+	struct mprsas_target *target;
+	struct mprsas_lun *lun;
+	uint8_t found_lun;
+
+	targetid = xpt_path_target_id(path);
+	lunid = xpt_path_lun_id(path);
+
+	KASSERT(targetid < sassc->maxtargets,
+	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
+	target = &sassc->targets[targetid];
+	if (target->handle == 0x0)
+		return;
+
+	/*
+	 * If LUN is already in list, don't create a new one.
+	 */
+	found_lun = FALSE;
+	SLIST_FOREACH(lun, &target->luns, lun_link) {
+		if (lun->lun_id == lunid) {
+			found_lun = TRUE;
+			break;
+		}
+	}
+	if (!found_lun) {
+		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
+		    M_NOWAIT | M_ZERO);
+		if (lun == NULL) {
+			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
+			    "preparing SSU.\n");
+			return;
+		}
+		lun->lun_id = lunid;
+		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
+	}
+
+	/*
+	 * If this is a SATA direct-access end device, mark it so that a SCSI
+	 * StartStopUnit command will be sent to it when the driver is being
+	 * shutdown.
+	 */
+	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
+	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
+	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
+	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
+		lun->stop_at_shutdown = TRUE;
+	}
+}
+
+#if (__FreeBSD_version < 901503) || \
+ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
+/*
+ * Determine whether a newly-arrived device is formatted for EEDP (SCSI
+ * protection information).  If the inquiry data advertises protection
+ * support (SPC3_SID_PROTECT), issue a READ CAPACITY(16); the completion
+ * handler (mprsas_read_cap_done) records the EEDP state in the LUN
+ * record.  Only used on FreeBSD versions without AC_ADVINFO_CHANGED.
+ *
+ * Bug fix from the original: in the rcap_buf allocation-failure path the
+ * code called xpt_free_path(ccb->ccb_h.path), but ccb->ccb_h.path is not
+ * initialized until xpt_setup_ccb() runs further down -- so it freed an
+ * uninitialized pointer.  Free local_path instead.  Also removed the
+ * redundant reassignment of 'sassc'.
+ */
+static void
+mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
+    struct ccb_getdev *cgd)
+{
+	struct mprsas_softc *sassc = sc->sassc;
+	struct ccb_scsiio *csio;
+	struct scsi_read_capacity_16 *scsi_cmd;
+	struct scsi_read_capacity_eedp *rcap_buf;
+	path_id_t pathid;
+	target_id_t targetid;
+	lun_id_t lunid;
+	union ccb *ccb;
+	struct cam_path *local_path;
+	struct mprsas_target *target;
+	struct mprsas_lun *lun;
+	uint8_t found_lun;
+	char path_str[64];
+
+	pathid = cam_sim_path(sassc->sim);
+	targetid = xpt_path_target_id(path);
+	lunid = xpt_path_lun_id(path);
+
+	KASSERT(targetid < sassc->maxtargets,
+	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
+	target = &sassc->targets[targetid];
+	if (target->handle == 0x0)
+		return;
+
+	/*
+	 * Determine if the device is EEDP capable.
+	 *
+	 * If this flag is set in the inquiry data, the device supports
+	 * protection information, and must support the 16 byte read capacity
+	 * command, otherwise continue without sending read cap 16.
+	 */
+	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
+		return;
+
+	/*
+	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
+	 * the LUN is formatted for EEDP support.
+	 */
+	ccb = xpt_alloc_ccb_nowait();
+	if (ccb == NULL) {
+		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
+		    "support.\n");
+		return;
+	}
+
+	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
+	    != CAM_REQ_CMP) {
+		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
+		    "support\n");
+		xpt_free_ccb(ccb);
+		return;
+	}
+
+	/*
+	 * If LUN is already in list, don't create a new one.
+	 */
+	found_lun = FALSE;
+	SLIST_FOREACH(lun, &target->luns, lun_link) {
+		if (lun->lun_id == lunid) {
+			found_lun = TRUE;
+			break;
+		}
+	}
+	if (!found_lun) {
+		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
+		    M_NOWAIT | M_ZERO);
+		if (lun == NULL) {
+			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
+			    "EEDP support.\n");
+			xpt_free_path(local_path);
+			xpt_free_ccb(ccb);
+			return;
+		}
+		lun->lun_id = lunid;
+		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
+	}
+
+	xpt_path_string(local_path, path_str, sizeof(path_str));
+	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
+	    path_str, target->handle);
+
+	/*
+	 * Issue a READ CAPACITY 16 command for the LUN.  The
+	 * mprsas_read_cap_done function will load the read cap info into the
+	 * LUN struct.
+	 */
+	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
+	    M_NOWAIT | M_ZERO);
+	if (rcap_buf == NULL) {
+		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
+		    "buffer for EEDP support.\n");
+		/*
+		 * ccb->ccb_h.path is not valid until xpt_setup_ccb() below,
+		 * so free the path we created directly.
+		 */
+		xpt_free_path(local_path);
+		xpt_free_ccb(ccb);
+		return;
+	}
+	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
+	csio = &ccb->csio;
+	csio->ccb_h.func_code = XPT_SCSI_IO;
+	csio->ccb_h.flags = CAM_DIR_IN;
+	csio->ccb_h.retry_count = 4;
+	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
+	csio->ccb_h.timeout = 60000;
+	csio->data_ptr = (uint8_t *)rcap_buf;
+	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
+	csio->sense_len = MPR_SENSE_LEN;
+	csio->cdb_len = sizeof(*scsi_cmd);
+	csio->tag_action = MSG_SIMPLE_Q_TAG;
+
+	/* SERVICE ACTION IN(16) / READ CAPACITY(16). */
+	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
+	bzero(scsi_cmd, sizeof(*scsi_cmd));
+	scsi_cmd->opcode = 0x9E;
+	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
+	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
+
+	ccb->ccb_h.ppriv_ptr1 = sassc;
+	xpt_action(ccb);
+}
+
+/*
+ * Completion handler for the READ CAPACITY(16) issued by
+ * mprsas_check_eedp().  Looks up the LUN record for the completed path
+ * and records whether the LUN is formatted for EEDP, then frees the
+ * buffer, path, and CCB.
+ */
+static void
+mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
+{
+	struct mprsas_softc *sassc;
+	struct mprsas_target *target;
+	struct mprsas_lun *lun;
+	struct scsi_read_capacity_eedp *rcap_buf;
+
+	if (done_ccb == NULL)
+		return;
+
+	/*
+	 * The driver must release the devq itself when the SCSI command was
+	 * generated internally (rather than by a CAM peripheral), because
+	 * the completion will not pass back through cam_periph.  This is
+	 * currently the only internally generated command; any future ones
+	 * must release the devq the same way.
+	 */
+	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
+		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
+		xpt_release_devq(done_ccb->ccb_h.path,
+		    /*count*/ 1, /*run_queue*/TRUE);
+	}
+
+	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
+
+	/*
+	 * Get the LUN ID for the path and look it up in the LUN list for the
+	 * target.
+	 */
+	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
+	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
+	    ("Target %d out of bounds in mprsas_read_cap_done\n",
+	    done_ccb->ccb_h.target_id));
+	target = &sassc->targets[done_ccb->ccb_h.target_id];
+	SLIST_FOREACH(lun, &target->luns, lun_link) {
+		if (lun->lun_id != done_ccb->ccb_h.target_lun)
+			continue;
+
+		/*
+		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
+		 * info.  If the READ CAP 16 command had some SCSI error (common
+		 * if command is not supported), mark the lun as not supporting
+		 * EEDP and set the block size to 0.
+		 */
+		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
+		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
+			lun->eedp_formatted = FALSE;
+			lun->eedp_block_size = 0;
+			break;
+		}
+
+		/* Bit 0 of the PROT byte: protection enabled (EEDP). */
+		if (rcap_buf->protect & 0x01) {
+			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
+			    "target ID %d is formatted for EEDP "
+			    "support.\n", done_ccb->ccb_h.target_lun,
+			    done_ccb->ccb_h.target_id);
+			lun->eedp_formatted = TRUE;
+			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
+		}
+		break;
+	}
+
+	// Finished with this CCB and path.
+	free(rcap_buf, M_MPR);
+	xpt_free_path(done_ccb->ccb_h.path);
+	xpt_free_ccb(done_ccb);
+}
+#endif /* (__FreeBSD_version < 901503) || \
+ ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
+
+/*
+ * Kick off SAS topology discovery.  Raising wait_for_port_enable keeps
+ * the simq frozen until every discovery event has been processed; the
+ * actual PORT_ENABLE request is issued by mprsas_send_portenable().
+ */
+int
+mprsas_startup(struct mpr_softc *sc)
+{
+	sc->wait_for_port_enable = 1;
+	mprsas_send_portenable(sc);
+	return (0);
+}
+
+/*
+ * Queue an MPI2 PORT_ENABLE request to the IOC; firmware begins
+ * reporting discovery events once it processes this message.
+ * Completion is handled by mprsas_portenable_complete().  Returns
+ * EBUSY if no command frame is available, 0 otherwise.
+ */
+static int
+mprsas_send_portenable(struct mpr_softc *sc)
+{
+	MPI2_PORT_ENABLE_REQUEST *request;
+	struct mpr_command *cm;
+
+	MPR_FUNCTRACE(sc);
+
+	cm = mpr_alloc_command(sc);
+	if (cm == NULL)
+		return (EBUSY);
+
+	/* Build the request in place in the command's request frame. */
+	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
+	request->Function = MPI2_FUNCTION_PORT_ENABLE;
+	request->MsgFlags = 0;
+	request->VP_ID = 0;
+
+	cm->cm_sge = NULL;
+	cm->cm_data = NULL;
+	cm->cm_complete = mprsas_portenable_complete;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+
+	mpr_map_command(sc, cm);
+	mpr_dprint(sc, MPR_XINFO,
+	    "mpr_send_portenable finished cm %p req %p complete %p\n",
+	    cm, cm->cm_req, cm->cm_complete);
+	return (0);
+}
+
+/*
+ * Completion handler for the MPI2 PORT_ENABLE request.  Reports
+ * failures, tears down the config intrhook that held up boot, and
+ * releases the startup refcount so bus rescans can proceed.
+ *
+ * Bug fix from the original: IOCStatus is a little-endian 16-bit field,
+ * so it must be byte-swapped with le16toh() BEFORE masking with the
+ * host-order constant MPI2_IOCSTATUS_MASK.  The original masked first
+ * and swapped second, which is a no-op on little-endian hosts but
+ * produces the wrong value on big-endian ones.
+ */
+static void
+mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
+{
+	MPI2_PORT_ENABLE_REPLY *reply;
+	struct mprsas_softc *sassc;
+
+	MPR_FUNCTRACE(sc);
+	sassc = sc->sassc;
+
+	/*
+	 * Currently there should be no way we can hit this case.  It only
+	 * happens when we have a failure to allocate chain frames, and
+	 * port enable commands don't have S/G lists.
+	 */
+	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
+		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
+		    "This should not happen!\n", __func__, cm->cm_flags);
+	}
+
+	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
+	if (reply == NULL)
+		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
+	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
+	    MPI2_IOCSTATUS_SUCCESS)
+		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
+
+	mpr_free_command(sc, cm);
+	if (sc->mpr_ich.ich_arg != NULL) {
+		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
+		config_intrhook_disestablish(&sc->mpr_ich);
+		sc->mpr_ich.ich_arg = NULL;
+	}
+
+	/*
+	 * Done waiting for port enable to complete.  Decrement the refcount.
+	 * If refcount is 0, discovery is complete and a rescan of the bus can
+	 * take place.
+	 */
+	sc->wait_for_port_enable = 0;
+	sc->port_enable_complete = 1;
+	wakeup(&sc->port_enable_complete);
+	mprsas_startup_decrement(sassc);
+}
+
+/*
+ * Return 1 if the given target ID appears in the comma-separated
+ * exclude_ids tunable string, 0 otherwise.
+ *
+ * Bug fixes from the original:
+ *  - strsep() destructively wrote NUL bytes over the commas in
+ *    sc->exclude_ids, so any call after the first saw only the first
+ *    token.  This version scans the string without modifying it.
+ *  - strtol() on a non-numeric token returns 0, so id 0 falsely
+ *    matched garbage tokens.  Tokens that do not begin with a number
+ *    are now ignored (endp == start check).
+ */
+int
+mprsas_check_id(struct mprsas_softc *sassc, int id)
+{
+	struct mpr_softc *sc = sassc->sc;
+	const char *p;
+	char *endp;
+	long val;
+
+	p = &sc->exclude_ids[0];
+	while (p != NULL && *p != '\0') {
+		if (*p != ',') {
+			val = strtol(p, &endp, 0);
+			if (endp != p && val == (long)id)
+				return (1);
+		}
+		/* Advance past the next comma, if there is one. */
+		p = strchr(p, ',');
+		if (p != NULL)
+			p++;
+	}
+
+	return (0);
+}
diff --git a/sys/dev/mpr/mpr_sas.h b/sys/dev/mpr/mpr_sas.h
new file mode 100644
index 0000000000000..9d3116af29d0c
--- /dev/null
+++ b/sys/dev/mpr/mpr_sas.h
@@ -0,0 +1,168 @@
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+struct mpr_fw_event_work;
+
+/*
+ * Per-LUN state tracked by the SAS transport layer.  Records are
+ * created lazily (on device arrival or EEDP probing) and kept on the
+ * owning target's 'luns' list.
+ */
+struct mprsas_lun {
+	SLIST_ENTRY(mprsas_lun) lun_link;	/* linkage on target's LUN list */
+	lun_id_t lun_id;
+	uint8_t eedp_formatted;		/* LUN is formatted for EEDP */
+	uint32_t eedp_block_size;	/* block size from READ CAPACITY(16) */
+	uint8_t stop_at_shutdown;	/* send START STOP UNIT at shutdown */
+};
+
+/*
+ * Per-target state for the SAS transport layer, indexed by CAM target
+ * ID in mprsas_softc's 'targets' array.  A target is considered present
+ * when 'handle' is non-zero.
+ */
+struct mprsas_target {
+	uint16_t handle;		/* IOC device handle; 0 == absent */
+	uint8_t linkrate;
+	uint8_t encl_level_valid;
+	uint8_t encl_level;
+	char connector_name[4];
+	uint64_t devname;
+	uint32_t devinfo;		/* MPI2_SAS_DEVICE_INFO_* bits */
+	uint16_t encl_handle;
+	uint16_t encl_slot;
+	uint8_t flags;
+#define MPRSAS_TARGET_INABORT	(1 << 0)
+#define MPRSAS_TARGET_INRESET	(1 << 1)
+#define MPRSAS_TARGET_INDIAGRESET (1 << 2)
+#define MPRSAS_TARGET_INREMOVAL	(1 << 3)
+#define MPR_TARGET_FLAGS_RAID_COMPONENT (1 << 4)
+#define MPR_TARGET_FLAGS_VOLUME		(1 << 5)
+/*
+ * NOTE(review): MPRSAS_TARGET_INCHIPRESET is not defined in this header;
+ * unless it is defined elsewhere, this was likely meant to be
+ * MPRSAS_TARGET_INDIAGRESET (defined above) -- verify before relying on
+ * MPRSAS_TARGET_INRECOVERY.
+ */
+#define MPRSAS_TARGET_INRECOVERY (MPRSAS_TARGET_INABORT | \
+    MPRSAS_TARGET_INRESET | MPRSAS_TARGET_INCHIPRESET)
+
+#define MPRSAS_TARGET_ADD	(1 << 29)
+#define MPRSAS_TARGET_REMOVE	(1 << 30)
+	uint16_t tid;			/* CAM target ID */
+	SLIST_HEAD(, mprsas_lun) luns;	/* per-LUN records (see mprsas_lun) */
+	TAILQ_HEAD(, mpr_command) commands;
+	struct mpr_command *tm;		/* in-flight task management cmd */
+	TAILQ_HEAD(, mpr_command) timedout_commands;
+	uint16_t exp_dev_handle;
+	uint16_t phy_num;
+	uint64_t sasaddr;
+	uint16_t parent_handle;		/* handle of parent (expander/HBA) */
+	uint64_t parent_sasaddr;
+	uint32_t parent_devinfo;
+	struct sysctl_ctx_list sysctl_ctx;
+	struct sysctl_oid *sysctl_tree;
+	TAILQ_ENTRY(mprsas_target) sysctl_link;
+	/* I/O accounting, exported via sysctl. */
+	uint64_t issued;
+	uint64_t completed;
+	unsigned int outstanding;
+	unsigned int timeouts;
+	unsigned int aborts;
+	unsigned int logical_unit_resets;
+	unsigned int target_resets;
+	uint8_t scsi_req_desc_type;
+};
+
+/*
+ * Top-level state for the SAS transport layer: the CAM SIM/devq/path,
+ * the target table, and the firmware event taskqueue.
+ */
+struct mprsas_softc {
+	struct mpr_softc	*sc;		/* back-pointer to controller */
+	u_int			flags;
+#define MPRSAS_IN_DISCOVERY	(1 << 0)
+#define MPRSAS_IN_STARTUP	(1 << 1)
+#define MPRSAS_DISCOVERY_TIMEOUT_PENDING	(1 << 2)
+#define MPRSAS_QUEUE_FROZEN	(1 << 3)
+#define	MPRSAS_SHUTDOWN		(1 << 4)
+#define	MPRSAS_SCANTHREAD	(1 << 5)
+	u_int			maxtargets;	/* size of 'targets' array */
+	struct mprsas_target	*targets;	/* indexed by CAM target ID */
+	struct cam_devq		*devq;
+	struct cam_sim		*sim;
+	struct cam_path		*path;
+	struct intr_config_hook	sas_ich;
+	struct callout		discovery_callout;
+	struct mpr_event_handle	*mprsas_eh;
+
+	u_int			startup_refcount;	/* simq stays frozen until 0 */
+	u_int			tm_count;
+	struct proc		*sysctl_proc;
+
+	/* Firmware event delivery (see mprsas_firmware_event_work). */
+	struct taskqueue	*ev_tq;
+	struct task		ev_task;
+	TAILQ_HEAD(, mpr_fw_event_work)	ev_queue;
+};
+
+MALLOC_DECLARE(M_MPRSAS);
+
+/*
+ * Abstracted so that the driver can be backwards and forwards compatible
+ * with future versions of CAM that will provide this functionality.
+ */
+#define MPR_SET_LUN(lun, ccblun) \
+ mprsas_set_lun(lun, ccblun)
+
+/*
+ * Encode a CAM LUN number into the 8-byte SAM LUN field used by MPI2
+ * requests.  Chooses the addressing method by magnitude: peripheral
+ * device addressing (<= 255), flat space addressing (<= 16383), or
+ * extended flat space addressing (<= 16777215).  Returns EINVAL for
+ * LUNs too large to encode.
+ */
+static __inline int
+mprsas_set_lun(uint8_t *lun, u_int ccblun)
+{
+	uint64_t *addr;
+
+	/* Clear all eight LUN bytes before encoding. */
+	addr = (uint64_t *)lun;
+	*addr = 0;
+
+	if (ccblun > 0xffffff)
+		return (EINVAL);
+
+	if (ccblun <= 0xff) {
+		/* Peripheral device address method: single-byte LUN. */
+		lun[1] = ccblun;
+		return (0);
+	}
+
+	if (ccblun <= 0x3fff) {
+		/* Flat space address method: 14-bit LUN, 01b in bits 7-6. */
+		scsi_ulto2b(ccblun, lun);
+		lun[0] |= 0x40;
+		return (0);
+	}
+
+	/*
+	 * Extended flat space address method: address method 11b (0xc0),
+	 * length code 1 => 3-byte LUN (0x10), extended address method 2
+	 * (0x02); the 24-bit LUN goes in bytes 1-3.
+	 */
+	scsi_ulto3b(ccblun, &lun[1]);
+	lun[0] = 0xc0 | 0x10 | 0x02;
+	return (0);
+}
+
+#define MPR_SET_SINGLE_LUN(req, lun) \
+do { \
+ bzero((req)->LUN, 8); \
+ (req)->LUN[1] = lun; \
+} while(0)
+
+void mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ);
+void mprsas_discovery_end(struct mprsas_softc *sassc);
+void mprsas_startup_increment(struct mprsas_softc *sassc);
+void mprsas_startup_decrement(struct mprsas_softc *sassc);
+void mprsas_release_simq_reinit(struct mprsas_softc *sassc);
+
+struct mpr_command * mprsas_alloc_tm(struct mpr_softc *sc);
+void mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm);
+void mprsas_firmware_event_work(void *arg, int pending);
+int mprsas_check_id(struct mprsas_softc *sassc, int id);
diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c
new file mode 100644
index 0000000000000..32e9b3a078047
--- /dev/null
+++ b/sys/dev/mpr/mpr_sas_lsi.c
@@ -0,0 +1,1218 @@
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Communications core for LSI MPT2 */
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/sbuf.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <machine/stdarg.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_xpt_periph.h>
+#include <cam/cam_periph.h>
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_sas.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_raid.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_table.h>
+#include <dev/mpr/mpr_sas.h>
+
+/* For Hashed SAS Address creation for SATA Drives */
+#define MPT2SAS_SN_LEN 20
+#define MPT2SAS_MN_LEN 40
+
+/*
+ * Deferred firmware event: allocated in the event handler, queued on
+ * sassc->ev_queue, and drained/freed by the ev_tq taskqueue.
+ */
+struct mpr_fw_event_work {
+	u16			event;		/* MPI2 event code */
+	void			*event_data;	/* copy of the event payload */
+	TAILQ_ENTRY(mpr_fw_event_work)	ev_link;
+};
+
+/*
+ * View of a synthesized 8-byte SAS address for a SATA disk, either as
+ * raw WWID bytes or as two 32-bit halves used while hashing.
+ */
+union _sata_sas_address {
+	u8 wwid[8];
+	struct {
+		u32 high;
+		u32 low;
+	} word;
+};
+
+/*
+ * define the IDENTIFY DEVICE structure
+ */
+/*
+ * Minimal layout of the 512-byte ATA IDENTIFY DEVICE data; only the
+ * serial-number and model-number words are named, the rest is padding.
+ */
+struct _ata_identify_device_data {
+	u16 reserved1[10];	/* 0-9 */
+	u16 serial_number[10];	/* 10-19 */
+	u16 reserved2[7];	/* 20-26 */
+	u16 model_number[20];	/* 27-46*/
+	u16 reserved3[209];	/* 47-255*/
+};
+static u32 event_count;
+static void mprsas_fw_work(struct mpr_softc *sc,
+ struct mpr_fw_event_work *fw_event);
+static void mprsas_fw_event_free(struct mpr_softc *,
+ struct mpr_fw_event_work *);
+static int mprsas_add_device(struct mpr_softc *sc, u16 handle, u8 linkrate);
+static int mprsas_get_sata_identify(struct mpr_softc *sc, u16 handle,
+ Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz,
+ u32 devinfo);
+int mprsas_get_sas_address_for_sata_disk(struct mpr_softc *sc,
+ u64 *sas_address, u16 handle, u32 device_info);
+static int mprsas_volume_add(struct mpr_softc *sc,
+ u16 handle);
+static void mprsas_SSU_to_SATA_devices(struct mpr_softc *sc);
+static void mprsas_stop_unit_done(struct cam_periph *periph,
+ union ccb *done_ccb);
+
+/*
+ * Firmware event notification callback.  Copies the event payload into
+ * a freshly allocated mpr_fw_event_work, queues it on the sassc event
+ * queue, and kicks the event taskqueue; actual processing happens later
+ * in mprsas_fw_work().  Allocations use M_NOWAIT because this can run
+ * from interrupt context; on allocation failure the event is dropped.
+ */
+void
+mprsas_evt_handler(struct mpr_softc *sc, uintptr_t data,
+    MPI2_EVENT_NOTIFICATION_REPLY *event)
+{
+	struct mpr_fw_event_work *fw_event;
+	u16 sz;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+	mpr_print_evt_sas(sc, event);
+	mprsas_record_event(sc, event);
+
+	fw_event = malloc(sizeof(struct mpr_fw_event_work), M_MPR,
+	    M_ZERO|M_NOWAIT);
+	if (!fw_event) {
+		printf("%s: allocate failed for fw_event\n", __func__);
+		return;
+	}
+	/* EventDataLength is in 32-bit words; convert to bytes. */
+	sz = le16toh(event->EventDataLength) * 4;
+	fw_event->event_data = malloc(sz, M_MPR, M_ZERO|M_NOWAIT);
+	if (!fw_event->event_data) {
+		printf("%s: allocate failed for event_data\n", __func__);
+		free(fw_event, M_MPR);
+		return;
+	}
+
+	/*
+	 * NOTE(review): event->Event is stored without le16toh and later
+	 * compared against host-order constants in mprsas_fw_work(); this
+	 * only matters on big-endian hosts — confirm against the other
+	 * event-code comparisons in this file.
+	 */
+	bcopy(event->EventData, fw_event->event_data, sz);
+	fw_event->event = event->Event;
+	if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+	    event->Event == MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE ||
+	    event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) &&
+	    sc->track_mapping_events)
+		sc->pending_map_events++;
+
+	/*
+	 * When wait_for_port_enable flag is set, make sure that all the events
+	 * are processed. Increment the startup_refcount and decrement it after
+	 * events are processed.
+	 */
+	if ((event->Event == MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+	    event->Event == MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST) &&
+	    sc->wait_for_port_enable)
+		mprsas_startup_increment(sc->sassc);
+
+	TAILQ_INSERT_TAIL(&sc->sassc->ev_queue, fw_event, ev_link);
+	taskqueue_enqueue(sc->sassc->ev_tq, &sc->sassc->ev_task);
+
+}
+
+/*
+ * Release a queued firmware event and its payload (both allocated in
+ * mprsas_evt_handler).  The payload must be freed first since fw_event
+ * owns the pointer to it.
+ */
+static void
+mprsas_fw_event_free(struct mpr_softc *sc, struct mpr_fw_event_work *fw_event)
+{
+
+	free(fw_event->event_data, M_MPR);
+	free(fw_event, M_MPR);
+}
+
+/**
+ * _mpr_fw_work - delayed task for processing firmware events
+ * @sc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+/**
+ * mprsas_fw_work - deferred processing for one firmware event
+ * @sc: per adapter object
+ * @fw_event: event dequeued from sassc->ev_queue; freed here when done
+ * Context: taskqueue, with the mpr lock held by the caller.
+ *
+ * Dispatches on the event code, updates the mapping table and target
+ * state as needed, and finally frees the event.  For topology and IR
+ * config-change events it also drops the startup refcount taken in
+ * mprsas_evt_handler().
+ *
+ * Return nothing.
+ */
+static void
+mprsas_fw_work(struct mpr_softc *sc, struct mpr_fw_event_work *fw_event)
+{
+	struct mprsas_softc *sassc;
+	sassc = sc->sassc;
+
+	mpr_dprint(sc, MPR_EVENT, "(%d)->(%s) Working on Event: [%x]\n",
+	    event_count++, __func__, fw_event->event);
+	switch (fw_event->event) {
+	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+	{
+		MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data;
+		MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy;
+		int i;
+
+		data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *)
+		    fw_event->event_data;
+
+		mpr_mapping_topology_change_event(sc, fw_event->event_data);
+
+		/* Add or remove a target for each PHY entry in the list. */
+		for (i = 0; i < data->NumEntries; i++) {
+			phy = &data->PHY[i];
+			switch (phy->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK) {
+			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+				if (mprsas_add_device(sc,
+				    le16toh(phy->AttachedDevHandle),
+				    phy->LinkRate)) {
+					printf("%s: failed to add device with "
+					    "handle 0x%x\n", __func__,
+					    le16toh(phy->AttachedDevHandle));
+					mprsas_prepare_remove(sassc, le16toh(
+					    phy->AttachedDevHandle));
+				}
+				break;
+			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+				mprsas_prepare_remove(sassc, le16toh(
+				    phy->AttachedDevHandle));
+				break;
+			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+			default:
+				break;
+			}
+		}
+		/*
+		 * refcount was incremented for this event in
+		 * mprsas_evt_handler. Decrement it here because the event has
+		 * been processed.
+		 */
+		mprsas_startup_decrement(sassc);
+		break;
+	}
+	case MPI2_EVENT_SAS_DISCOVERY:
+	{
+		MPI2_EVENT_DATA_SAS_DISCOVERY *data;
+
+		data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)fw_event->event_data;
+
+		if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_STARTED)
+			mpr_dprint(sc, MPR_TRACE,"SAS discovery start "
+			    "event\n");
+		if (data->ReasonCode & MPI2_EVENT_SAS_DISC_RC_COMPLETED) {
+			mpr_dprint(sc, MPR_TRACE,"SAS discovery stop event\n");
+			sassc->flags &= ~MPRSAS_IN_DISCOVERY;
+			mprsas_discovery_end(sassc);
+		}
+		break;
+	}
+	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+	{
+		Mpi2EventDataSasEnclDevStatusChange_t *data;
+		data = (Mpi2EventDataSasEnclDevStatusChange_t *)
+		    fw_event->event_data;
+		mpr_mapping_enclosure_dev_status_change_event(sc,
+		    fw_event->event_data);
+		break;
+	}
+	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+	{
+		Mpi2EventIrConfigElement_t *element;
+		int i;
+		u8 foreign_config, reason;
+		u16 elementType;
+		Mpi2EventDataIrConfigChangeList_t *event_data;
+		struct mprsas_target *targ;
+		unsigned int id;
+
+		event_data = fw_event->event_data;
+		foreign_config = (le32toh(event_data->Flags) &
+		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+		element =
+		    (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+		id = mpr_mapping_get_raid_id_from_handle(sc,
+		    element->VolDevHandle);
+
+		mpr_mapping_ir_config_change_event(sc, event_data);
+		for (i = 0; i < event_data->NumElements; i++, element++) {
+			reason = element->ReasonCode;
+			elementType = le16toh(element->ElementFlags) &
+			    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+			/*
+			 * check for element type of Phys Disk or Hot Spare
+			 */
+			if ((elementType !=
+			    MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT)
+			    && (elementType !=
+			    MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT))
+				// do next element
+				goto skip_fp_send;
+
+			/*
+			 * check for reason of Hide, Unhide, PD Created, or PD
+			 * Deleted
+			 */
+			if ((reason != MPI2_EVENT_IR_CHANGE_RC_HIDE) &&
+			    (reason != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) &&
+			    (reason != MPI2_EVENT_IR_CHANGE_RC_PD_CREATED) &&
+			    (reason != MPI2_EVENT_IR_CHANGE_RC_PD_DELETED))
+				goto skip_fp_send;
+
+			// check for a reason of Hide or PD Created
+			if ((reason == MPI2_EVENT_IR_CHANGE_RC_HIDE) ||
+			    (reason == MPI2_EVENT_IR_CHANGE_RC_PD_CREATED))
+			{
+				// build RAID Action message
+				Mpi2RaidActionRequest_t *action;
+				Mpi2RaidActionReply_t *reply;
+				struct mpr_command *cm;
+				int error = 0;
+				if ((cm = mpr_alloc_command(sc)) == NULL) {
+					printf("%s: command alloc failed\n",
+					    __func__);
+					/*
+					 * Was "return;", which leaked
+					 * fw_event and skipped the
+					 * startup_decrement below, wedging
+					 * port enable.  Skip just the FP
+					 * action instead.
+					 */
+					goto skip_fp_send;
+				}
+
+				mpr_dprint(sc, MPR_INFO, "Sending FP action "
+				    "from "
+				    "MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST "
+				    ":\n");
+				action = (MPI2_RAID_ACTION_REQUEST *)cm->cm_req;
+				action->Function = MPI2_FUNCTION_RAID_ACTION;
+				action->Action =
+				    MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+				action->PhysDiskNum = element->PhysDiskNum;
+				cm->cm_desc.Default.RequestFlags =
+				    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+				error = mpr_request_polled(sc, cm);
+				reply = (Mpi2RaidActionReply_t *)cm->cm_reply;
+				if (error || (reply == NULL)) {
+					/* FIXME */
+					/*
+					 * If the poll returns error then we
+					 * need to do diag reset
+					 */
+					printf("%s: poll for page completed "
+					    "with error %d", __func__, error);
+				}
+				if (reply && (le16toh(reply->IOCStatus) &
+				    MPI2_IOCSTATUS_MASK) !=
+				    MPI2_IOCSTATUS_SUCCESS) {
+					mpr_dprint(sc, MPR_INFO, "%s: error "
+					    "sending RaidActionPage; iocstatus "
+					    "= 0x%x\n", __func__,
+					    le16toh(reply->IOCStatus));
+				}
+
+				/* cm is non-NULL here; free unconditionally. */
+				mpr_free_command(sc, cm);
+			}
+skip_fp_send:
+			mpr_dprint(sc, MPR_INFO, "Received "
+			    "MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST Reason "
+			    "code %x:\n", element->ReasonCode);
+			switch (element->ReasonCode) {
+			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+				if (!foreign_config) {
+					if (mprsas_volume_add(sc,
+					    le16toh(element->VolDevHandle))) {
+						printf("%s: failed to add RAID "
+						    "volume with handle 0x%x\n",
+						    __func__, le16toh(element->
+						    VolDevHandle));
+					}
+				}
+				break;
+			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+				/*
+				 * Rescan after volume is deleted or removed.
+				 */
+				if (!foreign_config) {
+					if (id == MPR_MAP_BAD_ID) {
+						printf("%s: could not get ID "
+						    "for volume with handle "
+						    "0x%04x\n", __func__,
+						    le16toh(element->
+						    VolDevHandle));
+						break;
+					}
+
+					/* Clear the stale target slot. */
+					targ = &sassc->targets[id];
+					targ->handle = 0x0;
+					targ->encl_slot = 0x0;
+					targ->encl_handle = 0x0;
+					targ->encl_level_valid = 0x0;
+					targ->encl_level = 0x0;
+					targ->connector_name[0] = ' ';
+					targ->connector_name[1] = ' ';
+					targ->connector_name[2] = ' ';
+					targ->connector_name[3] = ' ';
+					targ->exp_dev_handle = 0x0;
+					targ->phy_num = 0x0;
+					targ->linkrate = 0x0;
+					mprsas_rescan_target(sc, targ);
+					printf("RAID target id 0x%x removed\n",
+					    targ->tid);
+				}
+				break;
+			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+				/*
+				 * Phys Disk of a volume has been created. Hide
+				 * it from the OS.
+				 */
+				targ = mprsas_find_target_by_handle(sassc, 0,
+				    element->PhysDiskDevHandle);
+				if (targ == NULL)
+					break;
+				targ->flags |= MPR_TARGET_FLAGS_RAID_COMPONENT;
+				mprsas_rescan_target(sc, targ);
+
+				break;
+			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+				/*
+				 * Phys Disk of a volume has been deleted.
+				 * Expose it to the OS.
+				 */
+				if (mprsas_add_device(sc,
+				    le16toh(element->PhysDiskDevHandle), 0)) {
+					printf("%s: failed to add device with "
+					    "handle 0x%x\n", __func__,
+					    le16toh(element->
+					    PhysDiskDevHandle));
+					mprsas_prepare_remove(sassc,
+					    le16toh(element->
+					    PhysDiskDevHandle));
+				}
+				break;
+			}
+		}
+		/*
+		 * refcount was incremented for this event in
+		 * mprsas_evt_handler. Decrement it here because the event has
+		 * been processed.
+		 */
+		mprsas_startup_decrement(sassc);
+		break;
+	}
+	case MPI2_EVENT_IR_VOLUME:
+	{
+		Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+
+		/*
+		 * Informational only.
+		 */
+		mpr_dprint(sc, MPR_EVENT, "Received IR Volume event:\n");
+		switch (event_data->ReasonCode) {
+		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Volume Settings "
+			    "changed from 0x%x to 0x%x for Volume with "
+			    "handle 0x%x", le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    le16toh(event_data->VolDevHandle));
+			break;
+		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Volume Status "
+			    "changed from 0x%x to 0x%x for Volume with "
+			    "handle 0x%x", le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    le16toh(event_data->VolDevHandle));
+			break;
+		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Volume State "
+			    "changed from 0x%x to 0x%x for Volume with "
+			    "handle 0x%x", le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    le16toh(event_data->VolDevHandle));
+			u32 state;
+			struct mprsas_target *targ;
+			state = le32toh(event_data->NewValue);
+			switch (state) {
+			case MPI2_RAID_VOL_STATE_MISSING:
+			case MPI2_RAID_VOL_STATE_FAILED:
+				mprsas_prepare_volume_remove(sassc,
+				    event_data->VolDevHandle);
+				break;
+
+			case MPI2_RAID_VOL_STATE_ONLINE:
+			case MPI2_RAID_VOL_STATE_DEGRADED:
+			case MPI2_RAID_VOL_STATE_OPTIMAL:
+				targ =
+				    mprsas_find_target_by_handle(sassc,
+				    0, event_data->VolDevHandle);
+				if (targ) {
+					printf("%s %d: Volume handle "
+					    "0x%x is already added \n",
+					    __func__, __LINE__,
+					    event_data->VolDevHandle);
+					break;
+				}
+				if (mprsas_volume_add(sc,
+				    le16toh(event_data->
+				    VolDevHandle))) {
+					printf("%s: failed to add RAID "
+					    "volume with handle 0x%x\n",
+					    __func__, le16toh(
+					    event_data->VolDevHandle));
+				}
+				break;
+			default:
+				break;
+			}
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case MPI2_EVENT_IR_PHYSICAL_DISK:
+	{
+		Mpi2EventDataIrPhysicalDisk_t *event_data =
+		    fw_event->event_data;
+		struct mprsas_target *targ;
+
+		/*
+		 * Informational only.
+		 */
+		mpr_dprint(sc, MPR_EVENT, "Received IR Phys Disk event:\n");
+		switch (event_data->ReasonCode) {
+		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Phys Disk Settings "
+			    "changed from 0x%x to 0x%x for Phys Disk Number "
+			    "%d and handle 0x%x at Enclosure handle 0x%x, Slot "
+			    "%d", le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    event_data->PhysDiskNum,
+			    le16toh(event_data->PhysDiskDevHandle),
+			    le16toh(event_data->EnclosureHandle),
+			    le16toh(event_data->Slot));
+			break;
+		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Phys Disk Status changed "
+			    "from 0x%x to 0x%x for Phys Disk Number %d and "
+			    "handle 0x%x at Enclosure handle 0x%x, Slot %d",
+			    le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    event_data->PhysDiskNum,
+			    le16toh(event_data->PhysDiskDevHandle),
+			    le16toh(event_data->EnclosureHandle),
+			    le16toh(event_data->Slot));
+			break;
+		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
+			mpr_dprint(sc, MPR_EVENT, "   Phys Disk State changed "
+			    "from 0x%x to 0x%x for Phys Disk Number %d and "
+			    "handle 0x%x at Enclosure handle 0x%x, Slot %d",
+			    le32toh(event_data->PreviousValue),
+			    le32toh(event_data->NewValue),
+			    event_data->PhysDiskNum,
+			    le16toh(event_data->PhysDiskDevHandle),
+			    le16toh(event_data->EnclosureHandle),
+			    le16toh(event_data->Slot));
+			switch (event_data->NewValue) {
+			case MPI2_RAID_PD_STATE_ONLINE:
+			case MPI2_RAID_PD_STATE_DEGRADED:
+			case MPI2_RAID_PD_STATE_REBUILDING:
+			case MPI2_RAID_PD_STATE_OPTIMAL:
+			case MPI2_RAID_PD_STATE_HOT_SPARE:
+				targ = mprsas_find_target_by_handle(
+				    sassc, 0,
+				    event_data->PhysDiskDevHandle);
+				if (targ) {
+					targ->flags |=
+					    MPR_TARGET_FLAGS_RAID_COMPONENT;
+					printf("%s %d: Found Target "
+					    "for handle 0x%x.\n",
+					    __func__, __LINE__ ,
+					    event_data->
+					    PhysDiskDevHandle);
+				}
+				break;
+			case MPI2_RAID_PD_STATE_OFFLINE:
+			case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+			case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+			default:
+				targ = mprsas_find_target_by_handle(
+				    sassc, 0,
+				    event_data->PhysDiskDevHandle);
+				if (targ) {
+					/*
+					 * Clear the RAID-component flag so
+					 * the disk is exposed again.  The
+					 * original used "|= ~FLAG", which
+					 * set nearly every flag bit instead
+					 * of clearing this one.
+					 */
+					targ->flags &=
+					    ~MPR_TARGET_FLAGS_RAID_COMPONENT;
+					printf("%s %d: Found Target "
+					    "for handle 0x%x.  \n",
+					    __func__, __LINE__ ,
+					    event_data->
+					    PhysDiskDevHandle);
+				}
+				break;
+			}
+			/* Explicit break; previously fell into default. */
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	case MPI2_EVENT_IR_OPERATION_STATUS:
+	{
+		Mpi2EventDataIrOperationStatus_t *event_data =
+		    fw_event->event_data;
+
+		/*
+		 * Informational only.
+		 */
+		mpr_dprint(sc, MPR_EVENT, "Received IR Op Status event:\n");
+		mpr_dprint(sc, MPR_EVENT, "   RAID Operation of %d is %d "
+		    "percent complete for Volume with handle 0x%x",
+		    event_data->RAIDOperation, event_data->PercentComplete,
+		    le16toh(event_data->VolDevHandle));
+		break;
+	}
+	case MPI2_EVENT_TEMP_THRESHOLD:
+	{
+		pMpi2EventDataTemperature_t temp_event;
+
+		temp_event = (pMpi2EventDataTemperature_t)fw_event->event_data;
+
+		/*
+		 * The Temp Sensor Count must be greater than the event's Sensor
+		 * Num to be valid.  If valid, print the temp thresholds that
+		 * have been exceeded.
+		 */
+		if (sc->iounit_pg8.NumSensors > temp_event->SensorNum) {
+			mpr_dprint(sc, MPR_FAULT, "Temperature Threshold flags "
+			    "%s %s %s %s exceeded for Sensor: %d !!!\n",
+			    ((temp_event->Status & 0x01) == 1) ? "0 " : " ",
+			    ((temp_event->Status & 0x02) == 2) ? "1 " : " ",
+			    ((temp_event->Status & 0x04) == 4) ? "2 " : " ",
+			    ((temp_event->Status & 0x08) == 8) ? "3 " : " ",
+			    temp_event->SensorNum);
+			mpr_dprint(sc, MPR_FAULT, "Current Temp in Celsius: "
+			    "%d\n", temp_event->CurrentTemperature);
+		}
+		break;
+	}
+	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+	default:
+		mpr_dprint(sc, MPR_TRACE,"Unhandled event 0x%0X\n",
+		    fw_event->event);
+		break;
+
+	}
+	mpr_dprint(sc, MPR_EVENT, "(%d)->(%s) Event Free: [%x]\n", event_count,
+	    __func__, fw_event->event);
+	mprsas_fw_event_free(sc, fw_event);
+}
+
+/*
+ * Taskqueue handler: drain the firmware event queue under the mpr lock,
+ * processing (and freeing) each event in FIFO order via mprsas_fw_work.
+ */
+void
+mprsas_firmware_event_work(void *arg, int pending)
+{
+	struct mpr_softc *sc = arg;
+	struct mpr_fw_event_work *fw_event;
+
+	mpr_lock(sc);
+	for (;;) {
+		fw_event = TAILQ_FIRST(&sc->sassc->ev_queue);
+		if (fw_event == NULL)
+			break;
+		TAILQ_REMOVE(&sc->sassc->ev_queue, fw_event, ev_link);
+		mprsas_fw_work(sc, fw_event);
+	}
+	mpr_unlock(sc);
+}
+
+/*
+ * Add a newly discovered SAS/SATA end device: read its SAS Device Page 0,
+ * resolve its persistent mapping ID, populate the target slot, and kick a
+ * CAM rescan.  Returns 0 on success or ENXIO on any failure.  Holds a
+ * startup refcount for the duration so port enable waits for it.
+ */
+static int
+mprsas_add_device(struct mpr_softc *sc, u16 handle, u8 linkrate){
+	char devstring[80];
+	struct mprsas_softc *sassc;
+	struct mprsas_target *targ;
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasDevicePage0_t config_page;
+	uint64_t sas_address, sata_sas_address;
+	uint64_t parent_sas_address = 0;
+	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+	u32 device_info, parent_devinfo = 0;
+	unsigned int id;
+	int ret;
+	int error = 0;
+	struct mprsas_lun *lun;
+
+	sassc = sc->sassc;
+	mprsas_startup_increment(sassc);
+	if ((mpr_config_get_sas_device_pg0(sc, &mpi_reply, &config_page,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		printf("%s: error reading SAS device page0\n", __func__);
+		error = ENXIO;
+		goto out;
+	}
+
+	device_info = le32toh(config_page.DeviceInfo);
+
+	/* For non-SMP devices, also look up the parent's address/devinfo. */
+	if (((device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0)
+	    && (le16toh(config_page.ParentDevHandle) != 0)) {
+		Mpi2ConfigReply_t tmp_mpi_reply;
+		Mpi2SasDevicePage0_t parent_config_page;
+
+		if ((mpr_config_get_sas_device_pg0(sc, &tmp_mpi_reply,
+		    &parent_config_page, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+		    le16toh(config_page.ParentDevHandle)))) {
+			printf("%s: error reading SAS device %#x page0\n",
+			    __func__, le16toh(config_page.ParentDevHandle));
+		} else {
+			parent_sas_address = parent_config_page.SASAddress.High;
+			parent_sas_address = (parent_sas_address << 32) |
+			    parent_config_page.SASAddress.Low;
+			parent_devinfo = le32toh(parent_config_page.DeviceInfo);
+		}
+	}
+	/* TODO Check proper endianess */
+	/*
+	 * NOTE(review): High/Low are combined without le32toh here, yet
+	 * targ->sasaddr below uses mpr_to_u64 — confirm these agree on
+	 * big-endian hosts.
+	 */
+	sas_address = config_page.SASAddress.High;
+	sas_address = (sas_address << 32) |
+	    config_page.SASAddress.Low;
+
+	/*
+	 * In persistence-mapping mode, SATA disks are keyed by a hashed
+	 * address derived from the IDENTIFY data rather than the
+	 * bridge-assigned SAS address.
+	 */
+	if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE)
+	    == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) {
+		if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) {
+			ret = mprsas_get_sas_address_for_sata_disk(sc,
+			    &sata_sas_address, handle, device_info);
+			if (!ret)
+				id = mpr_mapping_get_sas_id(sc,
+				    sata_sas_address, handle);
+			else
+				id = mpr_mapping_get_sas_id(sc,
+				    sas_address, handle);
+		} else
+			id = mpr_mapping_get_sas_id(sc, sas_address,
+			    handle);
+	} else
+		id = mpr_mapping_get_sas_id(sc, sas_address, handle);
+
+	if (id == MPR_MAP_BAD_ID) {
+		printf("failure at %s:%d/%s()! Could not get ID for device "
+		    "with handle 0x%04x\n", __FILE__, __LINE__, __func__,
+		    handle);
+		error = ENXIO;
+		goto out;
+	}
+
+	if (mprsas_check_id(sassc, id) != 0) {
+		device_printf(sc->mpr_dev, "Excluding target id %d\n", id);
+		error = ENXIO;
+		goto out;
+	}
+
+	/* Populate the target slot from Device Page 0. */
+	mpr_dprint(sc, MPR_MAPPING, "SAS Address from SAS device page0 = %jx\n",
+	    sas_address);
+	targ = &sassc->targets[id];
+	targ->devinfo = device_info;
+	targ->devname = le32toh(config_page.DeviceName.High);
+	targ->devname = (targ->devname << 32) |
+	    le32toh(config_page.DeviceName.Low);
+	targ->encl_handle = le16toh(config_page.EnclosureHandle);
+	targ->encl_slot = le16toh(config_page.Slot);
+	targ->encl_level = config_page.EnclosureLevel;
+	targ->connector_name[0] = config_page.ConnectorName[0];
+	targ->connector_name[1] = config_page.ConnectorName[1];
+	targ->connector_name[2] = config_page.ConnectorName[2];
+	targ->connector_name[3] = config_page.ConnectorName[3];
+	targ->handle = handle;
+	targ->parent_handle = le16toh(config_page.ParentDevHandle);
+	targ->sasaddr = mpr_to_u64(&config_page.SASAddress);
+	targ->parent_sasaddr = le64toh(parent_sas_address);
+	targ->parent_devinfo = parent_devinfo;
+	targ->tid = id;
+	/* LinkRate's upper nibble holds the current negotiated rate. */
+	targ->linkrate = (linkrate>>4);
+	targ->flags = 0;
+	if (le16toh(config_page.Flags) &
+	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) {
+		targ->scsi_req_desc_type =
+		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+	}
+	if (le16toh(config_page.Flags) &
+	    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+		targ->encl_level_valid = TRUE;
+	}
+	TAILQ_INIT(&targ->commands);
+	TAILQ_INIT(&targ->timedout_commands);
+	/* Discard any stale LUN records from a previous device at this id. */
+	while (!SLIST_EMPTY(&targ->luns)) {
+		lun = SLIST_FIRST(&targ->luns);
+		SLIST_REMOVE_HEAD(&targ->luns, lun_link);
+		free(lun, M_MPR);
+	}
+	SLIST_INIT(&targ->luns);
+
+	mpr_describe_devinfo(targ->devinfo, devstring, 80);
+	mpr_dprint(sc, (MPR_XINFO|MPR_MAPPING), "Found device <%s> <%s> "
+	    "handle<0x%04x> enclosureHandle<0x%04x> slot %d\n", devstring,
+	    mpr_describe_table(mpr_linkrate_names, targ->linkrate),
+	    targ->handle, targ->encl_handle, targ->encl_slot);
+	if (targ->encl_level_valid) {
+		mpr_dprint(sc, (MPR_XINFO|MPR_MAPPING), "At enclosure level %d "
+		    "and connector name (%4s)\n", targ->encl_level,
+		    targ->connector_name);
+	}
+#if ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000039)) || \
+    (__FreeBSD_version < 902502)
+	if ((sassc->flags & MPRSAS_IN_STARTUP) == 0)
+#endif
+		mprsas_rescan_target(sc, targ);
+	mpr_dprint(sc, MPR_MAPPING, "Target id 0x%x added\n", targ->tid);
+out:
+	mprsas_startup_decrement(sassc);
+	return (error);
+
+}
+
+/*
+ * Build a stable pseudo-SAS address for a SATA disk by hashing its
+ * IDENTIFY DEVICE model and serial numbers (60 bytes folded into 8).
+ * Used so persistence mapping keys SATA disks by identity rather than
+ * by the bridge-assigned address.  Returns 0 on success with the hash
+ * in *sas_address, or -1 after up to 5 failed attempts.
+ */
+int
+mprsas_get_sas_address_for_sata_disk(struct mpr_softc *sc,
+    u64 *sas_address, u16 handle, u32 device_info)
+{
+	Mpi2SataPassthroughReply_t mpi_reply;
+	int i, rc, try_count;
+	u32 *bufferptr;
+	union _sata_sas_address hash_address;
+	struct _ata_identify_device_data ata_identify;
+	u8 buffer[MPT2SAS_MN_LEN + MPT2SAS_SN_LEN];
+	u32 ioc_status;
+	u8 sas_status;
+
+	memset(&ata_identify, 0, sizeof(ata_identify));
+	try_count = 0;
+	do {
+		/*
+		 * Zero the reply before every attempt:
+		 * mprsas_get_sata_identify() only copies a reply out on
+		 * success, so without this the IOCStatus/SASStatus reads
+		 * below would examine uninitialized stack memory after a
+		 * failed attempt.
+		 */
+		memset(&mpi_reply, 0, sizeof(mpi_reply));
+		rc = mprsas_get_sata_identify(sc, handle, &mpi_reply,
+		    (char *)&ata_identify, sizeof(ata_identify), device_info);
+		try_count++;
+		ioc_status = le16toh(mpi_reply.IOCStatus)
+		    & MPI2_IOCSTATUS_MASK;
+		sas_status = mpi_reply.SASStatus;
+		/*
+		 * NOTE(review): the helper returns positive errno values,
+		 * so "rc == -EAGAIN" can never match — the retry loop
+		 * effectively keys off ioc_status/sas_status only; confirm
+		 * the intended retry condition.
+		 */
+	} while ((rc == -EAGAIN || ioc_status || sas_status) &&
+	    (try_count < 5));
+
+	if (rc == 0 && !ioc_status && !sas_status) {
+		mpr_dprint(sc, MPR_MAPPING, "%s: got SATA identify "
+		    "successfully for handle = 0x%x with try_count = %d\n",
+		    __func__, handle, try_count);
+	} else {
+		mpr_dprint(sc, MPR_MAPPING, "%s: handle = 0x%x failed\n",
+		    __func__, handle);
+		return -1;
+	}
+	/* Copy & byteswap the 40 byte model number to a buffer */
+	for (i = 0; i < MPT2SAS_MN_LEN; i += 2) {
+		buffer[i] = ((u8 *)ata_identify.model_number)[i + 1];
+		buffer[i + 1] = ((u8 *)ata_identify.model_number)[i];
+	}
+	/* Copy & byteswap the 20 byte serial number to a buffer */
+	for (i = 0; i < MPT2SAS_SN_LEN; i += 2) {
+		buffer[MPT2SAS_MN_LEN + i] =
+		    ((u8 *)ata_identify.serial_number)[i + 1];
+		buffer[MPT2SAS_MN_LEN + i + 1] =
+		    ((u8 *)ata_identify.serial_number)[i];
+	}
+	bufferptr = (u32 *)buffer;
+	/* There are 60 bytes to hash down to 8. 60 isn't divisible by 8,
+	 * so loop through the first 56 bytes (7*8),
+	 * and then add in the last dword.
+	 */
+	hash_address.word.low = 0;
+	hash_address.word.high = 0;
+	for (i = 0; (i < ((MPT2SAS_MN_LEN+MPT2SAS_SN_LEN)/8)); i++) {
+		hash_address.word.low += *bufferptr;
+		bufferptr++;
+		hash_address.word.high += *bufferptr;
+		bufferptr++;
+	}
+	/* Add the last dword */
+	hash_address.word.low += *bufferptr;
+	/* Make sure the hash doesn't start with 5, because it could clash
+	 * with a SAS address.  Change 5 to a D.
+	 */
+	if ((hash_address.word.high & 0x000000F0) == (0x00000050))
+		hash_address.word.high |= 0x00000080;
+	*sas_address = (u64)hash_address.wwid[0] << 56 |
+	    (u64)hash_address.wwid[1] << 48 | (u64)hash_address.wwid[2] << 40 |
+	    (u64)hash_address.wwid[3] << 32 | (u64)hash_address.wwid[4] << 24 |
+	    (u64)hash_address.wwid[5] << 16 | (u64)hash_address.wwid[6] << 8 |
+	    (u64)hash_address.wwid[7];
+	return 0;
+}
+
+/*
+ * Issue an ATA IDENTIFY DEVICE (or IDENTIFY PACKET DEVICE for ATAPI)
+ * through the IOC's SATA passthrough interface.  On success, the
+ * identify data is copied into id_buffer and the MPI reply into
+ * mpi_reply.  Returns 0 on success or an errno (ENOMEM/EBUSY/ENXIO).
+ * Note: mpi_reply is NOT written on failure paths — callers must not
+ * trust it unless this returns 0.
+ */
+static int
+mprsas_get_sata_identify(struct mpr_softc *sc, u16 handle,
+    Mpi2SataPassthroughReply_t *mpi_reply, char *id_buffer, int sz, u32 devinfo)
+{
+	Mpi2SataPassthroughRequest_t *mpi_request;
+	Mpi2SataPassthroughReply_t *reply;
+	struct mpr_command *cm;
+	char *buffer;
+	int error = 0;
+
+	buffer = malloc( sz, M_MPR, M_NOWAIT | M_ZERO);
+	if (!buffer)
+		return ENOMEM;
+
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		free(buffer, M_MPR);
+		return (EBUSY);
+	}
+	mpi_request = (MPI2_SATA_PASSTHROUGH_REQUEST *)cm->cm_req;
+	bzero(mpi_request,sizeof(MPI2_SATA_PASSTHROUGH_REQUEST));
+	mpi_request->Function = MPI2_FUNCTION_SATA_PASSTHROUGH;
+	mpi_request->VF_ID = 0;
+	mpi_request->DevHandle = htole16(handle);
+	mpi_request->PassthroughFlags = (MPI2_SATA_PT_REQ_PT_FLAGS_PIO |
+	    MPI2_SATA_PT_REQ_PT_FLAGS_READ);
+	mpi_request->DataLength = htole32(sz);
+	/* Host-to-device register FIS: 0x27/0x80, command in FIS[2]. */
+	mpi_request->CommandFIS[0] = 0x27;
+	mpi_request->CommandFIS[1] = 0x80;
+	/* 0xA1 = IDENTIFY PACKET DEVICE (ATAPI), 0xEC = IDENTIFY DEVICE. */
+	mpi_request->CommandFIS[2] = (devinfo &
+	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? 0xA1 : 0xEC;
+	cm->cm_sge = &mpi_request->SGL;
+	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
+	cm->cm_flags = MPR_CM_FLAGS_DATAIN;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_data = buffer;
+	/*
+	 * NOTE(review): cm_length looks like a host-order field elsewhere
+	 * in the driver; htole32 here is suspicious on big-endian — confirm.
+	 */
+	cm->cm_length = htole32(sz);
+	error = mpr_wait_command(sc, cm, 60, CAN_SLEEP);
+	reply = (Mpi2SataPassthroughReply_t *)cm->cm_reply;
+	if (error || (reply == NULL)) {
+		/* FIXME */
+		/*
+		 * If the request returns an error then we need to do a diag
+		 * reset
+		 */
+		printf("%s: request for page completed with error %d",
+		    __func__, error);
+		error = ENXIO;
+		goto out;
+	}
+	bcopy(buffer, id_buffer, sz);
+	bcopy(reply, mpi_reply, sizeof(Mpi2SataPassthroughReply_t));
+	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
+	    MPI2_IOCSTATUS_SUCCESS) {
+		printf("%s: error reading SATA PASSTHRU; iocstatus = 0x%x\n",
+		    __func__, reply->IOCStatus);
+		error = ENXIO;
+		goto out;
+	}
+out:
+	mpr_free_command(sc, cm);
+	free(buffer, M_MPR);
+	return (error);
+}
+
+/*
+ * Add an IR volume: look up its WWID, resolve the mapping-table ID,
+ * initialize the target slot, and rescan it.  Returns 0 on success or
+ * ENXIO when the WWID or mapping ID cannot be obtained.  Holds a
+ * startup refcount for the duration so port enable waits for it.
+ */
+static int
+mprsas_volume_add(struct mpr_softc *sc, u16 handle)
+{
+	struct mprsas_softc *sassc;
+	struct mprsas_target *targ;
+	u64 wwid;
+	unsigned int id;
+	int error = 0;
+	struct mprsas_lun *lun;
+
+	sassc = sc->sassc;
+	mprsas_startup_increment(sassc);
+	/* wwid is endian safe */
+	mpr_config_get_volume_wwid(sc, handle, &wwid);
+	if (!wwid) {
+		printf("%s: invalid WWID; cannot add volume to mapping table\n",
+		    __func__);
+		error = ENXIO;
+		goto out;
+	}
+
+	id = mpr_mapping_get_raid_id(sc, wwid, handle);
+	if (id == MPR_MAP_BAD_ID) {
+		printf("%s: could not get ID for volume with handle 0x%04x and "
+		    "WWID 0x%016llx\n", __func__, handle,
+		    (unsigned long long)wwid);
+		error = ENXIO;
+		goto out;
+	}
+
+	targ = &sassc->targets[id];
+	targ->tid = id;
+	targ->handle = handle;
+	targ->devname = wwid;
+	TAILQ_INIT(&targ->commands);
+	TAILQ_INIT(&targ->timedout_commands);
+	/* Discard stale LUN records from a previous device at this id. */
+	while (!SLIST_EMPTY(&targ->luns)) {
+		lun = SLIST_FIRST(&targ->luns);
+		SLIST_REMOVE_HEAD(&targ->luns, lun_link);
+		free(lun, M_MPR);
+	}
+	SLIST_INIT(&targ->luns);
+#if ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000039)) || \
+    (__FreeBSD_version < 902502)
+	if ((sassc->flags & MPRSAS_IN_STARTUP) == 0)
+#endif
+		mprsas_rescan_target(sc, targ);
+	mpr_dprint(sc, MPR_MAPPING, "RAID target id %d added (WWID = 0x%jx)\n",
+	    targ->tid, wwid);
+out:
+	mprsas_startup_decrement(sassc);
+	return (error);
+}
+
+/**
+ * mprsas_SSU_to_SATA_devices
+ * @sc: per adapter object
+ *
+ * Looks through the target list and issues a StartStopUnit SCSI command to each
+ * SATA direct-access device.  This helps to ensure that data corruption is
+ * avoided when the system is being shut down.  This must be called after the IR
+ * System Shutdown RAID Action is sent if in IR mode.
+ *
+ * Return nothing.
+ */
+static void
+mprsas_SSU_to_SATA_devices(struct mpr_softc *sc)
+{
+	struct mprsas_softc *sassc = sc->sassc;
+	union ccb *ccb;
+	path_id_t pathid = cam_sim_path(sassc->sim);
+	target_id_t targetid;
+	struct mprsas_target *target;
+	struct mprsas_lun *lun;
+	char path_str[64];
+	struct timeval cur_time, start_time;
+
+	mpr_lock(sc);
+
+	/*
+	 * For each LUN of each target, issue a StartStopUnit command to stop
+	 * the device.
+	 */
+	sc->SSU_started = TRUE;
+	sc->SSU_refcount = 0;
+	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
+		target = &sassc->targets[targetid];
+		if (target->handle == 0x0) {
+			continue;
+		}
+
+		SLIST_FOREACH(lun, &target->luns, lun_link) {
+			/*
+			 * The stop_at_shutdown flag is set only for SATA
+			 * direct-access end devices; skip everything else
+			 * BEFORE allocating a CCB.  The original allocated
+			 * a CCB for every LUN and leaked it whenever this
+			 * flag was clear.
+			 */
+			if (!lun->stop_at_shutdown)
+				continue;
+
+			ccb = xpt_alloc_ccb_nowait();
+			if (ccb == NULL) {
+				mpr_unlock(sc);
+				mpr_dprint(sc, MPR_FAULT, "Unable to alloc "
+				    "CCB to stop unit.\n");
+				return;
+			}
+
+			if (xpt_create_path(&ccb->ccb_h.path,
+			    xpt_periph, pathid, targetid,
+			    lun->lun_id) != CAM_REQ_CMP) {
+				mpr_dprint(sc, MPR_FAULT, "Unable to "
+				    "create LUN path to stop unit.\n");
+				xpt_free_ccb(ccb);
+				mpr_unlock(sc);
+				return;
+			}
+			xpt_path_string(ccb->ccb_h.path, path_str,
+			    sizeof(path_str));
+
+			mpr_dprint(sc, MPR_INFO, "Sending StopUnit: "
+			    "path %s handle %d\n", path_str,
+			    target->handle);
+
+			/*
+			 * Issue a START STOP UNIT command for the LUN.
+			 * Increment the SSU counter to be used to
+			 * count the number of required replies.
+			 */
+			mpr_dprint(sc, MPR_INFO, "Incrementing SSU "
+			    "count\n");
+			sc->SSU_refcount++;
+			ccb->ccb_h.target_id =
+			    xpt_path_target_id(ccb->ccb_h.path);
+			ccb->ccb_h.target_lun = lun->lun_id;
+			ccb->ccb_h.ppriv_ptr1 = sassc;
+			scsi_start_stop(&ccb->csio,
+			    /*retries*/0,
+			    mprsas_stop_unit_done,
+			    MSG_SIMPLE_Q_TAG,
+			    /*start*/FALSE,
+			    /*load/eject*/0,
+			    /*immediate*/FALSE,
+			    MPR_SENSE_LEN,
+			    /*timeout*/10000);
+			xpt_action(ccb);
+		}
+	}
+
+	mpr_unlock(sc);
+
+	/*
+	 * Wait until all of the SSU commands have completed or time has
+	 * expired (60 seconds).  pause for 100ms each time through.  If any
+	 * command times out, the target will be reset in the SCSI command
+	 * timeout routine.
+	 */
+	getmicrotime(&start_time);
+	while (sc->SSU_refcount) {
+		pause("mprwait", hz/10);
+
+		getmicrotime(&cur_time);
+		if ((cur_time.tv_sec - start_time.tv_sec) > 60) {
+			mpr_dprint(sc, MPR_FAULT, "Time has expired waiting "
+			    "for SSU commands to complete.\n");
+			break;
+		}
+	}
+}
+
+/*
+ * Completion callback for the shutdown-time START STOP UNIT CCBs issued
+ * by mprsas_SSU_to_SATA_devices().  Logs completion and releases the
+ * path and CCB.  The NULL check must come first: the original
+ * dereferenced done_ccb (ppriv_ptr1 and ccb_h.path) before testing it
+ * for NULL, making the check useless and the dereference a potential
+ * NULL-pointer panic.
+ *
+ * NOTE(review): nothing here decrements sc->SSU_refcount — confirm the
+ * decrement happens in the SCSI I/O completion path, otherwise the
+ * shutdown wait loop always runs to its 60 s timeout.
+ */
+static void
+mprsas_stop_unit_done(struct cam_periph *periph, union ccb *done_ccb)
+{
+	struct mprsas_softc *sassc;
+	char path_str[64];
+
+	if (done_ccb == NULL)
+		return;
+
+	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
+
+	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
+	mpr_dprint(sassc->sc, MPR_INFO, "Completing stop unit for %s\n",
+	    path_str);
+
+	/*
+	 * Nothing more to do except free the CCB and path.  If the command
+	 * timed out, an abort reset, then target reset will be issued during
+	 * the SCSI Command process.
+	 */
+	xpt_free_path(done_ccb->ccb_h.path);
+	xpt_free_ccb(done_ccb);
+}
+
+/**
+ * mprsas_ir_shutdown - IR shutdown notification
+ * @sc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+void
+mprsas_ir_shutdown(struct mpr_softc *sc)
+{
+	u16 volume_mapping_flags;
+	u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags);
+	struct dev_mapping_table *mt_entry;
+	u32 start_idx, end_idx;
+	unsigned int id, found_volume = 0;
+	struct mpr_command *cm;
+	Mpi2RaidActionRequest_t *action;
+
+	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
+
+	/* is IR firmware build loaded? */
+	if (!sc->ir_firmware)
+		goto out;
+
+	/* are there any volumes?  Look at IR target IDs. */
+	// TODO-later, this should be looked up in the RAID config structure
+	// when it is implemented.
+	/* Compute the target-ID range where volumes are mapped. */
+	volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
+	    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+	if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+		start_idx = 0;
+		if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0)
+			start_idx = 1;
+	} else
+		start_idx = sc->max_devices - sc->max_volumes;
+	end_idx = start_idx + sc->max_volumes - 1;
+
+	/*
+	 * NOTE(review): end_idx is computed as an inclusive index
+	 * (start + max_volumes - 1), but the loop uses "id < end_idx",
+	 * which appears to skip the last volume slot — confirm whether
+	 * this should be "id <= end_idx".
+	 */
+	for (id = start_idx; id < end_idx; id++) {
+		mt_entry = &sc->mapping_table[id];
+		if ((mt_entry->physical_id != 0) &&
+		    (mt_entry->missing_count == 0)) {
+			found_volume = 1;
+			break;
+		}
+	}
+
+	if (!found_volume)
+		goto out;
+
+	if ((cm = mpr_alloc_command(sc)) == NULL) {
+		printf("%s: command alloc failed\n", __func__);
+		goto out;
+	}
+
+	/* Tell the IR firmware the host is shutting down. */
+	action = (MPI2_RAID_ACTION_REQUEST *)cm->cm_req;
+	action->Function = MPI2_FUNCTION_RAID_ACTION;
+	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	mpr_lock(sc);
+	mpr_wait_command(sc, cm, 5, CAN_SLEEP);
+	mpr_unlock(sc);
+
+	/*
+	 * Don't check for reply, just leave.
+	 */
+	/* NOTE(review): cm is non-NULL here; the guard below is redundant. */
+	if (cm)
+		mpr_free_command(sc, cm);
+
+out:
+	/* Always stop SATA disks, whether or not IR volumes exist. */
+	mprsas_SSU_to_SATA_devices(sc);
+}
diff --git a/sys/dev/mpr/mpr_table.c b/sys/dev/mpr/mpr_table.c
new file mode 100644
index 0000000000000..b1e12b38415e2
--- /dev/null
+++ b/sys/dev/mpr/mpr_table.c
@@ -0,0 +1,516 @@
+/*-
+ * Copyright (c) 2009 Yahoo! Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/* Debugging tables for MPT2 */
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_table.h>
+
+/*
+ * Return the string for 'code' from a lookup table.  Tables must end with
+ * a {NULL, 0} sentinel immediately followed by a catch-all default entry:
+ * when the loop stops, table[i] is the sentinel, so table[i+1] is the
+ * default string that gets returned for unknown codes.
+ */
+char *
+mpr_describe_table(struct mpr_table_lookup *table, u_int code)
+{
+ int i;
+
+ for (i = 0; table[i].string != NULL; i++) {
+ if (table[i].code == code)
+ return(table[i].string);
+ }
+ return(table[i+1].string);
+}
+
+/*
+ * Lookup tables consumed by mpr_describe_table().  Layout contract: each
+ * table is terminated by a {NULL, 0} sentinel, which must be immediately
+ * followed by one final catch-all entry that mpr_describe_table() returns
+ * when no code matches.  Do not reorder or drop those last two entries.
+ */
+struct mpr_table_lookup mpr_event_names[] = {
+ {"LogData", 0x01},
+ {"StateChange", 0x02},
+ {"HardResetReceived", 0x05},
+ {"EventChange", 0x0a},
+ {"TaskSetFull", 0x0e},
+ {"SasDeviceStatusChange", 0x0f},
+ {"IrOperationStatus", 0x14},
+ {"SasDiscovery", 0x16},
+ {"SasBroadcastPrimitive", 0x17},
+ {"SasInitDeviceStatusChange", 0x18},
+ {"SasInitTableOverflow", 0x19},
+ {"SasTopologyChangeList", 0x1c},
+ {"SasEnclDeviceStatusChange", 0x1d},
+ {"IrVolume", 0x1e},
+ {"IrPhysicalDisk", 0x1f},
+ {"IrConfigurationChangeList", 0x20},
+ {"LogEntryAdded", 0x21},
+ {"SasPhyCounter", 0x22},
+ {"GpioInterrupt", 0x23},
+ {"HbdPhyEvent", 0x24},
+ {NULL, 0},
+ {"Unknown Event", 0}
+};
+
+struct mpr_table_lookup mpr_phystatus_names[] = {
+ {"NewTargetAdded", 0x01},
+ {"TargetGone", 0x02},
+ {"PHYLinkStatusChange", 0x03},
+ {"PHYLinkStatusUnchanged", 0x04},
+ {"TargetMissing", 0x05},
+ {NULL, 0},
+ {"Unknown Status", 0}
+};
+
+struct mpr_table_lookup mpr_linkrate_names[] = {
+ {"PHY disabled", 0x01},
+ {"Speed Negotiation Failed", 0x02},
+ {"SATA OOB Complete", 0x03},
+ {"SATA Port Selector", 0x04},
+ {"SMP Reset in Progress", 0x05},
+ {"1.5Gbps", 0x08},
+ {"3.0Gbps", 0x09},
+ {"6.0Gbps", 0x0a},
+ {NULL, 0},
+ {"LinkRate Unknown", 0x00}
+};
+
+struct mpr_table_lookup mpr_sasdev0_devtype[] = {
+ {"End Device", 0x01},
+ {"Edge Expander", 0x02},
+ {"Fanout Expander", 0x03},
+ {NULL, 0},
+ {"No Device", 0x00}
+};
+
+struct mpr_table_lookup mpr_phyinfo_reason_names[] = {
+ {"Power On", 0x01},
+ {"Hard Reset", 0x02},
+ {"SMP Phy Control Link Reset", 0x03},
+ {"Loss DWORD Sync", 0x04},
+ {"Multiplex Sequence", 0x05},
+ {"I-T Nexus Loss Timer", 0x06},
+ {"Break Timeout Timer", 0x07},
+ {"PHY Test Function", 0x08},
+ {NULL, 0},
+ {"Unknown Reason", 0x00}
+};
+
+struct mpr_table_lookup mpr_whoinit_names[] = {
+ {"System BIOS", 0x01},
+ {"ROM BIOS", 0x02},
+ {"PCI Peer", 0x03},
+ {"Host Driver", 0x04},
+ {"Manufacturing", 0x05},
+ {NULL, 0},
+ {"Not Initialized", 0x00}
+};
+
+struct mpr_table_lookup mpr_sasdisc_reason[] = {
+ {"Discovery Started", 0x01},
+ {"Discovery Complete", 0x02},
+ {NULL, 0},
+ {"Unknown", 0x00}
+};
+
+struct mpr_table_lookup mpr_sastopo_exp[] = {
+ {"Added", 0x01},
+ {"Not Responding", 0x02},
+ {"Responding", 0x03},
+ {"Delay Not Responding", 0x04},
+ {NULL, 0},
+ {"Unknown", 0x00}
+};
+
+struct mpr_table_lookup mpr_sasdev_reason[] = {
+ {"SMART Data", 0x05},
+ {"Unsupported", 0x07},
+ {"Internal Device Reset", 0x08},
+ {"Task Abort Internal", 0x09},
+ {"Abort Task Set Internal", 0x0a},
+ {"Clear Task Set Internal", 0x0b},
+ {"Query Task Internal", 0x0c},
+ {"Async Notification", 0x0d},
+ {"Cmp Internal Device Reset", 0x0e},
+ {"Cmp Task Abort Internal", 0x0f},
+ {"Sata Init Failure", 0x10},
+ {NULL, 0},
+ {"Unknown", 0x00}
+};
+
+/*
+ * Format a SAS DeviceInfo bitmask into 'string' (at most 'len' bytes),
+ * as a %b flag dump followed by the device-type name decoded from the
+ * low two bits via mpr_sasdev0_devtype.
+ */
+void
+mpr_describe_devinfo(uint32_t devinfo, char *string, int len)
+{
+ snprintf(string, len, "%b,%s", devinfo,
+ "\20" "\4SataHost" "\5SmpInit" "\6StpInit" "\7SspInit"
+ "\10SataDev" "\11SmpTarg" "\12StpTarg" "\13SspTarg" "\14Direct"
+ "\15LsiDev" "\16AtapiDev" "\17SepDev",
+ mpr_describe_table(mpr_sasdev0_devtype, devinfo & 0x03));
+}
+
+/*
+ * Dump the interesting fields of an IOCFacts reply via the driver's
+ * debug-print machinery (MPR_PRINTFIELD / mpr_dprint_field at MPR_XINFO).
+ * Debug output only; no state is changed.
+ */
+void
+mpr_print_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
+{
+
+ MPR_PRINTFIELD_START(sc, "IOCFacts");
+ MPR_PRINTFIELD(sc, facts, MsgVersion, 0x%x);
+ MPR_PRINTFIELD(sc, facts, HeaderVersion, 0x%x);
+ MPR_PRINTFIELD(sc, facts, IOCNumber, %d);
+ MPR_PRINTFIELD(sc, facts, IOCExceptions, 0x%x);
+ MPR_PRINTFIELD(sc, facts, MaxChainDepth, %d);
+ mpr_dprint_field(sc, MPR_XINFO, "WhoInit: %s\n",
+ mpr_describe_table(mpr_whoinit_names, facts->WhoInit));
+ MPR_PRINTFIELD(sc, facts, NumberOfPorts, %d);
+ MPR_PRINTFIELD(sc, facts, RequestCredit, %d);
+ MPR_PRINTFIELD(sc, facts, ProductID, 0x%x);
+ mpr_dprint_field(sc, MPR_XINFO, "IOCCapabilities: %b\n",
+ facts->IOCCapabilities, "\20" "\3ScsiTaskFull" "\4DiagTrace"
+ "\5SnapBuf" "\6ExtBuf" "\7EEDP" "\10BiDirTarg" "\11Multicast"
+ "\14TransRetry" "\15IR" "\16EventReplay" "\17RaidAccel"
+ "\20MSIXIndex" "\21HostDisc");
+ mpr_dprint_field(sc, MPR_XINFO, "FWVersion= %d-%d-%d-%d\n",
+ facts->FWVersion.Struct.Major,
+ facts->FWVersion.Struct.Minor,
+ facts->FWVersion.Struct.Unit,
+ facts->FWVersion.Struct.Dev);
+ MPR_PRINTFIELD(sc, facts, IOCRequestFrameSize, %d);
+ MPR_PRINTFIELD(sc, facts, MaxInitiators, %d);
+ MPR_PRINTFIELD(sc, facts, MaxTargets, %d);
+ MPR_PRINTFIELD(sc, facts, MaxSasExpanders, %d);
+ MPR_PRINTFIELD(sc, facts, MaxEnclosures, %d);
+ mpr_dprint_field(sc, MPR_XINFO, "ProtocolFlags: %b\n",
+ facts->ProtocolFlags, "\20" "\1ScsiTarg" "\2ScsiInit");
+ MPR_PRINTFIELD(sc, facts, HighPriorityCredit, %d);
+ MPR_PRINTFIELD(sc, facts, MaxReplyDescriptorPostQueueDepth, %d);
+ MPR_PRINTFIELD(sc, facts, ReplyFrameSize, %d);
+ MPR_PRINTFIELD(sc, facts, MaxVolumes, %d);
+ MPR_PRINTFIELD(sc, facts, MaxDevHandle, %d);
+ MPR_PRINTFIELD(sc, facts, MaxPersistentEntries, %d);
+}
+
+/* Dump a PortFacts reply via the MPR_PRINTFIELD debug machinery. */
+void
+mpr_print_portfacts(struct mpr_softc *sc, MPI2_PORT_FACTS_REPLY *facts)
+{
+
+ MPR_PRINTFIELD_START(sc, "PortFacts");
+ MPR_PRINTFIELD(sc, facts, PortNumber, %d);
+ MPR_PRINTFIELD(sc, facts, PortType, 0x%x);
+ MPR_PRINTFIELD(sc, facts, MaxPostedCmdBuffers, %d);
+}
+
+/*
+ * Dump the common header fields of an event-notification reply at
+ * MPR_EVENT debug level, decoding the event code with mpr_event_names.
+ */
+void
+mpr_print_event(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event)
+{
+
+ MPR_EVENTFIELD_START(sc, "EventReply");
+ MPR_EVENTFIELD(sc, event, EventDataLength, %d);
+ MPR_EVENTFIELD(sc, event, AckRequired, %d);
+ mpr_dprint_field(sc, MPR_EVENT, "Event: %s (0x%x)\n",
+ mpr_describe_table(mpr_event_names, event->Event), event->Event);
+ MPR_EVENTFIELD(sc, event, EventContext, 0x%x);
+}
+
+/* Dump SAS Device Page 0 contents at MPR_XINFO debug level. */
+void
+mpr_print_sasdev0(struct mpr_softc *sc, MPI2_CONFIG_PAGE_SAS_DEV_0 *buf)
+{
+ MPR_PRINTFIELD_START(sc, "SAS Device Page 0");
+ MPR_PRINTFIELD(sc, buf, Slot, %d);
+ MPR_PRINTFIELD(sc, buf, EnclosureHandle, 0x%x);
+ mpr_dprint_field(sc, MPR_XINFO, "SASAddress: 0x%jx\n",
+ mpr_to_u64(&buf->SASAddress));
+ MPR_PRINTFIELD(sc, buf, ParentDevHandle, 0x%x);
+ MPR_PRINTFIELD(sc, buf, PhyNum, %d);
+ MPR_PRINTFIELD(sc, buf, AccessStatus, 0x%x);
+ MPR_PRINTFIELD(sc, buf, DevHandle, 0x%x);
+ MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, 0x%x);
+ MPR_PRINTFIELD(sc, buf, ZoneGroup, %d);
+ /* DeviceInfo: flag bits plus device type decoded from the low two bits. */
+ mpr_dprint_field(sc, MPR_XINFO, "DeviceInfo: %b,%s\n", buf->DeviceInfo,
+ "\20" "\4SataHost" "\5SmpInit" "\6StpInit" "\7SspInit"
+ "\10SataDev" "\11SmpTarg" "\12StpTarg" "\13SspTarg" "\14Direct"
+ "\15LsiDev" "\16AtapiDev" "\17SepDev",
+ mpr_describe_table(mpr_sasdev0_devtype, buf->DeviceInfo & 0x03));
+ MPR_PRINTFIELD(sc, buf, Flags, 0x%x);
+ MPR_PRINTFIELD(sc, buf, PhysicalPort, %d);
+ MPR_PRINTFIELD(sc, buf, MaxPortConnections, %d);
+ mpr_dprint_field(sc, MPR_XINFO, "DeviceName: 0x%jx\n",
+ mpr_to_u64(&buf->DeviceName));
+ MPR_PRINTFIELD(sc, buf, PortGroups, %d);
+ MPR_PRINTFIELD(sc, buf, DmaGroup, %d);
+ MPR_PRINTFIELD(sc, buf, ControlGroup, %d);
+}
+
+/*
+ * Print a SAS event: the common header via mpr_print_event(), then
+ * per-event detail for the SAS event types decoded below.  Unknown
+ * event types get only the common header.  Debug output only.
+ */
+void
+mpr_print_evt_sas(struct mpr_softc *sc, MPI2_EVENT_NOTIFICATION_REPLY *event)
+{
+
+ mpr_print_event(sc, event);
+
+ switch(event->Event) {
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ MPI2_EVENT_DATA_SAS_DISCOVERY *data;
+
+ data = (MPI2_EVENT_DATA_SAS_DISCOVERY *)&event->EventData;
+ mpr_dprint_field(sc, MPR_EVENT, "Flags: %b\n", data->Flags,
+ "\20" "\1InProgress" "\2DeviceChange");
+ mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n",
+ mpr_describe_table(mpr_sasdisc_reason, data->ReasonCode));
+ MPR_EVENTFIELD(sc, data, PhysicalPort, %d);
+ mpr_dprint_field(sc, MPR_EVENT, "DiscoveryStatus: %b\n",
+ data->DiscoveryStatus, "\20"
+ "\1Loop" "\2UnaddressableDev" "\3DupSasAddr" "\5SmpTimeout"
+ "\6ExpRouteFull" "\7RouteIndexError" "\10SmpFailed"
+ "\11SmpCrcError" "\12SubSubLink" "\13TableTableLink"
+ "\14UnsupDevice" "\15TableSubLink" "\16MultiDomain"
+ "\17MultiSub" "\20MultiSubSub" "\34DownstreamInit"
+ "\35MaxPhys" "\36MaxTargs" "\37MaxExpanders"
+ "\40MaxEnclosures");
+ break;
+ }
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *data;
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY *phy;
+ int i, phynum;
+
+ data = (MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *)
+ &event->EventData;
+ MPR_EVENTFIELD(sc, data, EnclosureHandle, 0x%x);
+ MPR_EVENTFIELD(sc, data, ExpanderDevHandle, 0x%x);
+ MPR_EVENTFIELD(sc, data, NumPhys, %d);
+ MPR_EVENTFIELD(sc, data, NumEntries, %d);
+ MPR_EVENTFIELD(sc, data, StartPhyNum, %d);
+ mpr_dprint_field(sc, MPR_EVENT, "ExpStatus: %s (0x%x)\n",
+ mpr_describe_table(mpr_sastopo_exp, data->ExpStatus),
+ data->ExpStatus);
+ MPR_EVENTFIELD(sc, data, PhysicalPort, %d);
+ /* One entry per changed PHY; PHY numbers start at StartPhyNum. */
+ for (i = 0; i < data->NumEntries; i++) {
+ phy = &data->PHY[i];
+ phynum = data->StartPhyNum + i;
+ mpr_dprint_field(sc, MPR_EVENT,
+ "PHY[%d].AttachedDevHandle: 0x%04x\n", phynum,
+ phy->AttachedDevHandle);
+ mpr_dprint_field(sc, MPR_EVENT,
+ "PHY[%d].LinkRate: %s (0x%x)\n", phynum,
+ mpr_describe_table(mpr_linkrate_names,
+ (phy->LinkRate >> 4) & 0xf), phy->LinkRate);
+ mpr_dprint_field(sc,MPR_EVENT,"PHY[%d].PhyStatus: "
+ "%s\n", phynum,
+ mpr_describe_table(mpr_phystatus_names,
+ phy->PhyStatus));
+ }
+ break;
+ }
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE *data;
+
+ data = (MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE *)
+ &event->EventData;
+ MPR_EVENTFIELD(sc, data, EnclosureHandle, 0x%x);
+ mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n",
+ mpr_describe_table(mpr_sastopo_exp, data->ReasonCode));
+ MPR_EVENTFIELD(sc, data, PhysicalPort, %d);
+ MPR_EVENTFIELD(sc, data, NumSlots, %d);
+ MPR_EVENTFIELD(sc, data, StartSlot, %d);
+ MPR_EVENTFIELD(sc, data, PhyBits, 0x%x);
+ break;
+ }
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ {
+ MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *data;
+
+ data = (MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
+ &event->EventData;
+ MPR_EVENTFIELD(sc, data, TaskTag, 0x%x);
+ mpr_dprint_field(sc, MPR_EVENT, "ReasonCode: %s\n",
+ mpr_describe_table(mpr_sasdev_reason, data->ReasonCode));
+ MPR_EVENTFIELD(sc, data, ASC, 0x%x);
+ MPR_EVENTFIELD(sc, data, ASCQ, 0x%x);
+ MPR_EVENTFIELD(sc, data, DevHandle, 0x%x);
+ mpr_dprint_field(sc, MPR_EVENT, "SASAddress: 0x%jx\n",
+ mpr_to_u64(&data->SASAddress));
+ /*
+ * NOTE(review): missing "break" -- this case falls through to
+ * "default", which only breaks, so it is harmless today; add a
+ * break (or FALLTHROUGH marker) to keep it that way.
+ */
+ }
+ default:
+ break;
+ }
+}
+
+/*
+ * Dump SAS Expander Page 1 (per-PHY) at MPR_XINFO debug level.  Link
+ * rates and PhyInfo reason codes are decoded through the lookup tables.
+ */
+void
+mpr_print_expander1(struct mpr_softc *sc, MPI2_CONFIG_PAGE_EXPANDER_1 *buf)
+{
+ MPR_PRINTFIELD_START(sc, "SAS Expander Page 1 #%d", buf->Phy);
+ MPR_PRINTFIELD(sc, buf, PhysicalPort, %d);
+ MPR_PRINTFIELD(sc, buf, NumPhys, %d);
+ MPR_PRINTFIELD(sc, buf, Phy, %d);
+ MPR_PRINTFIELD(sc, buf, NumTableEntriesProgrammed, %d);
+ /* Programmed/Hw link rate: max rate lives in the high nibble. */
+ mpr_dprint_field(sc, MPR_XINFO, "ProgrammedLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ (buf->ProgrammedLinkRate >> 4) & 0xf), buf->ProgrammedLinkRate);
+ mpr_dprint_field(sc, MPR_XINFO, "HwLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ (buf->HwLinkRate >> 4) & 0xf), buf->HwLinkRate);
+ MPR_PRINTFIELD(sc, buf, AttachedDevHandle, 0x%04x);
+ mpr_dprint_field(sc, MPR_XINFO, "PhyInfo Reason: %s (0x%x)\n",
+ mpr_describe_table(mpr_phyinfo_reason_names,
+ (buf->PhyInfo >> 16) & 0xf), buf->PhyInfo);
+ mpr_dprint_field(sc, MPR_XINFO, "AttachedDeviceInfo: %b,%s\n",
+ buf->AttachedDeviceInfo, "\20" "\4SATAhost" "\5SMPinit" "\6STPinit"
+ "\7SSPinit" "\10SATAdev" "\11SMPtarg" "\12STPtarg" "\13SSPtarg"
+ "\14Direct" "\15LSIdev" "\16ATAPIdev" "\17SEPdev",
+ mpr_describe_table(mpr_sasdev0_devtype,
+ buf->AttachedDeviceInfo & 0x03));
+ MPR_PRINTFIELD(sc, buf, ExpanderDevHandle, 0x%04x);
+ MPR_PRINTFIELD(sc, buf, ChangeCount, %d);
+ /* Negotiated rate is in the low nibble, unlike the fields above. */
+ mpr_dprint_field(sc, MPR_XINFO, "NegotiatedLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ buf->NegotiatedLinkRate & 0xf), buf->NegotiatedLinkRate);
+ MPR_PRINTFIELD(sc, buf, PhyIdentifier, %d);
+ MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, %d);
+ MPR_PRINTFIELD(sc, buf, DiscoveryInfo, 0x%x);
+ MPR_PRINTFIELD(sc, buf, AttachedPhyInfo, 0x%x);
+ mpr_dprint_field(sc, MPR_XINFO, "AttachedPhyInfo Reason: %s (0x%x)\n",
+ mpr_describe_table(mpr_phyinfo_reason_names,
+ buf->AttachedPhyInfo & 0xf), buf->AttachedPhyInfo);
+ MPR_PRINTFIELD(sc, buf, ZoneGroup, %d);
+ MPR_PRINTFIELD(sc, buf, SelfConfigStatus, 0x%x);
+}
+
+/* Dump SAS PHY Page 0 at MPR_XINFO debug level. */
+void
+mpr_print_sasphy0(struct mpr_softc *sc, MPI2_CONFIG_PAGE_SAS_PHY_0 *buf)
+{
+ MPR_PRINTFIELD_START(sc, "SAS PHY Page 0");
+ MPR_PRINTFIELD(sc, buf, OwnerDevHandle, 0x%04x);
+ MPR_PRINTFIELD(sc, buf, AttachedDevHandle, 0x%04x);
+ MPR_PRINTFIELD(sc, buf, AttachedPhyIdentifier, %d);
+ mpr_dprint_field(sc, MPR_XINFO, "AttachedPhyInfo Reason: %s (0x%x)\n",
+ mpr_describe_table(mpr_phyinfo_reason_names,
+ buf->AttachedPhyInfo & 0xf), buf->AttachedPhyInfo);
+ /* Programmed/Hw rates in the high nibble; negotiated in the low. */
+ mpr_dprint_field(sc, MPR_XINFO, "ProgrammedLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ (buf->ProgrammedLinkRate >> 4) & 0xf), buf->ProgrammedLinkRate);
+ mpr_dprint_field(sc, MPR_XINFO, "HwLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ (buf->HwLinkRate >> 4) & 0xf), buf->HwLinkRate);
+ MPR_PRINTFIELD(sc, buf, ChangeCount, %d);
+ MPR_PRINTFIELD(sc, buf, Flags, 0x%x);
+ mpr_dprint_field(sc, MPR_XINFO, "PhyInfo Reason: %s (0x%x)\n",
+ mpr_describe_table(mpr_phyinfo_reason_names,
+ (buf->PhyInfo >> 16) & 0xf), buf->PhyInfo);
+ mpr_dprint_field(sc, MPR_XINFO, "NegotiatedLinkRate: %s (0x%x)\n",
+ mpr_describe_table(mpr_linkrate_names,
+ buf->NegotiatedLinkRate & 0xf), buf->NegotiatedLinkRate);
+}
+
+/*
+ * Dump a command's scatter/gather list to the console, starting 'offset'
+ * 32-bit words into the request frame.  Commands flagged
+ * MPR_CM_FLAGS_SGE_SIMPLE are walked as MPI2 simple SGEs; everything
+ * else is walked as IEEE simple/chain SGEs (SAS3), following chain
+ * elements through cm_chain_list.  Debug output only.
+ */
+void
+mpr_print_sgl(struct mpr_softc *sc, struct mpr_command *cm, int offset)
+{
+ MPI2_IEEE_SGE_SIMPLE64 *ieee_sge;
+ MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
+ MPI2_SGE_SIMPLE64 *sge;
+ MPI2_REQUEST_HEADER *req;
+ struct mpr_chain *chain = NULL;
+ char *frame;
+ u_int i = 0, flags, length;
+
+ req = (MPI2_REQUEST_HEADER *)cm->cm_req;
+ frame = (char *)cm->cm_req;
+ /* 'offset' counts 32-bit words, hence the * 4. */
+ ieee_sge = (MPI2_IEEE_SGE_SIMPLE64 *)&frame[offset * 4];
+ sge = (MPI2_SGE_SIMPLE64 *)&frame[offset * 4];
+ printf("SGL for command %p\n", cm);
+
+ /* assumes the request frame is at least 128 bytes -- TODO confirm */
+ hexdump(frame, 128, NULL, 0);
+ while ((frame != NULL) && (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE))) {
+ flags = ieee_sge->Flags;
+ length = le32toh(ieee_sge->Length);
+ printf("IEEE seg%d flags=0x%02x len=0x%08x addr=0x%016jx\n", i,
+ flags, length, mpr_to_u64(&ieee_sge->Address));
+ if (flags & MPI25_IEEE_SGE_FLAGS_END_OF_LIST)
+ break;
+ ieee_sge++;
+ i++;
+ if (flags & MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT) {
+ ieee_sgc = (MPI25_IEEE_SGE_CHAIN64 *)ieee_sge;
+ printf("IEEE chain flags=0x%x len=0x%x Offset=0x%x "
+ "Address=0x%016jx\n", ieee_sgc->Flags,
+ le32toh(ieee_sgc->Length),
+ ieee_sgc->NextChainOffset,
+ mpr_to_u64(&ieee_sgc->Address));
+ /*
+ * NOTE(review): assumes cm_chain_list has one entry per
+ * chain SGE encountered; if the list is empty/short,
+ * 'chain' is NULL and chain->chain below dereferences
+ * it -- confirm callers guarantee this invariant.
+ */
+ if (chain == NULL)
+ chain = TAILQ_FIRST(&cm->cm_chain_list);
+ else
+ chain = TAILQ_NEXT(chain, chain_link);
+ frame = (char *)chain->chain;
+ ieee_sge = (MPI2_IEEE_SGE_SIMPLE64 *)frame;
+ hexdump(frame, 128, NULL, 0);
+ }
+ }
+ while ((frame != NULL) && (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
+ flags = le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT;
+ printf("seg%d flags=0x%02x len=0x%06x addr=0x%016jx\n", i,
+ flags, le32toh(sge->FlagsLength) & 0xffffff,
+ mpr_to_u64(&sge->Address));
+ if (flags & (MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_END_OF_BUFFER))
+ break;
+ sge++;
+ i++;
+ }
+}
+
+/* Dump the SGL of a SCSI I/O request, using the frame's own SGLOffset0. */
+void
+mpr_print_scsiio_cmd(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ MPI2_SCSI_IO_REQUEST *req;
+
+ req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
+ mpr_print_sgl(sc, cm, req->SGLOffset0);
+}
+
diff --git a/sys/dev/lindev/lindev.h b/sys/dev/mpr/mpr_table.h
index 9b0be8250a02d..6539232138e31 100644
--- a/sys/dev/lindev/lindev.h
+++ b/sys/dev/mpr/mpr_table.h
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2009 "Bjoern A. Zeeb" <bz@FreeBSD.org>
+ * Copyright (c) 2009 Yahoo! Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,9 +26,28 @@
* $FreeBSD$
*/
-#ifndef _DEV_LINDEV_LINDEV_H
-#define _DEV_LINDEV_LINDEV_H
+#ifndef _MPR_TABLE_H
+#define _MPR_TABLE_H
-int lindev_modevent_full(module_t, int, void *);
+struct mpr_table_lookup {
+ char *string;
+ u_int code;
+};
-#endif /* _DEV_LINDEV_LINDEV_H */
+char * mpr_describe_table(struct mpr_table_lookup *table, u_int code);
+void mpr_describe_devinfo(uint32_t devinfo, char *string, int len);
+
+extern struct mpr_table_lookup mpr_event_names[];
+extern struct mpr_table_lookup mpr_phystatus_names[];
+extern struct mpr_table_lookup mpr_linkrate_names[];
+
+void mpr_print_iocfacts(struct mpr_softc *, MPI2_IOC_FACTS_REPLY *);
+void mpr_print_portfacts(struct mpr_softc *, MPI2_PORT_FACTS_REPLY *);
+void mpr_print_event(struct mpr_softc *, MPI2_EVENT_NOTIFICATION_REPLY *);
+void mpr_print_sasdev0(struct mpr_softc *, MPI2_CONFIG_PAGE_SAS_DEV_0 *);
+void mpr_print_evt_sas(struct mpr_softc *, MPI2_EVENT_NOTIFICATION_REPLY *);
+void mpr_print_expander1(struct mpr_softc *, MPI2_CONFIG_PAGE_EXPANDER_1 *);
+void mpr_print_sasphy0(struct mpr_softc *, MPI2_CONFIG_PAGE_SAS_PHY_0 *);
+void mpr_print_sgl(struct mpr_softc *, struct mpr_command *, int);
+void mpr_print_scsiio_cmd(struct mpr_softc *, struct mpr_command *);
+#endif
diff --git a/sys/dev/mpr/mpr_user.c b/sys/dev/mpr/mpr_user.c
new file mode 100644
index 0000000000000..60680f5288a31
--- /dev/null
+++ b/sys/dev/mpr/mpr_user.c
@@ -0,0 +1,2453 @@
+/*-
+ * Copyright (c) 2008 Yahoo!, Inc.
+ * All rights reserved.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD userland interface
+ */
+/*-
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * LSI MPT-Fusion Host Adapter FreeBSD
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+
+/* TODO Move headers to mprvar */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/selinfo.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/bio.h>
+#include <sys/malloc.h>
+#include <sys/uio.h>
+#include <sys/sysctl.h>
+#include <sys/ioccom.h>
+#include <sys/endian.h>
+#include <sys/queue.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include <cam/cam.h>
+#include <cam/scsi/scsi_all.h>
+
+#include <dev/mpr/mpi/mpi2_type.h>
+#include <dev/mpr/mpi/mpi2.h>
+#include <dev/mpr/mpi/mpi2_ioc.h>
+#include <dev/mpr/mpi/mpi2_cnfg.h>
+#include <dev/mpr/mpi/mpi2_init.h>
+#include <dev/mpr/mpi/mpi2_tool.h>
+#include <dev/mpr/mpr_ioctl.h>
+#include <dev/mpr/mprvar.h>
+#include <dev/mpr/mpr_table.h>
+#include <dev/mpr/mpr_sas.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+/* Entry points for the /dev/mpr%d character device. */
+static d_open_t mpr_open;
+static d_close_t mpr_close;
+static d_ioctl_t mpr_ioctl_devsw;
+
+static struct cdevsw mpr_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_open = mpr_open,
+ .d_close = mpr_close,
+ .d_ioctl = mpr_ioctl_devsw,
+ .d_name = "mpr",
+};
+
+/*
+ * Per-MPI-function request-preparation hooks used by mpr_user_command();
+ * each fills in the SGE/defaults for one pass-through request type.
+ */
+typedef int (mpr_user_f)(struct mpr_command *, struct mpr_usr_command *);
+static mpr_user_f mpi_pre_ioc_facts;
+static mpr_user_f mpi_pre_port_facts;
+static mpr_user_f mpi_pre_fw_download;
+static mpr_user_f mpi_pre_fw_upload;
+static mpr_user_f mpi_pre_sata_passthrough;
+static mpr_user_f mpi_pre_smp_passthrough;
+static mpr_user_f mpi_pre_config;
+static mpr_user_f mpi_pre_sas_io_unit_control;
+
+static int mpr_user_read_cfg_header(struct mpr_softc *,
+ struct mpr_cfg_page_req *);
+static int mpr_user_read_cfg_page(struct mpr_softc *,
+ struct mpr_cfg_page_req *, void *);
+static int mpr_user_read_extcfg_header(struct mpr_softc *,
+ struct mpr_ext_cfg_page_req *);
+static int mpr_user_read_extcfg_page(struct mpr_softc *,
+ struct mpr_ext_cfg_page_req *, void *);
+static int mpr_user_write_cfg_page(struct mpr_softc *,
+ struct mpr_cfg_page_req *, void *);
+static int mpr_user_setup_request(struct mpr_command *,
+ struct mpr_usr_command *);
+static int mpr_user_command(struct mpr_softc *, struct mpr_usr_command *);
+
+static int mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data);
+static void mpr_user_get_adapter_data(struct mpr_softc *sc,
+ mpr_adapter_data_t *data);
+static void mpr_user_read_pci_info(struct mpr_softc *sc,
+ mpr_pci_info_t *data);
+static uint8_t mpr_get_fw_diag_buffer_number(struct mpr_softc *sc,
+ uint32_t unique_id);
+static int mpr_post_fw_diag_buffer(struct mpr_softc *sc,
+ mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
+static int mpr_release_fw_diag_buffer(struct mpr_softc *sc,
+ mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
+ uint32_t diag_type);
+static int mpr_diag_register(struct mpr_softc *sc,
+ mpr_fw_diag_register_t *diag_register, uint32_t *return_code);
+static int mpr_diag_unregister(struct mpr_softc *sc,
+ mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
+static int mpr_diag_query(struct mpr_softc *sc,
+ mpr_fw_diag_query_t *diag_query, uint32_t *return_code);
+static int mpr_diag_read_buffer(struct mpr_softc *sc,
+ mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
+ uint32_t *return_code);
+static int mpr_diag_release(struct mpr_softc *sc,
+ mpr_fw_diag_release_t *diag_release, uint32_t *return_code);
+static int mpr_do_diag_action(struct mpr_softc *sc, uint32_t action,
+ uint8_t *diag_action, uint32_t length, uint32_t *return_code);
+static int mpr_user_diag_action(struct mpr_softc *sc,
+ mpr_diag_action_t *data);
+static void mpr_user_event_query(struct mpr_softc *sc,
+ mpr_event_query_t *data);
+static void mpr_user_event_enable(struct mpr_softc *sc,
+ mpr_event_enable_t *data);
+static int mpr_user_event_report(struct mpr_softc *sc,
+ mpr_event_report_t *data);
+static int mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data);
+static int mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data);
+
+static MALLOC_DEFINE(M_MPRUSER, "mpr_user", "Buffers for mpr(4) ioctls");
+
+/* Macros from compat/freebsd32/freebsd32.h */
+#define PTRIN(v) (void *)(uintptr_t)(v)
+#define PTROUT(v) (uint32_t)(uintptr_t)(v)
+
+#define CP(src,dst,fld) do { (dst).fld = (src).fld; } while (0)
+#define PTRIN_CP(src,dst,fld) \
+ do { (dst).fld = PTRIN((src).fld); } while (0)
+#define PTROUT_CP(src,dst,fld) \
+ do { (dst).fld = PTROUT((src).fld); } while (0)
+
+/*
+ * MPI functions that support IEEE SGLs for SAS3.
+ */
+static uint8_t ieee_sgl_func_list[] = {
+ MPI2_FUNCTION_SCSI_IO_REQUEST,
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
+ MPI2_FUNCTION_SMP_PASSTHROUGH,
+ MPI2_FUNCTION_SATA_PASSTHROUGH,
+ MPI2_FUNCTION_FW_UPLOAD,
+ MPI2_FUNCTION_FW_DOWNLOAD,
+ MPI2_FUNCTION_TARGET_ASSIST,
+ MPI2_FUNCTION_TARGET_STATUS_SEND,
+ MPI2_FUNCTION_TOOLBOX
+};
+
+/*
+ * Create the /dev/mpr%d character device for this adapter instance.
+ * The node is root:operator mode 0640; si_drv1 carries the softc back
+ * to the cdevsw handlers.  Returns 0 on success or ENOMEM.
+ */
+int
+mpr_attach_user(struct mpr_softc *sc)
+{
+ int unit;
+
+ unit = device_get_unit(sc->mpr_dev);
+ sc->mpr_cdev = make_dev(&mpr_cdevsw, unit, UID_ROOT, GID_OPERATOR,
+ 0640, "mpr%d", unit);
+ if (sc->mpr_cdev == NULL) {
+ return (ENOMEM);
+ }
+ sc->mpr_cdev->si_drv1 = sc;
+ return (0);
+}
+
+/* Destroy the /dev/mpr%d node created by mpr_attach_user(), if any. */
+void
+mpr_detach_user(struct mpr_softc *sc)
+{
+
+ /* XXX: do a purge of pending requests? */
+ if (sc->mpr_cdev != NULL)
+ destroy_dev(sc->mpr_cdev);
+}
+
+/* No per-open state; all work happens in the ioctl handler. */
+static int
+mpr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+ return (0);
+}
+
+/* Nothing to tear down per close; see mpr_open(). */
+static int
+mpr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+
+ return (0);
+}
+
+/*
+ * Fetch a config-page header for a userland request.  On success the
+ * header is copied into page_req->header and the IOC status is returned
+ * in page_req->ioc_status (little-endian); the function itself returns
+ * 0, or ETIMEDOUT if the firmware request timed out.
+ */
+static int
+mpr_user_read_cfg_header(struct mpr_softc *sc,
+ struct mpr_cfg_page_req *page_req)
+{
+ MPI2_CONFIG_PAGE_HEADER *hdr;
+ struct mpr_config_params params;
+ int error;
+
+ hdr = &params.hdr.Struct;
+ params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ params.page_address = le32toh(page_req->page_address);
+ hdr->PageVersion = 0;
+ hdr->PageLength = 0;
+ hdr->PageNumber = page_req->header.PageNumber;
+ hdr->PageType = page_req->header.PageType;
+ params.buffer = NULL;
+ params.length = 0;
+ params.callback = NULL;
+
+ if ((error = mpr_read_config_page(sc, &params)) != 0) {
+ /*
+ * Leave the request. Without resetting the chip, it's
+ * still owned by it and we'll just get into trouble
+ * freeing it now. Mark it as abandoned so that if it
+ * shows up later it can be freed.
+ */
+ /*
+ * NOTE(review): the comment above promises the request is
+ * marked abandoned, but no such marking happens here --
+ * verify against mpr_read_config_page()'s ownership rules.
+ */
+ mpr_printf(sc, "read_cfg_header timed out\n");
+ return (ETIMEDOUT);
+ }
+
+ page_req->ioc_status = htole16(params.status);
+ if ((page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ bcopy(hdr, &page_req->header, sizeof(page_req->header));
+ }
+
+ return (0);
+}
+
+/*
+ * Read the current config page described by the header at the start of
+ * 'buf' into 'buf' itself (buf holds the header from a prior header
+ * read and receives the full page).  IOC status is returned through
+ * page_req->ioc_status; returns 0 or ETIMEDOUT.
+ */
+static int
+mpr_user_read_cfg_page(struct mpr_softc *sc,
+ struct mpr_cfg_page_req *page_req,
+ void *buf)
+{
+ MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
+ struct mpr_config_params params;
+ int error;
+
+ reqhdr = buf;
+ hdr = &params.hdr.Struct;
+ hdr->PageVersion = reqhdr->PageVersion;
+ hdr->PageLength = reqhdr->PageLength;
+ hdr->PageNumber = reqhdr->PageNumber;
+ /* Strip attribute bits; only the page type goes to the firmware. */
+ hdr->PageType = reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK;
+ params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ params.page_address = le32toh(page_req->page_address);
+ params.buffer = buf;
+ params.length = le32toh(page_req->len);
+ params.callback = NULL;
+
+ if ((error = mpr_read_config_page(sc, &params)) != 0) {
+ mpr_printf(sc, "mpr_user_read_cfg_page timed out\n");
+ return (ETIMEDOUT);
+ }
+
+ page_req->ioc_status = htole16(params.status);
+ return (0);
+}
+
+/*
+ * Fetch an extended config-page header for a userland request.  On
+ * success the header fields are copied back into ext_page_req->header
+ * and the IOC status into ext_page_req->ioc_status (little-endian);
+ * returns 0, or ETIMEDOUT if the firmware request timed out.
+ */
+static int
+mpr_user_read_extcfg_header(struct mpr_softc *sc,
+ struct mpr_ext_cfg_page_req *ext_page_req)
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
+ struct mpr_config_params params;
+ int error;
+
+ hdr = &params.hdr.Ext;
+ params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ hdr->PageVersion = ext_page_req->header.PageVersion;
+ hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ hdr->ExtPageLength = 0;
+ hdr->PageNumber = ext_page_req->header.PageNumber;
+ hdr->ExtPageType = ext_page_req->header.ExtPageType;
+ params.page_address = le32toh(ext_page_req->page_address);
+ /*
+ * Header-only action: no data buffer.  These were previously left
+ * uninitialized (stack garbage), unlike mpr_user_read_cfg_header().
+ */
+ params.buffer = NULL;
+ params.length = 0;
+ params.callback = NULL;
+ if ((error = mpr_read_config_page(sc, &params)) != 0) {
+ /*
+ * Leave the request. Without resetting the chip, it's
+ * still owned by it and we'll just get into trouble
+ * freeing it now. Mark it as abandoned so that if it
+ * shows up later it can be freed.
+ */
+ mpr_printf(sc, "mpr_user_read_extcfg_header timed out\n");
+ return (ETIMEDOUT);
+ }
+
+ ext_page_req->ioc_status = htole16(params.status);
+ if ((ext_page_req->ioc_status & MPI2_IOCSTATUS_MASK) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ ext_page_req->header.PageVersion = hdr->PageVersion;
+ ext_page_req->header.PageNumber = hdr->PageNumber;
+ ext_page_req->header.PageType = hdr->PageType;
+ ext_page_req->header.ExtPageLength = hdr->ExtPageLength;
+ ext_page_req->header.ExtPageType = hdr->ExtPageType;
+ }
+
+ return (0);
+}
+
+/*
+ * Read the current extended config page described by the extended
+ * header at the start of 'buf' into 'buf'.  IOC status is returned via
+ * ext_page_req->ioc_status; returns 0 or ETIMEDOUT.
+ */
+static int
+mpr_user_read_extcfg_page(struct mpr_softc *sc,
+ struct mpr_ext_cfg_page_req *ext_page_req, void *buf)
+{
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER *reqhdr, *hdr;
+ struct mpr_config_params params;
+ int error;
+
+ reqhdr = buf;
+ hdr = &params.hdr.Ext;
+ params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ params.page_address = le32toh(ext_page_req->page_address);
+ hdr->PageVersion = reqhdr->PageVersion;
+ hdr->PageNumber = reqhdr->PageNumber;
+ hdr->PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ hdr->ExtPageType = reqhdr->ExtPageType;
+ hdr->ExtPageLength = reqhdr->ExtPageLength;
+ params.buffer = buf;
+ params.length = le32toh(ext_page_req->len);
+ params.callback = NULL;
+
+ if ((error = mpr_read_config_page(sc, &params)) != 0) {
+ mpr_printf(sc, "mpr_user_read_extcfg_page timed out\n");
+ return (ETIMEDOUT);
+ }
+
+ ext_page_req->ioc_status = htole16(params.status);
+ return (0);
+}
+
+/*
+ * Write the config page contained in 'buf' to the controller on behalf
+ * of a user ioctl.  Only CHANGEABLE or PERSISTENT pages may be written.
+ * Returns 0, EINVAL for a non-writable page type, or ETIMEDOUT.
+ */
+static int
+mpr_user_write_cfg_page(struct mpr_softc *sc,
+    struct mpr_cfg_page_req *page_req, void *buf)
+{
+	MPI2_CONFIG_PAGE_HEADER *reqhdr, *hdr;
+	struct mpr_config_params params;
+	u_int hdr_attr;
+	int error;
+
+	reqhdr = buf;
+	hdr = &params.hdr.Struct;
+	/* Reject page types the firmware does not allow to be modified. */
+	hdr_attr = reqhdr->PageType & MPI2_CONFIG_PAGEATTR_MASK;
+	if (hdr_attr != MPI2_CONFIG_PAGEATTR_CHANGEABLE &&
+	    hdr_attr != MPI2_CONFIG_PAGEATTR_PERSISTENT) {
+		mpr_printf(sc, "page type 0x%x not changeable\n",
+		    reqhdr->PageType & MPI2_CONFIG_PAGETYPE_MASK);
+		return (EINVAL);
+	}
+
+	/*
+	 * There isn't any point in restoring stripped out attributes
+	 * if you then mask them going down to issue the request.
+	 */
+
+	hdr->PageVersion = reqhdr->PageVersion;
+	hdr->PageLength = reqhdr->PageLength;
+	hdr->PageNumber = reqhdr->PageNumber;
+	hdr->PageType = reqhdr->PageType;
+	params.action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	params.page_address = le32toh(page_req->page_address);
+	params.buffer = buf;
+	params.length = le32toh(page_req->len);
+	/* NULL callback => mpr_write_config_page() runs synchronously. */
+	params.callback = NULL;
+
+	if ((error = mpr_write_config_page(sc, &params)) != 0) {
+		mpr_printf(sc, "mpr_write_cfg_page timed out\n");
+		return (ETIMEDOUT);
+	}
+
+	page_req->ioc_status = htole16(params.status);
+	return (0);
+}
+
+/*
+ * Record where the SGL starts inside the request frame of 'cm'.
+ * 'req' points at the start of the request frame and 'sge' at the SGL
+ * field within it; the remaining space in the frame (frame size minus
+ * the offset of the SGL) becomes cm_sglsize.
+ */
+void
+mpr_init_sge(struct mpr_command *cm, void *req, void *sge)
+{
+	int off, space;
+
+	/* IOCRequestFrameSize is in 32-bit words; convert to bytes. */
+	space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
+	off = (uintptr_t)sge - (uintptr_t)req;
+
+	KASSERT(off < space, ("bad pointers %p %p, off %d, space %d",
+	    req, sge, off, space));
+
+	cm->cm_sge = sge;
+	cm->cm_sglsize = space - off;
+}
+
+/*
+ * Prepare the mpr_command for an IOC_FACTS request.
+ */
+static int
+mpi_pre_ioc_facts(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_IOC_FACTS_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_IOC_FACTS_REPLY *rpl;
+
+	/* The user-supplied sizes must match the MPI structures exactly. */
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	/* IOC_FACTS carries no SGL. */
+	cm->cm_sge = NULL;
+	cm->cm_sglsize = 0;
+	return (0);
+}
+
+/*
+ * Prepare the mpr_command for a PORT_FACTS request.
+ */
+static int
+mpi_pre_port_facts(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_PORT_FACTS_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_PORT_FACTS_REPLY *rpl;
+
+	/* The user-supplied sizes must match the MPI structures exactly. */
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	/* PORT_FACTS carries no SGL. */
+	cm->cm_sge = NULL;
+	cm->cm_sglsize = 0;
+	return (0);
+}
+
+/*
+ * Prepare the mpr_command for a FW_DOWNLOAD request.
+ */
+static int
+mpi_pre_fw_download(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI25_FW_DOWNLOAD_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_FW_DOWNLOAD_REPLY *rpl;
+	int error;
+
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	/* A download with no image payload makes no sense. */
+	if (cmd->len == 0)
+		return (EINVAL);
+
+	/*
+	 * NOTE(review): the image is copied in before the MsgFlags /
+	 * TotalImageSize validation below, so a rejected request still
+	 * pays for the copy.  Harmless, but the checks could come first.
+	 */
+	error = copyin(cmd->buf, cm->cm_data, cmd->len);
+	if (error != 0)
+		return (error);
+
+	mpr_init_sge(cm, req, &req->SGL);
+
+	/*
+	 * For now, the F/W image must be provided in a single request.
+	 */
+	if ((req->MsgFlags & MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT) == 0)
+		return (EINVAL);
+	if (req->TotalImageSize != cmd->len)
+		return (EINVAL);
+
+	req->ImageOffset = 0;
+	req->ImageSize = cmd->len;
+
+	/* Host-to-IOC transfer: the image flows out to the controller. */
+	cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
+
+	return (mpr_push_ieee_sge(cm, &req->SGL, 0));
+}
+
+/*
+ * Prepare the mpr_command for a FW_UPLOAD request.
+ */
+static int
+mpi_pre_fw_upload(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI25_FW_UPLOAD_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_FW_UPLOAD_REPLY *rpl;
+
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	mpr_init_sge(cm, req, &req->SGL);
+	if (cmd->len == 0) {
+		/* Perhaps just asking what the size of the fw is? */
+		return (0);
+	}
+
+	req->ImageOffset = 0;
+	req->ImageSize = cmd->len;
+
+	return (mpr_push_ieee_sge(cm, &req->SGL, 0));
+}
+
+/*
+ * Prepare the mpr_command for a SATA_PASSTHROUGH request.
+ */
+static int
+mpi_pre_sata_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_SATA_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_SATA_PASSTHROUGH_REPLY *rpl;
+
+	/* The user-supplied sizes must match the MPI structures exactly. */
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	mpr_init_sge(cm, req, &req->SGL);
+	return (0);
+}
+
+/*
+ * Prepare the mpr_command for a SMP_PASSTHROUGH request.
+ */
+static int
+mpi_pre_smp_passthrough(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_SMP_PASSTHROUGH_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
+
+	/* The user-supplied sizes must match the MPI structures exactly. */
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	mpr_init_sge(cm, req, &req->SGL);
+	return (0);
+}
+
+/*
+ * Prepare the mpr_command for a CONFIG request.
+ */
+static int
+mpi_pre_config(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_CONFIG_REQUEST *req = (void *)cm->cm_req;
+	/* 'rpl' exists only so sizeof *rpl names the reply size below. */
+	MPI2_CONFIG_REPLY *rpl;
+
+	/* The user-supplied sizes must match the MPI structures exactly. */
+	if (cmd->req_len != sizeof *req)
+		return (EINVAL);
+	if (cmd->rpl_len != sizeof *rpl)
+		return (EINVAL);
+
+	/* CONFIG places its SGL in the PageBufferSGE field. */
+	mpr_init_sge(cm, req, &req->PageBufferSGE);
+	return (0);
+}
+
+/*
+ * Prepare the mpr_command for a SAS_IO_UNIT_CONTROL request.
+ */
+static int
+mpi_pre_sas_io_unit_control(struct mpr_command *cm,
+    struct mpr_usr_command *cmd)
+{
+
+	/* SAS_IO_UNIT_CONTROL carries no SGL and needs no size checks. */
+	cm->cm_sge = NULL;
+	cm->cm_sglsize = 0;
+	return (0);
+}
+
+/*
+ * A set of functions to prepare an mpr_command for the various
+ * supported requests.
+ */
+struct mpr_user_func {
+	U8 Function;		/* MPI2 function code this entry matches */
+	mpr_user_f *f_pre;	/* setup hook run before the request is sent */
+} mpr_user_func_list[] = {
+	{ MPI2_FUNCTION_IOC_FACTS, mpi_pre_ioc_facts },
+	{ MPI2_FUNCTION_PORT_FACTS, mpi_pre_port_facts },
+	{ MPI2_FUNCTION_FW_DOWNLOAD, mpi_pre_fw_download },
+	{ MPI2_FUNCTION_FW_UPLOAD, mpi_pre_fw_upload },
+	{ MPI2_FUNCTION_SATA_PASSTHROUGH, mpi_pre_sata_passthrough },
+	{ MPI2_FUNCTION_SMP_PASSTHROUGH, mpi_pre_smp_passthrough},
+	{ MPI2_FUNCTION_CONFIG, mpi_pre_config},
+	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, mpi_pre_sas_io_unit_control },
+	{ 0xFF, NULL } /* list end */
+};
+
+/*
+ * Dispatch to the per-function setup hook matching the request's MPI2
+ * function code.  Returns EINVAL if the function is not in
+ * mpr_user_func_list, otherwise whatever the hook returns.
+ */
+static int
+mpr_user_setup_request(struct mpr_command *cm, struct mpr_usr_command *cmd)
+{
+	MPI2_REQUEST_HEADER *hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
+	struct mpr_user_func *f;
+
+	/* The table is terminated by a NULL f_pre entry. */
+	for (f = mpr_user_func_list; f->f_pre != NULL; f++) {
+		if (hdr->Function == f->Function)
+			return (f->f_pre(cm, cmd));
+	}
+	return (EINVAL);
+}
+
+/*
+ * Execute a raw user-supplied MPI2 command (MPRIO_MPR_COMMAND ioctl):
+ * copy the request in, run the per-function setup hook, wait for the
+ * IOC to complete it, then copy the reply and any data buffer back out.
+ * Returns 0 or an errno.
+ */
+static int
+mpr_user_command(struct mpr_softc *sc, struct mpr_usr_command *cmd)
+{
+	MPI2_REQUEST_HEADER *hdr;
+	MPI2_DEFAULT_REPLY *rpl;
+	void *buf = NULL;
+	struct mpr_command *cm = NULL;
+	int err = 0;
+	int sz;
+
+	mpr_lock(sc);
+	cm = mpr_alloc_command(sc);
+
+	if (cm == NULL) {
+		mpr_printf(sc, "%s: no mpr requests\n", __func__);
+		err = ENOMEM;
+		goto Ret;
+	}
+	mpr_unlock(sc);
+
+	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
+
+	mpr_dprint(sc, MPR_USER, "%s: req %p %d rpl %p %d\n", __func__,
+	    cmd->req, cmd->req_len, cmd->rpl, cmd->rpl_len);
+
+	/* The request must fit in a single IOC request frame. */
+	if (cmd->req_len > (int)sc->facts->IOCRequestFrameSize * 4) {
+		err = EINVAL;
+		goto RetFreeUnlocked;
+	}
+	err = copyin(cmd->req, hdr, cmd->req_len);
+	if (err != 0)
+		goto RetFreeUnlocked;
+
+	mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
+	    hdr->Function, hdr->MsgFlags);
+
+	if (cmd->len > 0) {
+		/*
+		 * M_WAITOK guarantees the allocation succeeds, so no NULL
+		 * check is needed here.  (The old check was dead code and
+		 * its error path returned without freeing 'cm'.)
+		 */
+		buf = malloc(cmd->len, M_MPRUSER, M_WAITOK | M_ZERO);
+		cm->cm_data = buf;
+		cm->cm_length = cmd->len;
+	} else {
+		cm->cm_data = NULL;
+		cm->cm_length = 0;
+	}
+
+	cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+
+	err = mpr_user_setup_request(cm, cmd);
+	if (err == EINVAL) {
+		mpr_printf(sc, "%s: unsupported parameter or unsupported "
+		    "function in request (function = 0x%X)\n", __func__,
+		    hdr->Function);
+	}
+	if (err != 0)
+		goto RetFreeUnlocked;
+
+	mpr_lock(sc);
+	err = mpr_wait_command(sc, cm, 30, CAN_SLEEP);
+
+	if (err) {
+		mpr_printf(sc, "%s: invalid request: error %d\n",
+		    __func__, err);
+		goto Ret;
+	}
+
+	rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
+	/* MsgLength is in 32-bit words; a missing reply copies nothing. */
+	if (rpl != NULL)
+		sz = rpl->MsgLength * 4;
+	else
+		sz = 0;
+
+	if (sz > cmd->rpl_len) {
+		mpr_printf(sc, "%s: user reply buffer (%d) smaller than "
+		    "returned buffer (%d)\n", __func__, cmd->rpl_len, sz);
+		sz = cmd->rpl_len;
+	}
+
+	mpr_unlock(sc);
+	/* Propagate copyout failures instead of silently dropping them. */
+	err = copyout(rpl, cmd->rpl, sz);
+	if (err == 0 && buf != NULL)
+		err = copyout(buf, cmd->buf, cmd->len);
+	mpr_dprint(sc, MPR_USER, "%s: reply size %d\n", __func__, sz);
+
+RetFreeUnlocked:
+	mpr_lock(sc);
+	if (cm != NULL)
+		mpr_free_command(sc, cm);
+Ret:
+	mpr_unlock(sc);
+	if (buf != NULL)
+		free(buf, M_MPRUSER);
+	return (err);
+}
+
+/*
+ * Generic pass-through ioctl handler: forward a raw MPI2 request from
+ * user space to the IOC and copy any reply, data, and sense data back.
+ * Only one pass-through command may be in flight at a time, enforced
+ * via the MPR_FLAGS_BUSY bit.  Returns 0 or an errno.
+ */
+static int
+mpr_user_pass_thru(struct mpr_softc *sc, mpr_pass_thru_t *data)
+{
+	MPI2_REQUEST_HEADER *hdr, tmphdr;
+	MPI2_DEFAULT_REPLY *rpl;
+	struct mpr_command *cm = NULL;
+	int i, err = 0, dir = 0, sz;
+	uint8_t tool, function = 0;
+	u_int sense_len;
+	struct mprsas_target *targ = NULL;
+
+	/*
+	 * Only allow one passthru command at a time. Use the MPR_FLAGS_BUSY
+	 * bit to denote that a passthru is being processed.
+	 */
+	mpr_lock(sc);
+	if (sc->mpr_flags & MPR_FLAGS_BUSY) {
+		mpr_dprint(sc, MPR_USER, "%s: Only one passthru command "
+		    "allowed at a single time.", __func__);
+		mpr_unlock(sc);
+		return (EBUSY);
+	}
+	sc->mpr_flags |= MPR_FLAGS_BUSY;
+	mpr_unlock(sc);
+
+	/*
+	 * Do some validation on data direction. Valid cases are:
+	 * 1) DataSize is 0 and direction is NONE
+	 * 2) DataSize is non-zero and one of:
+	 * a) direction is READ or
+	 * b) direction is WRITE or
+	 * c) direction is BOTH and DataOutSize is non-zero
+	 * If valid and the direction is BOTH, change the direction to READ.
+	 * if valid and the direction is not BOTH, make sure DataOutSize is 0.
+	 */
+	if (((data->DataSize == 0) &&
+	    (data->DataDirection == MPR_PASS_THRU_DIRECTION_NONE)) ||
+	    ((data->DataSize != 0) &&
+	    ((data->DataDirection == MPR_PASS_THRU_DIRECTION_READ) ||
+	    (data->DataDirection == MPR_PASS_THRU_DIRECTION_WRITE) ||
+	    ((data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH) &&
+	    (data->DataOutSize != 0))))) {
+		if (data->DataDirection == MPR_PASS_THRU_DIRECTION_BOTH)
+			data->DataDirection = MPR_PASS_THRU_DIRECTION_READ;
+		else
+			data->DataOutSize = 0;
+	} else {
+		/*
+		 * Invalid combination.  Exit through the common path so
+		 * MPR_FLAGS_BUSY (set above) is cleared; returning
+		 * directly here would block all future passthru ioctls.
+		 */
+		err = EINVAL;
+		goto RetFreeUnlocked;
+	}
+
+	mpr_dprint(sc, MPR_USER, "%s: req 0x%jx %d rpl 0x%jx %d "
+	    "data in 0x%jx %d data out 0x%jx %d data dir %d\n", __func__,
+	    data->PtrRequest, data->RequestSize, data->PtrReply,
+	    data->ReplySize, data->PtrData, data->DataSize,
+	    data->PtrDataOut, data->DataOutSize, data->DataDirection);
+
+	/*
+	 * Validate the user-controlled request size BEFORE any copyin:
+	 * it must hold at least a request header and fit in an IOC
+	 * request frame.  The old code copied RequestSize bytes into the
+	 * fixed-size on-stack header first, allowing a stack overflow.
+	 */
+	if (data->RequestSize < (int)sizeof(tmphdr) ||
+	    data->RequestSize > (int)sc->facts->IOCRequestFrameSize * 4) {
+		err = EINVAL;
+		goto RetFreeUnlocked;
+	}
+
+	/*
+	 * copy in just the header so we know what we're dealing with
+	 * before we commit to allocating a command for it.  The full
+	 * request is copied directly into the command frame later.
+	 */
+	err = copyin(PTRIN(data->PtrRequest), &tmphdr, sizeof(tmphdr));
+	if (err != 0)
+		goto RetFreeUnlocked;
+
+	function = tmphdr.Function;
+	mpr_dprint(sc, MPR_USER, "%s: Function %02X MsgFlags %02X\n", __func__,
+	    function, tmphdr.MsgFlags);
+
+	/*
+	 * Handle a passthru TM request.
+	 */
+	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+		MPI2_SCSI_TASK_MANAGE_REQUEST *task;
+
+		mpr_lock(sc);
+		cm = mprsas_alloc_tm(sc);
+		if (cm == NULL) {
+			err = EINVAL;
+			goto Ret;
+		}
+		mpr_unlock(sc);
+
+		/*
+		 * Copy the whole request into the TM frame.  copyin may
+		 * sleep, so drop the lock around it.  Only a small fixup
+		 * of the task MID is needed afterwards.
+		 */
+		task = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
+		err = copyin(PTRIN(data->PtrRequest), task,
+		    data->RequestSize);
+		mpr_lock(sc);
+		if (err != 0) {
+			mprsas_free_tm(sc, cm);
+			cm = NULL;
+			goto Ret;
+		}
+		task->TaskMID = cm->cm_desc.Default.SMID;
+
+		cm->cm_data = NULL;
+		cm->cm_desc.HighPriority.RequestFlags =
+		    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+		cm->cm_complete = NULL;
+		cm->cm_complete_data = NULL;
+
+		err = mpr_wait_command(sc, cm, 30, CAN_SLEEP);
+
+		if (err != 0) {
+			err = EIO;
+			mpr_dprint(sc, MPR_FAULT, "%s: task management failed",
+			    __func__);
+		}
+		/*
+		 * Copy the reply data and sense data to user space.
+		 */
+		if (cm->cm_reply != NULL) {
+			rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
+			sz = rpl->MsgLength * 4;
+
+			if (sz > data->ReplySize) {
+				mpr_printf(sc, "%s: user reply buffer (%d) "
+				    "smaller than returned buffer (%d)\n",
+				    __func__, data->ReplySize, sz);
+			}
+			mpr_unlock(sc);
+			copyout(cm->cm_reply, PTRIN(data->PtrReply),
+			    data->ReplySize);
+			mpr_lock(sc);
+		}
+		mprsas_free_tm(sc, cm);
+		goto Ret;
+	}
+
+	mpr_lock(sc);
+	cm = mpr_alloc_command(sc);
+
+	if (cm == NULL) {
+		mpr_printf(sc, "%s: no mpr requests\n", __func__);
+		err = ENOMEM;
+		goto Ret;
+	}
+	mpr_unlock(sc);
+
+	/* Copy the full request straight into the request frame. */
+	hdr = (MPI2_REQUEST_HEADER *)cm->cm_req;
+	err = copyin(PTRIN(data->PtrRequest), hdr, data->RequestSize);
+	if (err != 0)
+		goto RetFreeUnlocked;
+
+	/*
+	 * Do some checking to make sure the IOCTL request contains a valid
+	 * request. Then set the SGL info.
+	 */
+	mpr_init_sge(cm, hdr, (void *)((uint8_t *)hdr + data->RequestSize));
+
+	/*
+	 * Set up for read, write or both. From check above, DataOutSize will
+	 * be 0 if direction is READ or WRITE, but it will have some non-zero
+	 * value if the direction is BOTH. So, just use the biggest size to get
+	 * the cm_data buffer size. If direction is BOTH, 2 SGLs need to be set
+	 * up; the first is for the request and the second will contain the
+	 * response data. cm_out_len needs to be set here and this will be used
+	 * when the SGLs are set up.
+	 */
+	cm->cm_data = NULL;
+	cm->cm_length = MAX(data->DataSize, data->DataOutSize);
+	cm->cm_out_len = data->DataOutSize;
+	cm->cm_flags = 0;
+	if (cm->cm_length != 0) {
+		/* M_WAITOK cannot fail, so no NULL check is needed. */
+		cm->cm_data = malloc(cm->cm_length, M_MPRUSER, M_WAITOK |
+		    M_ZERO);
+		cm->cm_flags = MPR_CM_FLAGS_DATAIN;
+		if (data->DataOutSize) {
+			cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
+			err = copyin(PTRIN(data->PtrDataOut),
+			    cm->cm_data, data->DataOutSize);
+		} else if (data->DataDirection ==
+		    MPR_PASS_THRU_DIRECTION_WRITE) {
+			cm->cm_flags = MPR_CM_FLAGS_DATAOUT;
+			err = copyin(PTRIN(data->PtrData),
+			    cm->cm_data, data->DataSize);
+		}
+		if (err != 0)
+			mpr_dprint(sc, MPR_FAULT, "%s: failed to copy "
+			    "IOCTL data from user space\n", __func__);
+	}
+	/*
+	 * Set this flag only if processing a command that does not need an
+	 * IEEE SGL. The CLI Tool within the Toolbox uses IEEE SGLs, so clear
+	 * the flag only for that tool if processing a Toolbox function.
+	 */
+	cm->cm_flags |= MPR_CM_FLAGS_SGE_SIMPLE;
+	for (i = 0; i < sizeof (ieee_sgl_func_list); i++) {
+		if (function == ieee_sgl_func_list[i]) {
+			if (function == MPI2_FUNCTION_TOOLBOX)
+			{
+				tool = (uint8_t)hdr->FunctionDependent1;
+				if (tool != MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
+					break;
+			}
+			cm->cm_flags &= ~MPR_CM_FLAGS_SGE_SIMPLE;
+			break;
+		}
+	}
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+
+	/*
+	 * Set up Sense buffer and SGL offset for IO passthru. SCSI IO request
+	 * uses SCSI IO or Fast Path SCSI IO descriptor.
+	 */
+	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
+	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+		MPI2_SCSI_IO_REQUEST *scsi_io_req;
+
+		scsi_io_req = (MPI2_SCSI_IO_REQUEST *)hdr;
+		/*
+		 * Put SGE for data and data_out buffer at the end of
+		 * scsi_io_request message header (64 bytes in total).
+		 * Following above SGEs, the residual space will be used by
+		 * sense data.
+		 */
+		scsi_io_req->SenseBufferLength = (uint8_t)(data->RequestSize -
+		    64);
+		scsi_io_req->SenseBufferLowAddress =
+		    htole32(cm->cm_sense_busaddr);
+
+		/*
+		 * Set SGLOffset0 value. This is the number of dwords that SGL
+		 * is offset from the beginning of MPI2_SCSI_IO_REQUEST struct.
+		 */
+		scsi_io_req->SGLOffset0 = 24;
+
+		/*
+		 * Setup descriptor info. RAID passthrough must use the
+		 * default request descriptor which is already set, so if this
+		 * is a SCSI IO request, change the descriptor to SCSI IO or
+		 * Fast Path SCSI IO. Also, if this is a SCSI IO request,
+		 * handle the reply in the mprsas_scsio_complete function.
+		 */
+		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
+			/*
+			 * Make sure the DevHandle is not 0 because this is a
+			 * likely error.
+			 */
+			if (scsi_io_req->DevHandle == 0) {
+				err = EINVAL;
+				goto RetFreeUnlocked;
+			}
+
+			targ = mprsas_find_target_by_handle(sc->sassc, 0,
+			    scsi_io_req->DevHandle);
+
+			if (!targ) {
+				printf("No Target found for handle %d\n",
+				    scsi_io_req->DevHandle);
+				err = EINVAL;
+				goto RetFreeUnlocked;
+			}
+
+			if (targ->scsi_req_desc_type ==
+			    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
+				cm->cm_desc.FastPathSCSIIO.RequestFlags =
+				    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+				cm->cm_desc.FastPathSCSIIO.DevHandle =
+				    scsi_io_req->DevHandle;
+				scsi_io_req->IoFlags |=
+				    MPI25_SCSIIO_IOFLAGS_FAST_PATH;
+			} else {
+				cm->cm_desc.SCSIIO.RequestFlags =
+				    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+				cm->cm_desc.SCSIIO.DevHandle =
+				    scsi_io_req->DevHandle;
+			}
+		}
+	}
+
+	mpr_lock(sc);
+
+	err = mpr_wait_command(sc, cm, 30, CAN_SLEEP);
+
+	if (err) {
+		mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
+		    err);
+		mpr_unlock(sc);
+		goto RetFreeUnlocked;
+	}
+
+	/*
+	 * Sync the DMA data, if any. Then copy the data to user space.
+	 */
+	if (cm->cm_data != NULL) {
+		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
+			dir = BUS_DMASYNC_POSTREAD;
+		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
+			dir = BUS_DMASYNC_POSTWRITE;
+		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
+		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
+
+		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
+			mpr_unlock(sc);
+			err = copyout(cm->cm_data,
+			    PTRIN(data->PtrData), data->DataSize);
+			mpr_lock(sc);
+			if (err != 0)
+				mpr_dprint(sc, MPR_FAULT, "%s: failed to copy "
+				    "IOCTL data to user space\n", __func__);
+		}
+	}
+
+	/*
+	 * Copy the reply data and sense data to user space.
+	 */
+	if (cm->cm_reply != NULL) {
+		rpl = (MPI2_DEFAULT_REPLY *)cm->cm_reply;
+		sz = rpl->MsgLength * 4;
+
+		if (sz > data->ReplySize) {
+			mpr_printf(sc, "%s: user reply buffer (%d) smaller "
+			    "than returned buffer (%d)\n", __func__,
+			    data->ReplySize, sz);
+		}
+		mpr_unlock(sc);
+		copyout(cm->cm_reply, PTRIN(data->PtrReply), data->ReplySize);
+		mpr_lock(sc);
+
+		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
+		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+			if (((MPI2_SCSI_IO_REPLY *)rpl)->SCSIState &
+			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+				sense_len =
+				    MIN((le32toh(((MPI2_SCSI_IO_REPLY *)rpl)->
+				    SenseCount)), sizeof(struct
+				    scsi_sense_data));
+				mpr_unlock(sc);
+				/*
+				 * Sense data goes to USER space, right after
+				 * the SCSI IO reply in the user's reply
+				 * buffer.  The old code passed the kernel
+				 * address cm->cm_req + 64 as the copyout
+				 * destination, which could never work.
+				 */
+				copyout(cm->cm_sense,
+				    PTRIN(data->PtrReply +
+				    sizeof(MPI2_SCSI_IO_REPLY)), sense_len);
+				mpr_lock(sc);
+			}
+		}
+	}
+	mpr_unlock(sc);
+
+RetFreeUnlocked:
+	mpr_lock(sc);
+
+	if (cm != NULL) {
+		if (cm->cm_data)
+			free(cm->cm_data, M_MPRUSER);
+		mpr_free_command(sc, cm);
+	}
+Ret:
+	sc->mpr_flags &= ~MPR_FLAGS_BUSY;
+	mpr_unlock(sc);
+
+	return (err);
+}
+
+/*
+ * Fill in the adapter-data structure returned by the corresponding
+ * ioctl: PCI location/IDs, firmware and driver versions, and the BIOS
+ * version read from BIOS Config Page 3.
+ */
+static void
+mpr_user_get_adapter_data(struct mpr_softc *sc, mpr_adapter_data_t *data)
+{
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2BiosPage3_t config_page;
+
+	/*
+	 * Use the PCI interface functions to get the Bus, Device, and Function
+	 * information.
+	 */
+	data->PciInformation.u.bits.BusNumber = pci_get_bus(sc->mpr_dev);
+	data->PciInformation.u.bits.DeviceNumber = pci_get_slot(sc->mpr_dev);
+	data->PciInformation.u.bits.FunctionNumber =
+	    pci_get_function(sc->mpr_dev);
+
+	/*
+	 * Get the FW version that should already be saved in IOC Facts.
+	 */
+	data->MpiFirmwareVersion = sc->facts->FWVersion.Word;
+
+	/*
+	 * General device info.
+	 */
+	data->AdapterType = MPRIOCTL_ADAPTER_TYPE_SAS3;
+	data->PCIDeviceHwId = pci_get_device(sc->mpr_dev);
+	data->PCIDeviceHwRev = pci_read_config(sc->mpr_dev, PCIR_REVID, 1);
+	data->SubSystemId = pci_get_subdevice(sc->mpr_dev);
+	data->SubsystemVendorId = pci_get_subvendor(sc->mpr_dev);
+
+	/*
+	 * Get the driver version.
+	 */
+	strcpy((char *)&data->DriverVersion[0], MPR_DRIVER_VERSION);
+
+	/*
+	 * Need to get BIOS Config Page 3 for the BIOS Version.
+	 */
+	data->BiosVersion = 0;
+	mpr_lock(sc);
+	if (mpr_config_get_bios_pg3(sc, &mpi_reply, &config_page))
+		printf("%s: Error while retrieving BIOS Version\n", __func__);
+	else
+		data->BiosVersion = config_page.BiosVersion;
+	mpr_unlock(sc);
+}
+
+/*
+ * Fill in the PCI-info structure returned by the corresponding ioctl:
+ * bus/device/function plus a byte-for-byte copy of the first part of
+ * PCI config space.
+ */
+static void
+mpr_user_read_pci_info(struct mpr_softc *sc, mpr_pci_info_t *data)
+{
+	int i;
+
+	/*
+	 * Use the PCI interface functions to get the Bus, Device, and Function
+	 * information.
+	 */
+	data->BusNumber = pci_get_bus(sc->mpr_dev);
+	data->DeviceNumber = pci_get_slot(sc->mpr_dev);
+	data->FunctionNumber = pci_get_function(sc->mpr_dev);
+
+	/*
+	 * Now get the interrupt vector and the pci header. The vector can
+	 * only be 0 right now. The header is the first 256 bytes of config
+	 * space.
+	 */
+	data->InterruptVector = 0;
+	for (i = 0; i < sizeof (data->PciHeader); i++) {
+		data->PciHeader[i] = pci_read_config(sc->mpr_dev, i, 1);
+	}
+}
+
+/*
+ * Look up the FW diag buffer whose registered unique ID matches
+ * 'unique_id'.  Returns its index into sc->fw_diag_buffer_list, or
+ * MPR_FW_DIAGNOSTIC_UID_NOT_FOUND if no buffer has that ID.
+ */
+static uint8_t
+mpr_get_fw_diag_buffer_number(struct mpr_softc *sc, uint32_t unique_id)
+{
+	uint8_t index;
+
+	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+		if (sc->fw_diag_buffer_list[index].unique_id == unique_id) {
+			return (index);
+		}
+	}
+
+	return (MPR_FW_DIAGNOSTIC_UID_NOT_FOUND);
+}
+
+/*
+ * Post the given FW diagnostic buffer to the IOC via a
+ * DIAG_BUFFER_POST request and wait for completion.  On success the
+ * buffer is marked valid and firmware-owned.  Returns MPR_DIAG_SUCCESS
+ * or MPR_DIAG_FAILURE; *return_code carries the MPR_FW_DIAG_ERROR_*
+ * detail for the caller.
+ */
+static int
+mpr_post_fw_diag_buffer(struct mpr_softc *sc,
+    mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
+{
+	MPI2_DIAG_BUFFER_POST_REQUEST *req;
+	MPI2_DIAG_BUFFER_POST_REPLY *reply;
+	struct mpr_command *cm = NULL;
+	int i, status;
+
+	/*
+	 * If buffer is not enabled, just leave.
+	 */
+	*return_code = MPR_FW_DIAG_ERROR_POST_FAILED;
+	if (!pBuffer->enabled) {
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * Clear some flags initially.
+	 */
+	pBuffer->force_release = FALSE;
+	pBuffer->valid_data = FALSE;
+	pBuffer->owned_by_firmware = FALSE;
+
+	/*
+	 * Get a command.
+	 */
+	cm = mpr_alloc_command(sc);
+	if (cm == NULL) {
+		mpr_printf(sc, "%s: no mpr requests\n", __func__);
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * Build the request for posting the FW Diag Buffer and send it.
+	 */
+	req = (MPI2_DIAG_BUFFER_POST_REQUEST *)cm->cm_req;
+	req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+	req->BufferType = pBuffer->buffer_type;
+	req->ExtendedType = pBuffer->extended_type;
+	req->BufferLength = pBuffer->size;
+	for (i = 0; i < (sizeof(req->ProductSpecific) / 4); i++)
+		req->ProductSpecific[i] = pBuffer->product_specific[i];
+	mpr_from_u64(sc->fw_diag_busaddr, &req->BufferAddress);
+	cm->cm_data = NULL;
+	cm->cm_length = 0;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_complete_data = NULL;
+
+	/*
+	 * Send command synchronously.
+	 */
+	status = mpr_wait_command(sc, cm, 30, CAN_SLEEP);
+	if (status) {
+		mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
+		    status);
+		status = MPR_DIAG_FAILURE;
+		goto done;
+	}
+
+	/*
+	 * Process POST reply.  The reply frame may be absent if the
+	 * command was aborted; don't dereference a NULL pointer.
+	 */
+	reply = (MPI2_DIAG_BUFFER_POST_REPLY *)cm->cm_reply;
+	if (reply == NULL) {
+		mpr_dprint(sc, MPR_FAULT, "%s: no reply for FW Diag Buffer "
+		    "post\n", __func__);
+		status = MPR_DIAG_FAILURE;
+		goto done;
+	}
+	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
+		status = MPR_DIAG_FAILURE;
+		mpr_dprint(sc, MPR_FAULT, "%s: post of FW Diag Buffer failed "
+		    "with IOCStatus = 0x%x, IOCLogInfo = 0x%x and "
+		    "TransferLength = 0x%x\n", __func__, reply->IOCStatus,
+		    reply->IOCLogInfo, reply->TransferLength);
+		goto done;
+	}
+
+	/*
+	 * Post was successful.
+	 */
+	pBuffer->valid_data = TRUE;
+	pBuffer->owned_by_firmware = TRUE;
+	*return_code = MPR_FW_DIAG_ERROR_SUCCESS;
+	status = MPR_DIAG_SUCCESS;
+
+done:
+	mpr_free_command(sc, cm);
+	return (status);
+}
+
+/*
+ * Ask the IOC to release ownership of the given FW diagnostic buffer
+ * via a DIAG_RELEASE request and wait for completion.  For an
+ * UNREGISTER the unique ID is also invalidated.  Returns
+ * MPR_DIAG_SUCCESS or MPR_DIAG_FAILURE; *return_code carries the
+ * MPR_FW_DIAG_ERROR_* detail for the caller.
+ */
+static int
+mpr_release_fw_diag_buffer(struct mpr_softc *sc,
+    mpr_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
+    uint32_t diag_type)
+{
+	MPI2_DIAG_RELEASE_REQUEST *req;
+	MPI2_DIAG_RELEASE_REPLY *reply;
+	struct mpr_command *cm = NULL;
+	int status;
+
+	/*
+	 * If buffer is not enabled, just leave.
+	 */
+	*return_code = MPR_FW_DIAG_ERROR_RELEASE_FAILED;
+	if (!pBuffer->enabled) {
+		mpr_dprint(sc, MPR_USER, "%s: This buffer type is not "
+		    "supported by the IOC", __func__);
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * Clear some flags initially.
+	 */
+	pBuffer->force_release = FALSE;
+	pBuffer->valid_data = FALSE;
+	pBuffer->owned_by_firmware = FALSE;
+
+	/*
+	 * Get a command.
+	 */
+	cm = mpr_alloc_command(sc);
+	if (cm == NULL) {
+		mpr_printf(sc, "%s: no mpr requests\n", __func__);
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * Build the request for releasing the FW Diag Buffer and send it.
+	 */
+	req = (MPI2_DIAG_RELEASE_REQUEST *)cm->cm_req;
+	req->Function = MPI2_FUNCTION_DIAG_RELEASE;
+	req->BufferType = pBuffer->buffer_type;
+	cm->cm_data = NULL;
+	cm->cm_length = 0;
+	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	cm->cm_complete_data = NULL;
+
+	/*
+	 * Send command synchronously.
+	 */
+	status = mpr_wait_command(sc, cm, 30, CAN_SLEEP);
+	if (status) {
+		mpr_printf(sc, "%s: invalid request: error %d\n", __func__,
+		    status);
+		status = MPR_DIAG_FAILURE;
+		goto done;
+	}
+
+	/*
+	 * Process RELEASE reply.  The reply frame may be absent if the
+	 * command was aborted; don't dereference a NULL pointer.
+	 */
+	reply = (MPI2_DIAG_RELEASE_REPLY *)cm->cm_reply;
+	if (reply == NULL) {
+		mpr_dprint(sc, MPR_FAULT, "%s: no reply for FW Diag Buffer "
+		    "release\n", __func__);
+		status = MPR_DIAG_FAILURE;
+		goto done;
+	}
+	if ((reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) ||
+	    pBuffer->owned_by_firmware) {
+		status = MPR_DIAG_FAILURE;
+		mpr_dprint(sc, MPR_FAULT, "%s: release of FW Diag Buffer "
+		    "failed with IOCStatus = 0x%x and IOCLogInfo = 0x%x\n",
+		    __func__, reply->IOCStatus, reply->IOCLogInfo);
+		goto done;
+	}
+
+	/*
+	 * Release was successful.
+	 */
+	*return_code = MPR_FW_DIAG_ERROR_SUCCESS;
+	status = MPR_DIAG_SUCCESS;
+
+	/*
+	 * If this was for an UNREGISTER diag type command, clear the unique ID.
+	 */
+	if (diag_type == MPR_FW_DIAG_TYPE_UNREGISTER) {
+		pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID;
+	}
+
+done:
+	/* The old code returned without this, leaking a command per call. */
+	mpr_free_command(sc, cm);
+	return (status);
+}
+
+/*
+ * Register and post a FW diagnostic buffer described by *diag_register.
+ * Allocates a contiguous DMA buffer and hands it to the firmware.
+ * Returns MPR_DIAG_SUCCESS or MPR_DIAG_FAILURE; *return_code carries
+ * the MPR_FW_DIAG_ERROR_* detail for the caller.
+ */
+static int
+mpr_diag_register(struct mpr_softc *sc,
+    mpr_fw_diag_register_t *diag_register, uint32_t *return_code)
+{
+	mpr_fw_diagnostic_buffer_t *pBuffer;
+	uint8_t extended_type, buffer_type, i;
+	uint32_t buffer_size;
+	uint32_t unique_id;
+	int status;
+
+	extended_type = diag_register->ExtendedType;
+	buffer_type = diag_register->BufferType;
+	buffer_size = diag_register->RequestedBufferSize;
+	unique_id = diag_register->UniqueId;
+
+	/*
+	 * Check for valid buffer type
+	 */
+	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
+		*return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * Get the current buffer and look up the unique ID. The unique ID
+	 * should not be found. If it is, the ID is already in use.
+	 */
+	i = mpr_get_fw_diag_buffer_number(sc, unique_id);
+	pBuffer = &sc->fw_diag_buffer_list[buffer_type];
+	if (i != MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+		*return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * The buffer's unique ID should not be registered yet, and the given
+	 * unique ID cannot be 0.
+	 */
+	if ((pBuffer->unique_id != MPR_FW_DIAG_INVALID_UID) ||
+	    (unique_id == MPR_FW_DIAG_INVALID_UID)) {
+		*return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+		return (MPR_DIAG_FAILURE);
+	}
+
+	/*
+	 * If this buffer is already posted as immediate, just change owner.
+	 */
+	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
+	    (pBuffer->unique_id == MPR_FW_DIAG_INVALID_UID)) {
+		pBuffer->immediate = FALSE;
+		pBuffer->unique_id = unique_id;
+		return (MPR_DIAG_SUCCESS);
+	}
+
+	/*
+	 * Post a new buffer after checking if it's enabled. The DMA buffer
+	 * that is allocated will be contiguous (nsegments = 1).
+	 */
+	if (!pBuffer->enabled) {
+		*return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;
+		return (MPR_DIAG_FAILURE);
+	}
+	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
+				1, 0,			/* algnmnt, boundary */
+				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
+				BUS_SPACE_MAXADDR,	/* highaddr */
+				NULL, NULL,		/* filter, filterarg */
+				buffer_size,		/* maxsize */
+				1,			/* nsegments */
+				buffer_size,		/* maxsegsize */
+				0,			/* flags */
+				NULL, NULL,		/* lockfunc, lockarg */
+				&sc->fw_diag_dmat)) {
+		device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer DMA "
+		    "tag\n");
+		/*
+		 * Fail through the normal MPR_DIAG_* channel so callers
+		 * never see a raw errno mixed in with diag status codes.
+		 */
+		*return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;
+		return (MPR_DIAG_FAILURE);
+	}
+	if (bus_dmamem_alloc(sc->fw_diag_dmat, (void **)&sc->fw_diag_buffer,
+	    BUS_DMA_NOWAIT, &sc->fw_diag_map)) {
+		device_printf(sc->mpr_dev, "Cannot allocate FW diag buffer "
+		    "memory\n");
+		/* Don't leak the DMA tag created just above. */
+		bus_dma_tag_destroy(sc->fw_diag_dmat);
+		sc->fw_diag_dmat = NULL;
+		*return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;
+		return (MPR_DIAG_FAILURE);
+	}
+	bzero(sc->fw_diag_buffer, buffer_size);
+	bus_dmamap_load(sc->fw_diag_dmat, sc->fw_diag_map, sc->fw_diag_buffer,
+	    buffer_size, mpr_memaddr_cb, &sc->fw_diag_busaddr, 0);
+	pBuffer->size = buffer_size;
+
+	/*
+	 * Copy the given info to the diag buffer and post the buffer.
+	 */
+	pBuffer->buffer_type = buffer_type;
+	pBuffer->immediate = FALSE;
+	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
+		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
+		    i++) {
+			pBuffer->product_specific[i] =
+			    diag_register->ProductSpecific[i];
+		}
+	}
+	pBuffer->extended_type = extended_type;
+	pBuffer->unique_id = unique_id;
+	status = mpr_post_fw_diag_buffer(sc, pBuffer, return_code);
+
+	/*
+	 * In case there was a failure, free the DMA buffer.
+	 */
+	if (status == MPR_DIAG_FAILURE) {
+		if (sc->fw_diag_busaddr != 0) {
+			bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
+			sc->fw_diag_busaddr = 0;
+		}
+		if (sc->fw_diag_buffer != NULL) {
+			bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
+			    sc->fw_diag_map);
+			sc->fw_diag_buffer = NULL;
+		}
+		if (sc->fw_diag_dmat != NULL) {
+			bus_dma_tag_destroy(sc->fw_diag_dmat);
+			sc->fw_diag_dmat = NULL;
+		}
+	}
+
+	return (status);
+}
+
+/*
+ * Unregister the FW diagnostic buffer identified by UniqueId: release
+ * it from firmware ownership (if held) and, on success, tear down its
+ * DMA resources.  Returns MPR_DIAG_SUCCESS or MPR_DIAG_FAILURE;
+ * *return_code carries the MPR_FW_DIAG_ERROR_* detail.
+ */
+static int
+mpr_diag_unregister(struct mpr_softc *sc,
+    mpr_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
+{
+	mpr_fw_diagnostic_buffer_t *pBuffer;
+	uint8_t i;
+	uint32_t unique_id;
+	int status;
+
+	unique_id = diag_unregister->UniqueId;
+
+	/*
+	 * Get the current buffer and look up the unique ID. The unique ID
+	 * should be there.
+	 */
+	i = mpr_get_fw_diag_buffer_number(sc, unique_id);
+	if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+		*return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+		return (MPR_DIAG_FAILURE);
+	}
+
+	pBuffer = &sc->fw_diag_buffer_list[i];
+
+	/*
+	 * Try to release the buffer from FW before freeing it. If release
+	 * fails, don't free the DMA buffer in case FW tries to access it
+	 * later. If buffer is not owned by firmware, can't release it.
+	 */
+	if (!pBuffer->owned_by_firmware) {
+		status = MPR_DIAG_SUCCESS;
+	} else {
+		status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code,
+		    MPR_FW_DIAG_TYPE_UNREGISTER);
+	}
+
+	/*
+	 * At this point, return the current status no matter what happens with
+	 * the DMA buffer.
+	 */
+	pBuffer->unique_id = MPR_FW_DIAG_INVALID_UID;
+	if (status == MPR_DIAG_SUCCESS) {
+		if (sc->fw_diag_busaddr != 0)
+			bus_dmamap_unload(sc->fw_diag_dmat, sc->fw_diag_map);
+		if (sc->fw_diag_buffer != NULL)
+			bus_dmamem_free(sc->fw_diag_dmat, sc->fw_diag_buffer,
+			    sc->fw_diag_map);
+		if (sc->fw_diag_dmat != NULL)
+			bus_dma_tag_destroy(sc->fw_diag_dmat);
+	}
+
+	return (status);
+}
+
+/*
+ * Report the state of a registered FW diagnostic buffer.  The buffer is
+ * selected either by UniqueId or, when UniqueId is invalid, by
+ * BufferType.  Fills *diag_query in place.  Returns MPR_DIAG_SUCCESS or
+ * MPR_DIAG_FAILURE; *return_code carries the MPR_FW_DIAG_ERROR_* detail.
+ */
+static int
+mpr_diag_query(struct mpr_softc *sc, mpr_fw_diag_query_t *diag_query,
+    uint32_t *return_code)
+{
+	mpr_fw_diagnostic_buffer_t *pBuffer;
+	uint8_t i;
+	uint32_t unique_id;
+
+	unique_id = diag_query->UniqueId;
+
+	/*
+	 * If ID is valid, query on ID.
+	 * If ID is invalid, query on buffer type.
+	 */
+	if (unique_id == MPR_FW_DIAG_INVALID_UID) {
+		i = diag_query->BufferType;
+		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
+			*return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+			return (MPR_DIAG_FAILURE);
+		}
+	} else {
+		i = mpr_get_fw_diag_buffer_number(sc, unique_id);
+		if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+			*return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+			return (MPR_DIAG_FAILURE);
+		}
+	}
+
+	/*
+	 * Fill query structure with the diag buffer info.  Note that 'i'
+	 * is safely reused as a loop index below because pBuffer has
+	 * already captured the selected buffer.
+	 */
+	pBuffer = &sc->fw_diag_buffer_list[i];
+	diag_query->BufferType = pBuffer->buffer_type;
+	diag_query->ExtendedType = pBuffer->extended_type;
+	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
+		for (i = 0; i < (sizeof(diag_query->ProductSpecific) / 4);
+		    i++) {
+			diag_query->ProductSpecific[i] =
+			    pBuffer->product_specific[i];
+		}
+	}
+	diag_query->TotalBufferSize = pBuffer->size;
+	diag_query->DriverAddedBufferSize = 0;
+	diag_query->UniqueId = pBuffer->unique_id;
+	diag_query->ApplicationFlags = 0;
+	diag_query->DiagnosticFlags = 0;
+
+	/*
+	 * Set/Clear application flags
+	 */
+	if (pBuffer->immediate) {
+		diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_APP_OWNED;
+	} else {
+		diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_APP_OWNED;
+	}
+	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
+		diag_query->ApplicationFlags |= MPR_FW_DIAG_FLAG_BUFFER_VALID;
+	} else {
+		diag_query->ApplicationFlags &= ~MPR_FW_DIAG_FLAG_BUFFER_VALID;
+	}
+	if (pBuffer->owned_by_firmware) {
+		diag_query->ApplicationFlags |=
+		    MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
+	} else {
+		diag_query->ApplicationFlags &=
+		    ~MPR_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
+	}
+
+	return (MPR_DIAG_SUCCESS);
+}
+
+static int
+mpr_diag_read_buffer(struct mpr_softc *sc,
+ mpr_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
+ uint32_t *return_code)
+{
+ mpr_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i, *pData;
+ uint32_t unique_id;
+ int status;
+
+ unique_id = diag_read_buffer->UniqueId;
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should be there.
+ */
+ i = mpr_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+ return (MPR_DIAG_FAILURE);
+ }
+
+ pBuffer = &sc->fw_diag_buffer_list[i];
+
+ /*
+ * Make sure requested read is within limits
+ */
+ if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
+ pBuffer->size) {
+ *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ return (MPR_DIAG_FAILURE);
+ }
+
+ /*
+ * Copy the requested data from DMA to the diag_read_buffer. The DMA
+ * buffer that was allocated is one contiguous buffer.
+ */
+ pData = (uint8_t *)(sc->fw_diag_buffer +
+ diag_read_buffer->StartingOffset);
+ if (copyout(pData, ioctl_buf, diag_read_buffer->BytesToRead) != 0)
+ return (MPR_DIAG_FAILURE);
+ diag_read_buffer->Status = 0;
+
+ /*
+ * Set or clear the Force Release flag.
+ */
+ if (pBuffer->force_release) {
+ diag_read_buffer->Flags |= MPR_FW_DIAG_FLAG_FORCE_RELEASE;
+ } else {
+ diag_read_buffer->Flags &= ~MPR_FW_DIAG_FLAG_FORCE_RELEASE;
+ }
+
+ /*
+ * If buffer is to be reregistered, make sure it's not already owned by
+ * firmware first.
+ */
+ status = MPR_DIAG_SUCCESS;
+ if (!pBuffer->owned_by_firmware) {
+ if (diag_read_buffer->Flags & MPR_FW_DIAG_FLAG_REREGISTER) {
+ status = mpr_post_fw_diag_buffer(sc, pBuffer,
+ return_code);
+ }
+ }
+
+ return (status);
+}
+
+static int
+mpr_diag_release(struct mpr_softc *sc, mpr_fw_diag_release_t *diag_release,
+ uint32_t *return_code)
+{
+ mpr_fw_diagnostic_buffer_t *pBuffer;
+ uint8_t i;
+ uint32_t unique_id;
+ int status;
+
+ unique_id = diag_release->UniqueId;
+
+ /*
+ * Get the current buffer and look up the unique ID. The unique ID
+ * should be there.
+ */
+ i = mpr_get_fw_diag_buffer_number(sc, unique_id);
+ if (i == MPR_FW_DIAGNOSTIC_UID_NOT_FOUND) {
+ *return_code = MPR_FW_DIAG_ERROR_INVALID_UID;
+ return (MPR_DIAG_FAILURE);
+ }
+
+ pBuffer = &sc->fw_diag_buffer_list[i];
+
+ /*
+ * If buffer is not owned by firmware, it's already been released.
+ */
+ if (!pBuffer->owned_by_firmware) {
+ *return_code = MPR_FW_DIAG_ERROR_ALREADY_RELEASED;
+ return (MPR_DIAG_FAILURE);
+ }
+
+ /*
+ * Release the buffer.
+ */
+ status = mpr_release_fw_diag_buffer(sc, pBuffer, return_code,
+ MPR_FW_DIAG_TYPE_RELEASE);
+ return (status);
+}
+
+static int
+mpr_do_diag_action(struct mpr_softc *sc, uint32_t action,
+ uint8_t *diag_action, uint32_t length, uint32_t *return_code)
+{
+ mpr_fw_diag_register_t diag_register;
+ mpr_fw_diag_unregister_t diag_unregister;
+ mpr_fw_diag_query_t diag_query;
+ mpr_diag_read_buffer_t diag_read_buffer;
+ mpr_fw_diag_release_t diag_release;
+ int status = MPR_DIAG_SUCCESS;
+ uint32_t original_return_code;
+
+ original_return_code = *return_code;
+ *return_code = MPR_FW_DIAG_ERROR_SUCCESS;
+
+ switch (action) {
+ case MPR_FW_DIAG_TYPE_REGISTER:
+ if (!length) {
+ *return_code =
+ MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_register,
+ sizeof(diag_register)) != 0)
+ return (MPR_DIAG_FAILURE);
+ status = mpr_diag_register(sc, &diag_register,
+ return_code);
+ break;
+
+ case MPR_FW_DIAG_TYPE_UNREGISTER:
+ if (length < sizeof(diag_unregister)) {
+ *return_code =
+ MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_unregister,
+ sizeof(diag_unregister)) != 0)
+ return (MPR_DIAG_FAILURE);
+ status = mpr_diag_unregister(sc, &diag_unregister,
+ return_code);
+ break;
+
+ case MPR_FW_DIAG_TYPE_QUERY:
+ if (length < sizeof (diag_query)) {
+ *return_code =
+ MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_query, sizeof(diag_query))
+ != 0)
+ return (MPR_DIAG_FAILURE);
+ status = mpr_diag_query(sc, &diag_query, return_code);
+ if (status == MPR_DIAG_SUCCESS)
+ if (copyout(&diag_query, diag_action,
+ sizeof (diag_query)) != 0)
+ return (MPR_DIAG_FAILURE);
+ break;
+
+ case MPR_FW_DIAG_TYPE_READ_BUFFER:
+ if (copyin(diag_action, &diag_read_buffer,
+ sizeof(diag_read_buffer)) != 0)
+ return (MPR_DIAG_FAILURE);
+ if (length < diag_read_buffer.BytesToRead) {
+ *return_code =
+ MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+ status = mpr_diag_read_buffer(sc, &diag_read_buffer,
+ PTRIN(diag_read_buffer.PtrDataBuffer),
+ return_code);
+ if (status == MPR_DIAG_SUCCESS) {
+ if (copyout(&diag_read_buffer, diag_action,
+ sizeof(diag_read_buffer) -
+ sizeof(diag_read_buffer.PtrDataBuffer)) !=
+ 0)
+ return (MPR_DIAG_FAILURE);
+ }
+ break;
+
+ case MPR_FW_DIAG_TYPE_RELEASE:
+ if (length < sizeof(diag_release)) {
+ *return_code =
+ MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+ if (copyin(diag_action, &diag_release,
+ sizeof(diag_release)) != 0)
+ return (MPR_DIAG_FAILURE);
+ status = mpr_diag_release(sc, &diag_release,
+ return_code);
+ break;
+
+ default:
+ *return_code = MPR_FW_DIAG_ERROR_INVALID_PARAMETER;
+ status = MPR_DIAG_FAILURE;
+ break;
+ }
+
+ if ((status == MPR_DIAG_FAILURE) &&
+ (original_return_code == MPR_FW_DIAG_NEW) &&
+ (*return_code != MPR_FW_DIAG_ERROR_SUCCESS))
+ status = MPR_DIAG_SUCCESS;
+
+ return (status);
+}
+
+static int
+mpr_user_diag_action(struct mpr_softc *sc, mpr_diag_action_t *data)
+{
+ int status;
+
+ /*
+	 * Only allow one diag action at a time.
+ */
+ if (sc->mpr_flags & MPR_FLAGS_BUSY) {
+ mpr_dprint(sc, MPR_USER, "%s: Only one FW diag command "
+ "allowed at a single time.", __func__);
+ return (EBUSY);
+ }
+ sc->mpr_flags |= MPR_FLAGS_BUSY;
+
+ /*
+ * Send diag action request
+ */
+ if (data->Action == MPR_FW_DIAG_TYPE_REGISTER ||
+ data->Action == MPR_FW_DIAG_TYPE_UNREGISTER ||
+ data->Action == MPR_FW_DIAG_TYPE_QUERY ||
+ data->Action == MPR_FW_DIAG_TYPE_READ_BUFFER ||
+ data->Action == MPR_FW_DIAG_TYPE_RELEASE) {
+ status = mpr_do_diag_action(sc, data->Action,
+ PTRIN(data->PtrDiagAction), data->Length,
+ &data->ReturnCode);
+ } else
+ status = EINVAL;
+
+ sc->mpr_flags &= ~MPR_FLAGS_BUSY;
+ return (status);
+}
+
+/*
+ * Copy the event recording mask and the event queue size out. For
+ * clarification, the event recording mask (events_to_record) is not the same
+ * thing as the event mask (event_mask). events_to_record has a bit set for
+ * every event type that is to be recorded by the driver, and event_mask has a
+ * bit cleared for every event that is allowed into the driver from the IOC.
+ * They really have nothing to do with each other.
+ */
+static void
+mpr_user_event_query(struct mpr_softc *sc, mpr_event_query_t *data)
+{
+ uint8_t i;
+
+ mpr_lock(sc);
+ data->Entries = MPR_EVENT_QUEUE_SIZE;
+
+ for (i = 0; i < 4; i++) {
+ data->Types[i] = sc->events_to_record[i];
+ }
+ mpr_unlock(sc);
+}
+
+/*
+ * Set the driver's event mask according to what's been given. See
+ * mpr_user_event_query for explanation of the event recording mask and the IOC
+ * event mask. It's the app's responsibility to enable event logging by setting
+ * the bits in events_to_record. Initially, no events will be logged.
+ */
+static void
+mpr_user_event_enable(struct mpr_softc *sc, mpr_event_enable_t *data)
+{
+ uint8_t i;
+
+ mpr_lock(sc);
+ for (i = 0; i < 4; i++) {
+ sc->events_to_record[i] = data->Types[i];
+ }
+ mpr_unlock(sc);
+}
+
+/*
+ * Copy out the events that have been recorded, up to the max events allowed.
+ */
+static int
+mpr_user_event_report(struct mpr_softc *sc, mpr_event_report_t *data)
+{
+ int status = 0;
+ uint32_t size;
+
+ mpr_lock(sc);
+ size = data->Size;
+ if ((size >= sizeof(sc->recorded_events)) && (status == 0)) {
+ mpr_unlock(sc);
+ if (copyout((void *)sc->recorded_events,
+ PTRIN(data->PtrEvents), size) != 0)
+ status = EFAULT;
+ mpr_lock(sc);
+ } else {
+ /*
+ * data->Size value is not large enough to copy event data.
+ */
+ status = EFAULT;
+ }
+
+ /*
+ * Change size value to match the number of bytes that were copied.
+ */
+ if (status == 0)
+ data->Size = sizeof(sc->recorded_events);
+ mpr_unlock(sc);
+
+ return (status);
+}
+
+/*
+ * Record events into the driver from the IOC if they are not masked.
+ */
+void
+mprsas_record_event(struct mpr_softc *sc,
+ MPI2_EVENT_NOTIFICATION_REPLY *event_reply)
+{
+ uint32_t event;
+ int i, j;
+ uint16_t event_data_len;
+ boolean_t sendAEN = FALSE;
+
+ event = event_reply->Event;
+
+ /*
+ * Generate a system event to let anyone who cares know that a
+ * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
+ * event mask is set to.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ sendAEN = TRUE;
+ }
+
+ /*
+ * Record the event only if its corresponding bit is set in
+ * events_to_record. event_index is the index into recorded_events and
+ * event_number is the overall number of an event being recorded since
+ * start-of-day. event_index will roll over; event_number will never
+ * roll over.
+ */
+ i = (uint8_t)(event / 32);
+ j = (uint8_t)(event % 32);
+ if ((i < 4) && ((1 << j) & sc->events_to_record[i])) {
+ i = sc->event_index;
+ sc->recorded_events[i].Type = event;
+ sc->recorded_events[i].Number = ++sc->event_number;
+ bzero(sc->recorded_events[i].Data, MPR_MAX_EVENT_DATA_LENGTH *
+ 4);
+ event_data_len = event_reply->EventDataLength;
+
+ if (event_data_len > 0) {
+ /*
+			 * Limit data to the size of a recorded_events entry
+ */
+ if (event_data_len > MPR_MAX_EVENT_DATA_LENGTH) {
+ event_data_len = MPR_MAX_EVENT_DATA_LENGTH;
+ }
+ for (j = 0; j < event_data_len; j++) {
+ sc->recorded_events[i].Data[j] =
+ event_reply->EventData[j];
+ }
+
+ /*
+ * check for index wrap-around
+ */
+ if (++i == MPR_EVENT_QUEUE_SIZE) {
+ i = 0;
+ }
+ sc->event_index = (uint8_t)i;
+
+ /*
+ * Set flag to send the event.
+ */
+ sendAEN = TRUE;
+ }
+ }
+
+ /*
+ * Generate a system event if flag is set to let anyone who cares know
+ * that an event has occurred.
+ */
+ if (sendAEN) {
+//SLM-how to send a system event (see kqueue, kevent)
+// (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
+// "SAS", NULL, NULL, DDI_NOSLEEP);
+ }
+}
+
+static int
+mpr_user_reg_access(struct mpr_softc *sc, mpr_reg_access_t *data)
+{
+ int status = 0;
+
+ switch (data->Command) {
+ /*
+ * IO access is not supported.
+ */
+ case REG_IO_READ:
+ case REG_IO_WRITE:
+ mpr_dprint(sc, MPR_USER, "IO access is not supported. "
+ "Use memory access.");
+ status = EINVAL;
+ break;
+
+ case REG_MEM_READ:
+ data->RegData = mpr_regread(sc, data->RegOffset);
+ break;
+
+ case REG_MEM_WRITE:
+ mpr_regwrite(sc, data->RegOffset, data->RegData);
+ break;
+
+ default:
+ status = EINVAL;
+ break;
+ }
+
+ return (status);
+}
+
+static int
+mpr_user_btdh(struct mpr_softc *sc, mpr_btdh_mapping_t *data)
+{
+ uint8_t bt2dh = FALSE;
+ uint8_t dh2bt = FALSE;
+ uint16_t dev_handle, bus, target;
+
+ bus = data->Bus;
+ target = data->TargetID;
+ dev_handle = data->DevHandle;
+
+ /*
+ * When DevHandle is 0xFFFF and Bus/Target are not 0xFFFF, use Bus/
+ * Target to get DevHandle. When Bus/Target are 0xFFFF and DevHandle is
+ * not 0xFFFF, use DevHandle to get Bus/Target. Anything else is
+ * invalid.
+ */
+ if ((bus == 0xFFFF) && (target == 0xFFFF) && (dev_handle != 0xFFFF))
+ dh2bt = TRUE;
+ if ((dev_handle == 0xFFFF) && (bus != 0xFFFF) && (target != 0xFFFF))
+ bt2dh = TRUE;
+ if (!dh2bt && !bt2dh)
+ return (EINVAL);
+
+ /*
+ * Only handle bus of 0. Make sure target is within range.
+ */
+ if (bt2dh) {
+ if (bus != 0)
+ return (EINVAL);
+
+ if (target > sc->max_devices) {
+ mpr_dprint(sc, MPR_FAULT, "Target ID is out of range "
+ "for Bus/Target to DevHandle mapping.");
+ return (EINVAL);
+ }
+ dev_handle = sc->mapping_table[target].dev_handle;
+ if (dev_handle)
+ data->DevHandle = dev_handle;
+ } else {
+ bus = 0;
+ target = mpr_mapping_get_sas_id_from_handle(sc, dev_handle);
+ data->Bus = bus;
+ data->TargetID = target;
+ }
+
+ return (0);
+}
+
+static int
+mpr_ioctl(struct cdev *dev, u_long cmd, void *arg, int flag,
+ struct thread *td)
+{
+ struct mpr_softc *sc;
+ struct mpr_cfg_page_req *page_req;
+ struct mpr_ext_cfg_page_req *ext_page_req;
+ void *mpr_page;
+ int error, msleep_ret;
+
+ mpr_page = NULL;
+ sc = dev->si_drv1;
+ page_req = (void *)arg;
+ ext_page_req = (void *)arg;
+
+ switch (cmd) {
+ case MPRIO_READ_CFG_HEADER:
+ mpr_lock(sc);
+ error = mpr_user_read_cfg_header(sc, page_req);
+ mpr_unlock(sc);
+ break;
+ case MPRIO_READ_CFG_PAGE:
+ mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK | M_ZERO);
+ if (!mpr_page) {
+ mpr_printf(sc, "Cannot allocate memory %s %d\n",
+ __func__, __LINE__);
+ return (ENOMEM);
+ }
+ error = copyin(page_req->buf, mpr_page,
+ sizeof(MPI2_CONFIG_PAGE_HEADER));
+ if (error)
+ break;
+ mpr_lock(sc);
+ error = mpr_user_read_cfg_page(sc, page_req, mpr_page);
+ mpr_unlock(sc);
+ if (error)
+ break;
+ error = copyout(mpr_page, page_req->buf, page_req->len);
+ break;
+ case MPRIO_READ_EXT_CFG_HEADER:
+ mpr_lock(sc);
+ error = mpr_user_read_extcfg_header(sc, ext_page_req);
+ mpr_unlock(sc);
+ break;
+ case MPRIO_READ_EXT_CFG_PAGE:
+ mpr_page = malloc(ext_page_req->len, M_MPRUSER,
+ M_WAITOK | M_ZERO);
+ if (!mpr_page) {
+ mpr_printf(sc, "Cannot allocate memory %s %d\n",
+ __func__, __LINE__);
+ return (ENOMEM);
+ }
+ error = copyin(ext_page_req->buf, mpr_page,
+ sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER));
+ if (error)
+ break;
+ mpr_lock(sc);
+ error = mpr_user_read_extcfg_page(sc, ext_page_req, mpr_page);
+ mpr_unlock(sc);
+ if (error)
+ break;
+ error = copyout(mpr_page, ext_page_req->buf, ext_page_req->len);
+ break;
+ case MPRIO_WRITE_CFG_PAGE:
+ mpr_page = malloc(page_req->len, M_MPRUSER, M_WAITOK|M_ZERO);
+ if (!mpr_page) {
+ mpr_printf(sc, "Cannot allocate memory %s %d\n",
+ __func__, __LINE__);
+ return (ENOMEM);
+ }
+ error = copyin(page_req->buf, mpr_page, page_req->len);
+ if (error)
+ break;
+ mpr_lock(sc);
+ error = mpr_user_write_cfg_page(sc, page_req, mpr_page);
+ mpr_unlock(sc);
+ break;
+ case MPRIO_MPR_COMMAND:
+ error = mpr_user_command(sc, (struct mpr_usr_command *)arg);
+ break;
+ case MPTIOCTL_PASS_THRU:
+ /*
+ * The user has requested to pass through a command to be
+ * executed by the MPT firmware. Call our routine which does
+		 * this. Only allow one passthru IOCTL at a time.
+ */
+ error = mpr_user_pass_thru(sc, (mpr_pass_thru_t *)arg);
+ break;
+ case MPTIOCTL_GET_ADAPTER_DATA:
+ /*
+ * The user has requested to read adapter data. Call our
+ * routine which does this.
+ */
+ error = 0;
+ mpr_user_get_adapter_data(sc, (mpr_adapter_data_t *)arg);
+ break;
+ case MPTIOCTL_GET_PCI_INFO:
+ /*
+ * The user has requested to read pci info. Call
+ * our routine which does this.
+ */
+ mpr_lock(sc);
+ error = 0;
+ mpr_user_read_pci_info(sc, (mpr_pci_info_t *)arg);
+ mpr_unlock(sc);
+ break;
+ case MPTIOCTL_RESET_ADAPTER:
+ mpr_lock(sc);
+ sc->port_enable_complete = 0;
+ uint32_t reinit_start = time_uptime;
+ error = mpr_reinit(sc);
+		/* Wait up to 300 seconds for port enable to complete. */
+ msleep_ret = msleep(&sc->port_enable_complete, &sc->mpr_mtx,
+ PRIBIO, "mpr_porten", 300 * hz);
+ mpr_unlock(sc);
+ if (msleep_ret)
+ printf("Port Enable did not complete after Diag "
+ "Reset msleep error %d.\n", msleep_ret);
+ else
+ mpr_dprint(sc, MPR_USER, "Hard Reset with Port Enable "
+ "completed in %d seconds.\n",
+ (uint32_t)(time_uptime - reinit_start));
+ break;
+ case MPTIOCTL_DIAG_ACTION:
+ /*
+ * The user has done a diag buffer action. Call our routine
+		 * which does this. Only allow one diag action at a time.
+ */
+ mpr_lock(sc);
+ error = mpr_user_diag_action(sc, (mpr_diag_action_t *)arg);
+ mpr_unlock(sc);
+ break;
+ case MPTIOCTL_EVENT_QUERY:
+ /*
+ * The user has done an event query. Call our routine which does
+ * this.
+ */
+ error = 0;
+ mpr_user_event_query(sc, (mpr_event_query_t *)arg);
+ break;
+ case MPTIOCTL_EVENT_ENABLE:
+ /*
+ * The user has done an event enable. Call our routine which
+ * does this.
+ */
+ error = 0;
+ mpr_user_event_enable(sc, (mpr_event_enable_t *)arg);
+ break;
+ case MPTIOCTL_EVENT_REPORT:
+ /*
+ * The user has done an event report. Call our routine which
+ * does this.
+ */
+ error = mpr_user_event_report(sc, (mpr_event_report_t *)arg);
+ break;
+ case MPTIOCTL_REG_ACCESS:
+ /*
+ * The user has requested register access. Call our routine
+ * which does this.
+ */
+ mpr_lock(sc);
+ error = mpr_user_reg_access(sc, (mpr_reg_access_t *)arg);
+ mpr_unlock(sc);
+ break;
+ case MPTIOCTL_BTDH_MAPPING:
+ /*
+ * The user has requested to translate a bus/target to a
+ * DevHandle or a DevHandle to a bus/target. Call our routine
+ * which does this.
+ */
+ error = mpr_user_btdh(sc, (mpr_btdh_mapping_t *)arg);
+ break;
+ default:
+ error = ENOIOCTL;
+ break;
+ }
+
+ if (mpr_page != NULL)
+ free(mpr_page, M_MPRUSER);
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+
+struct mpr_cfg_page_req32 {
+ MPI2_CONFIG_PAGE_HEADER header;
+ uint32_t page_address;
+ uint32_t buf;
+ int len;
+ uint16_t ioc_status;
+};
+
+struct mpr_ext_cfg_page_req32 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER header;
+ uint32_t page_address;
+ uint32_t buf;
+ int len;
+ uint16_t ioc_status;
+};
+
+struct mpr_raid_action32 {
+ uint8_t action;
+ uint8_t volume_bus;
+ uint8_t volume_id;
+ uint8_t phys_disk_num;
+ uint32_t action_data_word;
+ uint32_t buf;
+ int len;
+ uint32_t volume_status;
+ uint32_t action_data[4];
+ uint16_t action_status;
+ uint16_t ioc_status;
+ uint8_t write;
+};
+
+struct mpr_usr_command32 {
+ uint32_t req;
+ uint32_t req_len;
+ uint32_t rpl;
+ uint32_t rpl_len;
+ uint32_t buf;
+ int len;
+ uint32_t flags;
+};
+
+#define MPRIO_READ_CFG_HEADER32 _IOWR('M', 200, struct mpr_cfg_page_req32)
+#define MPRIO_READ_CFG_PAGE32 _IOWR('M', 201, struct mpr_cfg_page_req32)
+#define MPRIO_READ_EXT_CFG_HEADER32 _IOWR('M', 202, struct mpr_ext_cfg_page_req32)
+#define MPRIO_READ_EXT_CFG_PAGE32 _IOWR('M', 203, struct mpr_ext_cfg_page_req32)
+#define MPRIO_WRITE_CFG_PAGE32 _IOWR('M', 204, struct mpr_cfg_page_req32)
+#define MPRIO_RAID_ACTION32 _IOWR('M', 205, struct mpr_raid_action32)
+#define MPRIO_MPR_COMMAND32 _IOWR('M', 210, struct mpr_usr_command32)
+
+static int
+mpr_ioctl32(struct cdev *dev, u_long cmd32, void *_arg, int flag,
+ struct thread *td)
+{
+ struct mpr_cfg_page_req32 *page32 = _arg;
+ struct mpr_ext_cfg_page_req32 *ext32 = _arg;
+ struct mpr_raid_action32 *raid32 = _arg;
+ struct mpr_usr_command32 *user32 = _arg;
+ union {
+ struct mpr_cfg_page_req page;
+ struct mpr_ext_cfg_page_req ext;
+ struct mpr_raid_action raid;
+ struct mpr_usr_command user;
+ } arg;
+ u_long cmd;
+ int error;
+
+ switch (cmd32) {
+ case MPRIO_READ_CFG_HEADER32:
+ case MPRIO_READ_CFG_PAGE32:
+ case MPRIO_WRITE_CFG_PAGE32:
+ if (cmd32 == MPRIO_READ_CFG_HEADER32)
+ cmd = MPRIO_READ_CFG_HEADER;
+ else if (cmd32 == MPRIO_READ_CFG_PAGE32)
+ cmd = MPRIO_READ_CFG_PAGE;
+ else
+ cmd = MPRIO_WRITE_CFG_PAGE;
+ CP(*page32, arg.page, header);
+ CP(*page32, arg.page, page_address);
+ PTRIN_CP(*page32, arg.page, buf);
+ CP(*page32, arg.page, len);
+ CP(*page32, arg.page, ioc_status);
+ break;
+
+ case MPRIO_READ_EXT_CFG_HEADER32:
+ case MPRIO_READ_EXT_CFG_PAGE32:
+ if (cmd32 == MPRIO_READ_EXT_CFG_HEADER32)
+ cmd = MPRIO_READ_EXT_CFG_HEADER;
+ else
+ cmd = MPRIO_READ_EXT_CFG_PAGE;
+ CP(*ext32, arg.ext, header);
+ CP(*ext32, arg.ext, page_address);
+ PTRIN_CP(*ext32, arg.ext, buf);
+ CP(*ext32, arg.ext, len);
+ CP(*ext32, arg.ext, ioc_status);
+ break;
+
+ case MPRIO_RAID_ACTION32:
+ cmd = MPRIO_RAID_ACTION;
+ CP(*raid32, arg.raid, action);
+ CP(*raid32, arg.raid, volume_bus);
+ CP(*raid32, arg.raid, volume_id);
+ CP(*raid32, arg.raid, phys_disk_num);
+ CP(*raid32, arg.raid, action_data_word);
+ PTRIN_CP(*raid32, arg.raid, buf);
+ CP(*raid32, arg.raid, len);
+ CP(*raid32, arg.raid, volume_status);
+ bcopy(raid32->action_data, arg.raid.action_data,
+ sizeof arg.raid.action_data);
+ CP(*raid32, arg.raid, ioc_status);
+ CP(*raid32, arg.raid, write);
+ break;
+
+ case MPRIO_MPR_COMMAND32:
+ cmd = MPRIO_MPR_COMMAND;
+ PTRIN_CP(*user32, arg.user, req);
+ CP(*user32, arg.user, req_len);
+ PTRIN_CP(*user32, arg.user, rpl);
+ CP(*user32, arg.user, rpl_len);
+ PTRIN_CP(*user32, arg.user, buf);
+ CP(*user32, arg.user, len);
+ CP(*user32, arg.user, flags);
+ break;
+ default:
+ return (ENOIOCTL);
+ }
+
+ error = mpr_ioctl(dev, cmd, &arg, flag, td);
+ if (error == 0 && (cmd32 & IOC_OUT) != 0) {
+ switch (cmd32) {
+ case MPRIO_READ_CFG_HEADER32:
+ case MPRIO_READ_CFG_PAGE32:
+ case MPRIO_WRITE_CFG_PAGE32:
+ CP(arg.page, *page32, header);
+ CP(arg.page, *page32, page_address);
+ PTROUT_CP(arg.page, *page32, buf);
+ CP(arg.page, *page32, len);
+ CP(arg.page, *page32, ioc_status);
+ break;
+
+ case MPRIO_READ_EXT_CFG_HEADER32:
+ case MPRIO_READ_EXT_CFG_PAGE32:
+ CP(arg.ext, *ext32, header);
+ CP(arg.ext, *ext32, page_address);
+ PTROUT_CP(arg.ext, *ext32, buf);
+ CP(arg.ext, *ext32, len);
+ CP(arg.ext, *ext32, ioc_status);
+ break;
+
+ case MPRIO_RAID_ACTION32:
+ CP(arg.raid, *raid32, action);
+ CP(arg.raid, *raid32, volume_bus);
+ CP(arg.raid, *raid32, volume_id);
+ CP(arg.raid, *raid32, phys_disk_num);
+ CP(arg.raid, *raid32, action_data_word);
+ PTROUT_CP(arg.raid, *raid32, buf);
+ CP(arg.raid, *raid32, len);
+ CP(arg.raid, *raid32, volume_status);
+ bcopy(arg.raid.action_data, raid32->action_data,
+ sizeof arg.raid.action_data);
+ CP(arg.raid, *raid32, ioc_status);
+ CP(arg.raid, *raid32, write);
+ break;
+
+ case MPRIO_MPR_COMMAND32:
+ PTROUT_CP(arg.user, *user32, req);
+ CP(arg.user, *user32, req_len);
+ PTROUT_CP(arg.user, *user32, rpl);
+ CP(arg.user, *user32, rpl_len);
+ PTROUT_CP(arg.user, *user32, buf);
+ CP(arg.user, *user32, len);
+ CP(arg.user, *user32, flags);
+ break;
+ }
+ }
+
+ return (error);
+}
+#endif /* COMPAT_FREEBSD32 */
+
+static int
+mpr_ioctl_devsw(struct cdev *dev, u_long com, caddr_t arg, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ if (SV_CURPROC_FLAG(SV_ILP32))
+ return (mpr_ioctl32(dev, com, arg, flag, td));
+#endif
+ return (mpr_ioctl(dev, com, arg, flag, td));
+}
diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h
new file mode 100644
index 0000000000000..9752dcd35f6f4
--- /dev/null
+++ b/sys/dev/mpr/mprvar.h
@@ -0,0 +1,766 @@
+/*-
+ * Copyright (c) 2009 Yahoo! Inc.
+ * Copyright (c) 2011-2014 LSI Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MPRVAR_H
+#define _MPRVAR_H
+
+#define MPR_DRIVER_VERSION "05.255.05.00-fbsd"
+
+#define MPR_DB_MAX_WAIT 2500
+
+#define MPR_REQ_FRAMES 1024
+#define MPR_EVT_REPLY_FRAMES 32
+#define MPR_REPLY_FRAMES MPR_REQ_FRAMES
+#define MPR_CHAIN_FRAMES 2048
+#define MPR_SENSE_LEN SSD_FULL_SIZE
+#define MPR_MSI_COUNT 1
+#define MPR_SGE64_SIZE 12
+#define MPR_SGE32_SIZE 8
+#define MPR_SGC_SIZE 8
+
+#define MPR_FUNCTRACE(sc) \
+ mpr_dprint((sc), MPR_TRACE, "%s\n", __func__)
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define MPR_PERIODIC_DELAY 1 /* 1 second heartbeat/watchdog check */
+
+#define IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED 0x2810
+
+#define MPR_SCSI_RI_INVALID_FRAME (0x00000002)
+#define MPR_STRING_LENGTH 64
+
+#include <sys/endian.h>
+
+/*
+ * host mapping related macro definitions
+ */
+#define MPR_MAPTABLE_BAD_IDX 0xFFFFFFFF
+#define MPR_DPM_BAD_IDX 0xFFFF
+#define MPR_ENCTABLE_BAD_IDX 0xFF
+#define MPR_MAX_MISSING_COUNT 0x0F
+#define MPR_DEV_RESERVED 0x20000000
+#define MPR_MAP_IN_USE 0x10000000
+#define MPR_RAID_CHANNEL 1
+#define MPR_MAP_BAD_ID 0xFFFFFFFF
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+/**
+ * struct dev_mapping_table - device mapping information
+ * @physical_id: SAS address for drives or WWID for RAID volumes
+ * @device_info: bitfield provides detailed info about the device
+ * @phy_bits: bitfields indicating controller phys
+ * @dpm_entry_num: index of this device in device persistent map table
+ * @dev_handle: device handle for the device pointed by this entry
+ * @channel: target channel
+ * @id: target id
+ * @missing_count: number of times the device not detected by driver
+ * @hide_flag: Hide this physical disk/not (foreign configuration)
+ * @init_complete: Whether the start of the day checks completed or not
+ * @TLR_bits: Turn TLR support on or off
+ */
+struct dev_mapping_table {
+ u64 physical_id;
+ u32 device_info;
+ u32 phy_bits;
+ u16 dpm_entry_num;
+ u16 dev_handle;
+ u8 reserved1;
+ u8 channel;
+ u16 id;
+ u8 missing_count;
+ u8 init_complete;
+ u8 TLR_bits;
+ u8 reserved2;
+};
+
+/**
+ * struct enc_mapping_table - mapping information about an enclosure
+ * @enclosure_id: Logical ID of this enclosure
+ * @start_index: index to the entry in dev_mapping_table
+ * @phy_bits: bitfields indicating controller phys
+ * @dpm_entry_num: index of this enclosure in device persistent map table
+ * @enc_handle: device handle for the enclosure pointed by this entry
+ * @num_slots: number of slots in the enclosure
+ * @start_slot: Starting slot id
+ * @missing_count: number of times the device not detected by driver
+ * @removal_flag: used to mark the device for removal
+ * @skip_search: used as a flag to include/exclude enclosure for search
+ * @init_complete: Whether the start of the day checks completed or not
+ */
+struct enc_mapping_table {
+ u64 enclosure_id;
+ u32 start_index;
+ u32 phy_bits;
+ u16 dpm_entry_num;
+ u16 enc_handle;
+ u16 num_slots;
+ u16 start_slot;
+ u8 missing_count;
+ u8 removal_flag;
+ u8 skip_search;
+ u8 init_complete;
+};
+
+/**
+ * struct map_removal_table - entries to be removed from mapping table
+ * @dpm_entry_num: index of this device in device persistent map table
+ * @dev_handle: device handle for the device pointed by this entry
+ */
+struct map_removal_table{
+ u16 dpm_entry_num;
+ u16 dev_handle;
+};
+
+typedef struct mpr_fw_diagnostic_buffer {
+ size_t size;
+ uint8_t extended_type;
+ uint8_t buffer_type;
+ uint8_t force_release;
+ uint32_t product_specific[23];
+ uint8_t immediate;
+ uint8_t enabled;
+ uint8_t valid_data;
+ uint8_t owned_by_firmware;
+ uint32_t unique_id;
+} mpr_fw_diagnostic_buffer_t;
+
+struct mpr_softc;
+struct mpr_command;
+struct mprsas_softc;
+union ccb;
+struct mprsas_target;
+struct mpr_column_map;
+
+MALLOC_DECLARE(M_MPR);
+
+typedef void mpr_evt_callback_t(struct mpr_softc *, uintptr_t,
+ MPI2_EVENT_NOTIFICATION_REPLY *reply);
+typedef void mpr_command_callback_t(struct mpr_softc *, struct mpr_command *cm);
+
+struct mpr_chain {
+ TAILQ_ENTRY(mpr_chain) chain_link;
+ void *chain;
+ uint64_t chain_busaddr;
+};
+
+/*
+ * This needs to be at least 2 to support SMP passthrough.
+ */
+#define MPR_IOVEC_COUNT 2
+
+struct mpr_command {
+ TAILQ_ENTRY(mpr_command) cm_link;
+ TAILQ_ENTRY(mpr_command) cm_recovery;
+ struct mpr_softc *cm_sc;
+ union ccb *cm_ccb;
+ void *cm_data;
+ u_int cm_length;
+ u_int cm_out_len;
+ struct uio cm_uio;
+ struct iovec cm_iovec[MPR_IOVEC_COUNT];
+ u_int cm_max_segs;
+ u_int cm_sglsize;
+ void *cm_sge;
+ uint8_t *cm_req;
+ uint8_t *cm_reply;
+ uint32_t cm_reply_data;
+ mpr_command_callback_t *cm_complete;
+ void *cm_complete_data;
+ struct mprsas_target *cm_targ;
+ MPI2_REQUEST_DESCRIPTOR_UNION cm_desc;
+ u_int cm_lun;
+ u_int cm_flags;
+#define MPR_CM_FLAGS_POLLED (1 << 0)
+#define MPR_CM_FLAGS_COMPLETE (1 << 1)
+#define MPR_CM_FLAGS_SGE_SIMPLE (1 << 2)
+#define MPR_CM_FLAGS_DATAOUT (1 << 3)
+#define MPR_CM_FLAGS_DATAIN (1 << 4)
+#define MPR_CM_FLAGS_WAKEUP (1 << 5)
+#define MPR_CM_FLAGS_USE_UIO (1 << 6)
+#define MPR_CM_FLAGS_SMP_PASS (1 << 7)
+#define MPR_CM_FLAGS_CHAIN_FAILED (1 << 8)
+#define MPR_CM_FLAGS_ERROR_MASK MPR_CM_FLAGS_CHAIN_FAILED
+#define MPR_CM_FLAGS_USE_CCB (1 << 9)
+ u_int cm_state;
+#define MPR_CM_STATE_FREE 0
+#define MPR_CM_STATE_BUSY 1
+#define MPR_CM_STATE_TIMEDOUT 2
+ bus_dmamap_t cm_dmamap;
+ struct scsi_sense_data *cm_sense;
+ TAILQ_HEAD(, mpr_chain) cm_chain_list;
+ uint32_t cm_req_busaddr;
+ uint32_t cm_sense_busaddr;
+ struct callout cm_callout;
+};
+
+struct mpr_column_map {
+ uint16_t dev_handle;
+ uint8_t phys_disk_num;
+};
+
+struct mpr_event_handle {
+ TAILQ_ENTRY(mpr_event_handle) eh_list;
+ mpr_evt_callback_t *callback;
+ void *data;
+ uint8_t mask[16];
+};
+
+/*
+ * Per-controller software state for the mpr(4) driver.  One instance per
+ * adapter; most fields are presumably protected by mpr_mtx — verify per
+ * field against the code that touches it.
+ */
+struct mpr_softc {
+ device_t mpr_dev;
+ struct cdev *mpr_cdev;
+ u_int mpr_flags;
+#define MPR_FLAGS_INTX (1 << 0)
+#define MPR_FLAGS_MSI (1 << 1)
+#define MPR_FLAGS_BUSY (1 << 2)
+#define MPR_FLAGS_SHUTDOWN (1 << 3)
+#define MPR_FLAGS_DIAGRESET (1 << 4)
+#define MPR_FLAGS_ATTACH_DONE (1 << 5)
+ u_int mpr_debug;
+ u_int disable_msix;
+ u_int disable_msi;
+ int tm_cmds_active;
+ int io_cmds_active;
+ int io_cmds_highwater;
+ int chain_free;
+ int max_chains;
+ int chain_free_lowwater; /* lowest chain_free seen; see mpr_alloc_chain() */
+#if __FreeBSD_version >= 900030
+ uint64_t chain_alloc_fail;
+#endif
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ char fw_version[16];
+ struct mpr_command *commands;
+ struct mpr_chain *chains;
+ struct callout periodic;
+
+ struct mprsas_softc *sassc;
+ char tmp_string[MPR_STRING_LENGTH];
+ /* Free-command and free-chain pools; see mpr_alloc/free_command(). */
+ TAILQ_HEAD(, mpr_command) req_list;
+ TAILQ_HEAD(, mpr_command) high_priority_req_list;
+ TAILQ_HEAD(, mpr_chain) chain_list;
+ TAILQ_HEAD(, mpr_command) tm_list;
+ int replypostindex;
+ int replyfreeindex;
+
+ /* Register window (memory-mapped BAR). */
+ struct resource *mpr_regs_resource;
+ bus_space_handle_t mpr_bhandle;
+ bus_space_tag_t mpr_btag;
+ int mpr_regs_rid;
+
+ bus_dma_tag_t mpr_parent_dmat;
+ bus_dma_tag_t buffer_dmat;
+
+ MPI2_IOC_FACTS_REPLY *facts;
+ int num_reqs;
+ int num_replies;
+ int fqdepth; /* Free queue */
+ int pqdepth; /* Post queue */
+
+ uint8_t event_mask[16];
+ TAILQ_HEAD(, mpr_event_handle) event_list;
+ struct mpr_event_handle *mpr_log_eh;
+
+ struct mtx mpr_mtx;
+ struct intr_config_hook mpr_ich;
+ struct resource *mpr_irq[MPR_MSI_COUNT];
+ void *mpr_intrhand[MPR_MSI_COUNT];
+ int mpr_irq_rid[MPR_MSI_COUNT];
+
+ /* DMA-able request frame pool. */
+ uint8_t *req_frames;
+ bus_addr_t req_busaddr;
+ bus_dma_tag_t req_dmat;
+ bus_dmamap_t req_map;
+
+ /* DMA-able reply frame pool. */
+ uint8_t *reply_frames;
+ bus_addr_t reply_busaddr;
+ bus_dma_tag_t reply_dmat;
+ bus_dmamap_t reply_map;
+
+ /* DMA-able sense buffer pool. */
+ struct scsi_sense_data *sense_frames;
+ bus_addr_t sense_busaddr;
+ bus_dma_tag_t sense_dmat;
+ bus_dmamap_t sense_map;
+
+ /* DMA-able chain frame pool. */
+ uint8_t *chain_frames;
+ bus_addr_t chain_busaddr;
+ bus_dma_tag_t chain_dmat;
+ bus_dmamap_t chain_map;
+
+ /* Reply post and free queues shared with the IOC. */
+ MPI2_REPLY_DESCRIPTORS_UNION *post_queue;
+ bus_addr_t post_busaddr;
+ uint32_t *free_queue;
+ bus_addr_t free_busaddr;
+ bus_dma_tag_t queues_dmat;
+ bus_dmamap_t queues_map;
+
+ uint8_t *fw_diag_buffer;
+ bus_addr_t fw_diag_busaddr;
+ bus_dma_tag_t fw_diag_dmat;
+ bus_dmamap_t fw_diag_map;
+
+ uint8_t ir_firmware;
+
+ /* static config pages */
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage8_t iounit_pg8;
+
+ /* host mapping support */
+ struct dev_mapping_table *mapping_table;
+ struct enc_mapping_table *enclosure_table;
+ struct map_removal_table *removal_table;
+ uint8_t *dpm_entry_used;
+ uint8_t *dpm_flush_entry;
+ Mpi2DriverMappingPage0_t *dpm_pg0;
+ uint16_t max_devices;
+ uint16_t max_enclosures;
+ uint16_t max_expanders;
+ uint8_t max_volumes;
+ uint8_t num_enc_table_entries;
+ uint8_t num_rsvd_entries;
+ uint8_t num_channels;
+ uint16_t max_dpm_entries;
+ uint8_t is_dpm_enable;
+ uint8_t track_mapping_events;
+ uint32_t pending_map_events;
+ uint8_t mt_full_retry;
+ uint8_t mt_add_device_failed;
+
+ /* FW diag Buffer List */
+ mpr_fw_diagnostic_buffer_t
+ fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_COUNT];
+
+ /* Event Recording IOCTL support */
+ uint32_t events_to_record[4];
+ mpr_event_entry_t recorded_events[MPR_EVENT_QUEUE_SIZE];
+ uint8_t event_index;
+ uint32_t event_number;
+
+ /* EEDP and TLR support */
+ uint8_t eedp_enabled;
+ uint8_t control_TLR;
+
+ /* Shutdown Event Handler */
+ eventhandler_tag shutdown_eh;
+
+ /* To track topo events during reset */
+#define MPR_DIAG_RESET_TIMEOUT 300000
+ uint8_t wait_for_port_enable;
+ uint8_t port_enable_complete;
+ uint8_t msleep_fake_chan;
+
+ /* StartStopUnit command handling at shutdown */
+ uint32_t SSU_refcount;
+ uint8_t SSU_started;
+
+ char exclude_ids[80];
+ struct timeval lastfail;
+};
+
+/*
+ * Parameters for a configuration-page read/write request; consumed by
+ * mpr_read_config_page()/mpr_write_config_page().  "callback", if set,
+ * is invoked on completion with "cbdata" reachable through the struct.
+ */
+struct mpr_config_params {
+ MPI2_CONFIG_EXT_PAGE_HEADER_UNION hdr;
+ u_int action;
+ u_int page_address; /* Attributes, not a phys address */
+ u_int status;
+ void *buffer;
+ u_int length;
+ int timeout;
+ void (*callback)(struct mpr_softc *, struct mpr_config_params *);
+ void *cbdata;
+};
+
+/*
+ * Minimal layout of a SCSI READ CAPACITY response as used for EEDP
+ * probing: 8-byte LBA, 4-byte block length, then the byte carrying the
+ * protection information — field meanings presumed from the names;
+ * confirm against the SPC/SBC definition where it is consumed.
+ */
+struct scsi_read_capacity_eedp
+{
+ uint8_t addr[8];
+ uint8_t length[4];
+ uint8_t protect;
+};
+
+/* Read a 32-bit controller register at the given byte offset. */
+static __inline uint32_t
+mpr_regread(struct mpr_softc *sc, uint32_t offset)
+{
+ return (bus_space_read_4(sc->mpr_btag, sc->mpr_bhandle, offset));
+}
+
+/* Write a 32-bit controller register at the given byte offset. */
+static __inline void
+mpr_regwrite(struct mpr_softc *sc, uint32_t offset, uint32_t val)
+{
+ bus_space_write_4(sc->mpr_btag, sc->mpr_bhandle, offset, val);
+}
+
+/*
+ * Entries placed on free_queue must be stored little-endian.
+ * TODO: cm_reply_data appears to be unneeded and could be removed.
+ */
+/* Return a reply frame (by bus address) to the controller's free queue. */
+static __inline void
+mpr_free_reply(struct mpr_softc *sc, uint32_t busaddr)
+{
+ /* Advance the free index, wrapping at the free-queue depth. */
+ if (++sc->replyfreeindex >= sc->fqdepth)
+ sc->replyfreeindex = 0;
+ /* Queue entries are little-endian bus addresses. */
+ sc->free_queue[sc->replyfreeindex] = htole32(busaddr);
+ /* Notify the IOC of the newly freed entry. */
+ mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
+}
+
+/*
+ * Pop a chain frame from the free list, tracking the low-water mark.
+ * Returns NULL when the list is empty; on kernels new enough to expose
+ * the 64-bit sysctl, the failure counter is bumped in that case.
+ */
+static __inline struct mpr_chain *
+mpr_alloc_chain(struct mpr_softc *sc)
+{
+ struct mpr_chain *chain;
+
+ if ((chain = TAILQ_FIRST(&sc->chain_list)) != NULL) {
+ TAILQ_REMOVE(&sc->chain_list, chain, chain_link);
+ sc->chain_free--;
+ if (sc->chain_free < sc->chain_free_lowwater)
+ sc->chain_free_lowwater = sc->chain_free;
+ }
+ /* This "else" pairs with the "if" above; only the counter is conditional. */
+#if __FreeBSD_version >= 900030
+ else
+ sc->chain_alloc_fail++;
+#endif
+ return (chain);
+}
+
+/* Return a chain frame to the free list. */
+static __inline void
+mpr_free_chain(struct mpr_softc *sc, struct mpr_chain *chain)
+{
+/* Disabled debugging aid: scrub the frame before reuse. */
+#if 0
+ bzero(chain->chain, 128);
+#endif
+ sc->chain_free++;
+ TAILQ_INSERT_TAIL(&sc->chain_list, chain, chain_link);
+}
+
+/*
+ * Release a command back to the free pool: return its reply frame (if
+ * any) to the IOC, clear all per-request state, hand back any chain
+ * frames, and requeue on req_list.  Presumably called with mpr_mtx
+ * held, since the shared lists are touched — verify at call sites.
+ */
+static __inline void
+mpr_free_command(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ struct mpr_chain *chain, *chain_temp;
+
+ if (cm->cm_reply != NULL)
+ mpr_free_reply(sc, cm->cm_reply_data);
+ cm->cm_reply = NULL;
+ cm->cm_flags = 0;
+ cm->cm_complete = NULL;
+ cm->cm_complete_data = NULL;
+ cm->cm_ccb = NULL;
+ cm->cm_targ = NULL;
+ cm->cm_max_segs = 0;
+ cm->cm_lun = 0;
+ cm->cm_state = MPR_CM_STATE_FREE;
+ cm->cm_data = NULL;
+ cm->cm_length = 0;
+ cm->cm_out_len = 0;
+ cm->cm_sglsize = 0;
+ cm->cm_sge = NULL;
+
+ /* Give any attached chain frames back to the shared pool. */
+ TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) {
+ TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link);
+ mpr_free_chain(sc, chain);
+ }
+ TAILQ_INSERT_TAIL(&sc->req_list, cm, cm_link);
+}
+
+/*
+ * Take a command off the free pool, or return NULL when exhausted.
+ * Asserts the command was actually free before marking it busy.
+ */
+static __inline struct mpr_command *
+mpr_alloc_command(struct mpr_softc *sc)
+{
+ struct mpr_command *cm;
+
+ cm = TAILQ_FIRST(&sc->req_list);
+ if (cm == NULL)
+ return (NULL);
+
+ TAILQ_REMOVE(&sc->req_list, cm, cm_link);
+ KASSERT(cm->cm_state == MPR_CM_STATE_FREE, ("mpr: Allocating busy command\n"));
+ cm->cm_state = MPR_CM_STATE_BUSY;
+ return (cm);
+}
+
+/*
+ * As mpr_free_command(), but for commands drawn from the high-priority
+ * pool.  Note this variant does not clear cm_data/cm_length/cm_sge —
+ * presumably high-priority requests never carry those; confirm.
+ */
+static __inline void
+mpr_free_high_priority_command(struct mpr_softc *sc, struct mpr_command *cm)
+{
+ struct mpr_chain *chain, *chain_temp;
+
+ if (cm->cm_reply != NULL)
+ mpr_free_reply(sc, cm->cm_reply_data);
+ cm->cm_reply = NULL;
+ cm->cm_flags = 0;
+ cm->cm_complete = NULL;
+ cm->cm_complete_data = NULL;
+ cm->cm_ccb = NULL;
+ cm->cm_targ = NULL;
+ cm->cm_lun = 0;
+ cm->cm_state = MPR_CM_STATE_FREE;
+ TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, chain_temp) {
+ TAILQ_REMOVE(&cm->cm_chain_list, chain, chain_link);
+ mpr_free_chain(sc, chain);
+ }
+ TAILQ_INSERT_TAIL(&sc->high_priority_req_list, cm, cm_link);
+}
+
+/*
+ * Take a command off the high-priority free pool, or return NULL when
+ * exhausted.  Mirrors mpr_alloc_command().
+ */
+static __inline struct mpr_command *
+mpr_alloc_high_priority_command(struct mpr_softc *sc)
+{
+ struct mpr_command *cm;
+
+ cm = TAILQ_FIRST(&sc->high_priority_req_list);
+ if (cm == NULL)
+ return (NULL);
+
+ TAILQ_REMOVE(&sc->high_priority_req_list, cm, cm_link);
+ KASSERT(cm->cm_state == MPR_CM_STATE_FREE, ("mpr: Allocating busy command\n"));
+ cm->cm_state = MPR_CM_STATE_BUSY;
+ return (cm);
+}
+
+/* Acquire the per-controller mutex. */
+static __inline void
+mpr_lock(struct mpr_softc *sc)
+{
+ mtx_lock(&sc->mpr_mtx);
+}
+
+/* Release the per-controller mutex. */
+static __inline void
+mpr_unlock(struct mpr_softc *sc)
+{
+ mtx_unlock(&sc->mpr_mtx);
+}
+
+#define MPR_INFO (1 << 0) /* Basic info */
+#define MPR_FAULT (1 << 1) /* Hardware faults */
+#define MPR_EVENT (1 << 2) /* Event data from the controller */
+#define MPR_LOG (1 << 3) /* Log data from the controller */
+#define MPR_RECOVERY (1 << 4) /* Command error recovery tracing */
+#define MPR_ERROR (1 << 5) /* Parameter errors, programming bugs */
+#define MPR_INIT (1 << 6) /* Things related to system init */
+#define MPR_XINFO (1 << 7) /* More detailed/noisy info */
+#define MPR_USER (1 << 8) /* Trace user-generated commands */
+#define MPR_MAPPING (1 << 9) /* Trace device mappings */
+#define MPR_TRACE (1 << 10) /* Function-by-function trace */
+
+#define mpr_printf(sc, args...) \
+ device_printf((sc)->mpr_dev, ##args)
+
+#define mpr_vprintf(sc, args...) \
+do { \
+ if (bootverbose) \
+ mpr_printf(sc, ##args); \
+} while (0)
+
+/*
+ * Print to the console when the given debug level bit is enabled in
+ * sc->mpr_debug.  "level" is parenthesized in the expansion so callers
+ * may safely pass compound expressions such as (MPR_INFO | MPR_ERROR).
+ */
+#define mpr_dprint(sc, level, msg, args...) \
+do { \
+ if ((sc)->mpr_debug & (level)) \
+ device_printf((sc)->mpr_dev, msg, ##args); \
+} while (0)
+
+/* As mpr_dprint(), but tab-indented and without the device prefix. */
+#define mpr_dprint_field(sc, level, msg, args...) \
+do { \
+ if ((sc)->mpr_debug & (level)) \
+ printf("\t" msg, ##args); \
+} while (0)
+
+#define MPR_PRINTFIELD_START(sc, tag...) \
+ mpr_dprint((sc), MPR_INFO, ##tag); \
+ mpr_dprint_field((sc), MPR_INFO, ":\n")
+#define MPR_PRINTFIELD_END(sc, tag) \
+ mpr_dprint((sc), MPR_INFO, tag "\n")
+#define MPR_PRINTFIELD(sc, facts, attr, fmt) \
+ mpr_dprint_field((sc), MPR_INFO, #attr ": " #fmt "\n", (facts)->attr)
+
+#define MPR_EVENTFIELD_START(sc, tag...) \
+ mpr_dprint((sc), MPR_EVENT, ##tag); \
+ mpr_dprint_field((sc), MPR_EVENT, ":\n")
+#define MPR_EVENTFIELD(sc, facts, attr, fmt) \
+ mpr_dprint_field((sc), MPR_EVENT, #attr ": " #fmt "\n", (facts)->attr)
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+/* Split a host 64-bit value into the little-endian High/Low halves of a U64. */
+static __inline void
+mpr_from_u64(uint64_t data, U64 *mpr)
+{
+ (mpr)->High = htole32((uint32_t)((data) >> 32));
+ (mpr)->Low = htole32((uint32_t)((data) & 0xffffffff));
+}
+
+/* Reassemble a little-endian U64 into a host 64-bit value. */
+static __inline uint64_t
+mpr_to_u64(U64 *data)
+{
+
+ return (((uint64_t)le32toh(data->High) << 32) | le32toh(data->Low));
+}
+
+/* Disable reply interrupts via a read-modify-write of the mask register. */
+static __inline void
+mpr_mask_intr(struct mpr_softc *sc)
+{
+ uint32_t mask;
+
+ mask = mpr_regread(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET);
+ mask |= MPI2_HIM_REPLY_INT_MASK;
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET, mask);
+}
+
+/* Re-enable reply interrupts; inverse of mpr_mask_intr(). */
+static __inline void
+mpr_unmask_intr(struct mpr_softc *sc)
+{
+ uint32_t mask;
+
+ mask = mpr_regread(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET);
+ mask &= ~MPI2_HIM_REPLY_INT_MASK;
+ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_MASK_OFFSET, mask);
+}
+
+int mpr_pci_setup_interrupts(struct mpr_softc *sc);
+int mpr_pci_restore(struct mpr_softc *sc);
+
+int mpr_attach(struct mpr_softc *sc);
+int mpr_free(struct mpr_softc *sc);
+void mpr_intr(void *);
+void mpr_intr_msi(void *);
+void mpr_intr_locked(void *);
+int mpr_register_events(struct mpr_softc *, uint8_t *, mpr_evt_callback_t *,
+ void *, struct mpr_event_handle **);
+int mpr_restart(struct mpr_softc *);
+int mpr_update_events(struct mpr_softc *, struct mpr_event_handle *,
+ uint8_t *);
+int mpr_deregister_events(struct mpr_softc *, struct mpr_event_handle *);
+int mpr_push_sge(struct mpr_command *, MPI2_SGE_SIMPLE64 *, size_t, int);
+int mpr_push_ieee_sge(struct mpr_command *, void *, int);
+int mpr_add_dmaseg(struct mpr_command *, vm_paddr_t, size_t, u_int, int);
+int mpr_attach_sas(struct mpr_softc *sc);
+int mpr_detach_sas(struct mpr_softc *sc);
+int mpr_read_config_page(struct mpr_softc *, struct mpr_config_params *);
+int mpr_write_config_page(struct mpr_softc *, struct mpr_config_params *);
+void mpr_memaddr_cb(void *, bus_dma_segment_t *, int , int );
+void mpr_init_sge(struct mpr_command *cm, void *req, void *sge);
+int mpr_attach_user(struct mpr_softc *);
+void mpr_detach_user(struct mpr_softc *);
+void mprsas_record_event(struct mpr_softc *sc,
+ MPI2_EVENT_NOTIFICATION_REPLY *event_reply);
+
+int mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm);
+int mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm,
+ int timeout, int sleep_flag);
+int mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm);
+
+int mpr_config_get_bios_pg3(struct mpr_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpr_config_get_raid_volume_pg0(struct mpr_softc *sc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 page_address);
+int mpr_config_get_ioc_pg8(struct mpr_softc *sc, Mpi2ConfigReply_t *,
+ Mpi2IOCPage8_t *);
+int mpr_config_get_iounit_pg8(struct mpr_softc *sc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page);
+int mpr_config_get_sas_device_pg0(struct mpr_softc *, Mpi2ConfigReply_t *,
+ Mpi2SasDevicePage0_t *, u32 , u16 );
+int mpr_config_get_dpm_pg0(struct mpr_softc *, Mpi2ConfigReply_t *,
+ Mpi2DriverMappingPage0_t *, u16 );
+int mpr_config_get_raid_volume_pg1(struct mpr_softc *sc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u16 handle);
+int mpr_config_get_volume_wwid(struct mpr_softc *sc, u16 volume_handle,
+ u64 *wwid);
+int mpr_config_get_raid_pd_pg0(struct mpr_softc *sc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 page_address);
+void mprsas_ir_shutdown(struct mpr_softc *sc);
+
+int mpr_reinit(struct mpr_softc *sc);
+void mprsas_handle_reinit(struct mpr_softc *sc);
+
+void mpr_base_static_config_pages(struct mpr_softc *sc);
+
+int mpr_mapping_initialize(struct mpr_softc *);
+void mpr_mapping_topology_change_event(struct mpr_softc *,
+ Mpi2EventDataSasTopologyChangeList_t *);
+int mpr_mapping_is_reinit_required(struct mpr_softc *);
+void mpr_mapping_free_memory(struct mpr_softc *sc);
+int mpr_config_set_dpm_pg0(struct mpr_softc *, Mpi2ConfigReply_t *,
+ Mpi2DriverMappingPage0_t *, u16 );
+void mpr_mapping_exit(struct mpr_softc *);
+void mpr_mapping_check_devices(struct mpr_softc *, int);
+int mpr_mapping_allocate_memory(struct mpr_softc *sc);
+unsigned int mpr_mapping_get_sas_id(struct mpr_softc *, uint64_t , u16);
+unsigned int mpr_mapping_get_sas_id_from_handle(struct mpr_softc *sc,
+ u16 handle);
+unsigned int mpr_mapping_get_raid_id(struct mpr_softc *sc, u64 wwid,
+ u16 handle);
+unsigned int mpr_mapping_get_raid_id_from_handle(struct mpr_softc *sc,
+ u16 volHandle);
+void mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data);
+void mpr_mapping_ir_config_change_event(struct mpr_softc *sc,
+ Mpi2EventDataIrConfigChangeList_t *event_data);
+
+void mprsas_evt_handler(struct mpr_softc *sc, uintptr_t data,
+ MPI2_EVENT_NOTIFICATION_REPLY *event);
+void mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle);
+void mprsas_prepare_volume_remove(struct mprsas_softc *sassc,
+ uint16_t handle);
+int mprsas_startup(struct mpr_softc *sc);
+struct mprsas_target * mprsas_find_target_by_handle(struct mprsas_softc *,
+ int, uint16_t);
+
+SYSCTL_DECL(_hw_mpr);
+
+/* Compatibility shims for different OS versions */
+#if __FreeBSD_version >= 800001
+#define mpr_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mpr_kproc_exit(arg) kproc_exit(arg)
+#else
+#define mpr_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mpr_kproc_exit(arg) kthread_exit(arg)
+#endif
+
+#if defined(CAM_PRIORITY_XPT)
+#define MPR_PRIORITY_XPT CAM_PRIORITY_XPT
+#else
+#define MPR_PRIORITY_XPT 5
+#endif
+
+#if __FreeBSD_version < 800107
+// Prior to FreeBSD-8.0 scp3_flags was not defined.
+#define spc3_flags reserved
+
+#define SPC3_SID_PROTECT 0x01
+#define SPC3_SID_3PC 0x08
+#define SPC3_SID_TPGS_MASK 0x30
+#define SPC3_SID_TPGS_IMPLICIT 0x10
+#define SPC3_SID_TPGS_EXPLICIT 0x20
+#define SPC3_SID_ACC 0x40
+#define SPC3_SID_SCCS 0x80
+
+#define CAM_PRIORITY_NORMAL CAM_PRIORITY_NONE
+#endif
+
+#endif
+
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index 3e140ca6da5c6..de34de0e68ab6 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -2316,8 +2316,9 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
(csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
(csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
- (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
- T_SEQUENTIAL) && (sc->control_TLR) &&
+ (csio->data_ptr != NULL) &&
+ ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
+ (sc->control_TLR) &&
(sc->mapping_table[csio->ccb_h.target_id].device_info &
MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
vpd_list = (struct scsi_vpd_supported_page_list *)
@@ -2328,6 +2329,7 @@ mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
csio->cdb_io.cdb_bytes[4];
+ alloc_len -= csio->resid;
for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
if (vpd_list->list[i] == 0x90) {
*TLR_bits = TLR_on;
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
new file mode 100644
index 0000000000000..b74012870d06f
--- /dev/null
+++ b/sys/dev/mrsas/mrsas.c
@@ -0,0 +1,3672 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+#include <dev/mrsas/mrsas_ioctl.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/kthread.h>
+#include <sys/taskqueue.h>
+
+
+/*
+ * Function prototypes
+ */
+static d_open_t mrsas_open;
+static d_close_t mrsas_close;
+static d_read_t mrsas_read;
+static d_write_t mrsas_write;
+static d_ioctl_t mrsas_ioctl;
+
+static struct mrsas_ident *mrsas_find_ident(device_t);
+static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
+static void mrsas_flush_cache(struct mrsas_softc *sc);
+static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
+static void mrsas_ocr_thread(void *arg);
+static int mrsas_get_map_info(struct mrsas_softc *sc);
+static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
+static int mrsas_sync_map_info(struct mrsas_softc *sc);
+static int mrsas_get_pd_list(struct mrsas_softc *sc);
+static int mrsas_get_ld_list(struct mrsas_softc *sc);
+static int mrsas_setup_irq(struct mrsas_softc *sc);
+static int mrsas_alloc_mem(struct mrsas_softc *sc);
+static int mrsas_init_fw(struct mrsas_softc *sc);
+static int mrsas_setup_raidmap(struct mrsas_softc *sc);
+static int mrsas_complete_cmd(struct mrsas_softc *sc);
+static int mrsas_clear_intr(struct mrsas_softc *sc);
+static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info);
+static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd_to_abort);
+u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
+u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *mfi_cmd);
+int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
+int mrsas_init_adapter(struct mrsas_softc *sc);
+int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
+int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
+int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
+int mrsas_ioc_init(struct mrsas_softc *sc);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+int mrsas_reset_ctrl(struct mrsas_softc *sc);
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
+int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
+ int size);
+void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
+void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_disable_intr(struct mrsas_softc *sc);
+void mrsas_enable_intr(struct mrsas_softc *sc);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_mem(struct mrsas_softc *sc);
+void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
+void mrsas_isr(void *arg);
+void mrsas_teardown_intr(struct mrsas_softc *sc);
+void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
+void mrsas_kill_hba (struct mrsas_softc *sc);
+void mrsas_aen_handler(struct mrsas_softc *sc);
+void mrsas_write_reg(struct mrsas_softc *sc, int offset,
+ u_int32_t value);
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
+void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd, u_int8_t status);
+void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
+ u_int8_t extStatus);
+struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+
+extern int mrsas_cam_attach(struct mrsas_softc *sc);
+extern void mrsas_cam_detach(struct mrsas_softc *sc);
+extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
+extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
+extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
+extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
+extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
+extern void mrsas_xpt_release(struct mrsas_softc *sc);
+extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
+ u_int16_t index);
+extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
+static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
+static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
+SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
+
+
+/**
+ * PCI device struct and table
+ *
+ */
+/* One PCI ID match entry; 0xffff in sub IDs acts as a wildcard (see
+ * mrsas_find_ident()). */
+typedef struct mrsas_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ const char *desc;
+} MRSAS_CTLR_ID;
+
+/*
+ * Supported controllers, terminated by an all-zero entry.
+ * NOTE(review): only mrsas_find_ident() in this file uses the table;
+ * it could be "static const" unless another file references it — verify.
+ */
+MRSAS_CTLR_ID device_table[] = {
+ {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
+ {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
+ {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
+ {0, 0, 0, 0, NULL}
+};
+
+/**
+ * Character device entry points
+ *
+ */
+/* Entry points for the /dev/mrsas character device. */
+static struct cdevsw mrsas_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = mrsas_open,
+ .d_close = mrsas_close,
+ .d_read = mrsas_read,
+ .d_write = mrsas_write,
+ .d_ioctl = mrsas_ioctl,
+ .d_name = "mrsas",
+};
+
+MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
+
+/**
+ * In the cdevsw routines, we find our softc by using the si_drv1 member
+ * of struct cdev. We set this variable to point to our softc in our
+ * attach routine when we create the /dev entry.
+ */
+/*
+ * cdev open entry point.  Currently a stub: the softc is fetched from
+ * si_drv1 but unused, and open always succeeds.
+ */
+int
+mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1; /* NOTE(review): fetched but unused */
+ return (0);
+}
+
+/* cdev close entry point; stub that always succeeds. */
+int
+mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1; /* NOTE(review): fetched but unused */
+ return (0);
+}
+
+/* cdev read entry point; stub that transfers nothing and returns success. */
+int
+mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1; /* NOTE(review): fetched but unused */
+ return (0);
+}
+/* cdev write entry point; stub that transfers nothing and returns success. */
+int
+mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
+{
+ struct mrsas_softc *sc;
+
+ sc = dev->si_drv1; /* NOTE(review): fetched but unused */
+ return (0);
+}
+
+/**
+ * Register Read/Write Functions
+ *
+ */
+/* Write a 32-bit controller register at the given byte offset. */
+void
+mrsas_write_reg(struct mrsas_softc *sc, int offset,
+ u_int32_t value)
+{
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
+
+ bus_space_write_4(bus_tag, bus_handle, offset, value);
+}
+
+/* Read a 32-bit controller register at the given byte offset. */
+u_int32_t
+mrsas_read_reg(struct mrsas_softc *sc, int offset)
+{
+ bus_space_tag_t bus_tag = sc->bus_tag;
+ bus_space_handle_t bus_handle = sc->bus_handle;
+
+ return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
+}
+
+
+/**
+ * Interrupt Disable/Enable/Clear Functions
+ *
+ */
+/* Mask all controller interrupts by writing all-ones to the mask register. */
+void mrsas_disable_intr(struct mrsas_softc *sc)
+{
+ u_int32_t mask = 0xFFFFFFFF;
+ u_int32_t status;
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
+ /* Dummy read to force pci flush */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+}
+
+/*
+ * Enable controller interrupts: first acknowledge any latched status,
+ * then unmask the reply interrupt.  Each write is followed by a dummy
+ * read to flush the PCI posting buffer.
+ */
+void mrsas_enable_intr(struct mrsas_softc *sc)
+{
+ u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
+ u_int32_t status;
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
+}
+
+/*
+ * Examine and acknowledge a pending interrupt.
+ * Returns 1 if the interrupt belonged to this adapter (FW state change,
+ * which is acknowledged here, or a reply interrupt), 0 otherwise.
+ * A FAULTed firmware additionally wakes the OCR recovery thread.
+ */
+static int mrsas_clear_intr(struct mrsas_softc *sc)
+{
+ u_int32_t status, fw_status, fw_state;
+
+ /* Read received interrupt */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+
+ /* If FW state change interrupt is received, write to it again to clear */
+ if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
+ fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ }
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
+ /* Dummy read to flush the acknowledge write. */
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
+ return(1);
+ }
+
+ /* Not our interrupt, so just return */
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return(0);
+
+ /* We got a reply interrupt */
+ return(1);
+}
+
+/**
+ * PCI Support Functions
+ *
+ */
+/*
+ * Linear search of device_table for a PCI ID match.  Subvendor and
+ * subdevice values of 0xffff in the table act as wildcards.  Returns
+ * the matching entry, or NULL if the device is not supported.
+ */
+static struct mrsas_ident * mrsas_find_ident(device_t dev)
+{
+ struct mrsas_ident *pci_device;
+
+ for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
+ {
+ if ((pci_device->vendor == pci_get_vendor(dev)) &&
+ (pci_device->device == pci_get_device(dev)) &&
+ ((pci_device->subvendor == pci_get_subvendor(dev)) ||
+ (pci_device->subvendor == 0xffff)) &&
+ ((pci_device->subdevice == pci_get_subdevice(dev)) ||
+ (pci_device->subdevice == 0xffff)))
+ return (pci_device);
+ }
+ return (NULL);
+}
+
+/*
+ * PCI probe entry point.  Prints the driver banner once for the first
+ * matching controller, sets the device description, and returns a
+ * priority between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY so a
+ * more specific driver can still claim the device.
+ */
+static int mrsas_probe(device_t dev)
+{
+ static u_int8_t first_ctrl = 1;
+ struct mrsas_ident *id;
+
+ if ((id = mrsas_find_ident(dev)) != NULL) {
+ if (first_ctrl) {
+ printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
+ first_ctrl = 0;
+ }
+ device_set_desc(dev, id->desc);
+ /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
+ return (-30);
+ }
+ return (ENXIO);
+}
+
+/**
+ * mrsas_setup_sysctl: setup sysctl values for mrsas
+ * input: Adapter instance soft state
+ *
+ * Setup sysctl entries for mrsas driver.
+ */
+/*
+ * mrsas_setup_sysctl:  Register the driver's sysctl nodes.
+ * input:  Adapter instance soft state
+ *
+ * Prefers the device's own sysctl context/tree; falls back to creating
+ * a per-unit node under hw.mrsas when the device tree is unavailable.
+ */
+static void
+mrsas_setup_sysctl(struct mrsas_softc *sc)
+{
+ struct sysctl_ctx_list *sysctl_ctx = NULL;
+ struct sysctl_oid *sysctl_tree = NULL;
+ char tmpstr[80], tmpstr2[80];
+
+ /*
+ * Setup the sysctl variable so the user can change the debug level
+ * on the fly.
+ */
+ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
+ device_get_unit(sc->mrsas_dev));
+ snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
+
+ sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
+ if (sysctl_ctx != NULL)
+ sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
+
+ /* Fall back to a private context rooted at hw.mrsas.<unit>. */
+ if (sysctl_tree == NULL) {
+ sysctl_ctx_init(&sc->sysctl_ctx);
+ sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+ SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
+ CTLFLAG_RD, 0, tmpstr);
+ if (sc->sysctl_tree == NULL)
+ return;
+ sysctl_ctx = &sc->sysctl_ctx;
+ sysctl_tree = sc->sysctl_tree;
+ }
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
+ "Disable the use of OCR");
+
+ SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
+ strlen(MRSAS_VERSION), "driver version");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "reset_count", CTLFLAG_RD,
+ &sc->reset_count, 0, "number of ocr from start of the day");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "fw_outstanding", CTLFLAG_RD,
+ &sc->fw_outstanding, 0, "FW outstanding commands");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
+ &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
+ "Driver debug level");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
+ 0, "Driver IO timeout value in mili-second.");
+
+ SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
+ &sc->mrsas_fw_fault_check_delay,
+ 0, "FW fault check thread delay in seconds. <default is 1 sec>");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "reset_in_progress", CTLFLAG_RD,
+ &sc->reset_in_progress, 0, "ocr in progress status");
+
+}
+
+/**
+ * mrsas_get_tunables: get tunable parameters.
+ * input: Adapter instance soft state
+ *
+ * Get tunable parameters. This will help to debug driver at boot time.
+ */
+/*
+ * mrsas_get_tunables:  Load tunable parameters into the soft state.
+ * input:  Adapter instance soft state
+ *
+ * Seeds defaults, then lets the global hw.mrsas.debug_level and the
+ * per-unit dev.mrsas.<unit>.debug_level tunables (the latter taking
+ * precedence, since it is fetched last) override the debug level.
+ */
+static void
+mrsas_get_tunables(struct mrsas_softc *sc)
+{
+ char tmpstr[80];
+
+ /* XXX default to some debugging for now */
+ sc->mrsas_debug = MRSAS_FAULT;
+ sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
+ sc->mrsas_fw_fault_check_delay = 1;
+ sc->reset_count = 0;
+ sc->reset_in_progress = 0;
+
+ /*
+ * Grab the global variables.
+ */
+ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
+
+ /* Grab the unit-instance variables */
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
+ device_get_unit(sc->mrsas_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
+}
+
+/**
+ * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
+ * Used to get sequence number at driver load time.
+ * input: Adapter soft state
+ *
+ * Allocates DMAable memory for the event log info internal command.
+ */
+/*
+ * mrsas_alloc_evt_log_info_cmd:  Allocates memory to get event log information.
+ * input:  Adapter soft state
+ *
+ * Allocates DMAable memory for the event log info internal command and
+ * records its bus address in sc->el_info_phys_addr.  Declared "static"
+ * to match the forward declaration above (the definition previously
+ * omitted the storage class).
+ *
+ * NOTE(review): on the later failure paths the tag/memory created by the
+ * earlier steps are not released here; mrsas_free_evt_log_info_cmd() is
+ * presumably expected to reclaim them — verify callers invoke it on error.
+ */
+static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
+{
+ int el_info_size;
+
+ /* Allocate get event log info command */
+ el_info_size = sizeof(struct mrsas_evt_log_info);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ el_info_size, // maxsize
+ 1, // msegments
+ el_info_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->el_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
+ BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
+ sc->el_info_mem, el_info_size, mrsas_addr_cb,
+ &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
+ return (ENOMEM);
+ }
+
+ memset(sc->el_info_mem, 0, el_info_size);
+ return (0);
+}
+
+/**
+ * mrsas_free_evt_info_cmd: Free memory for Event log info command
+ * input: Adapter soft state
+ *
+ * Deallocates memory for the event log info internal command.
+ * Each step is guarded so the function is safe to call after a
+ * partially-failed mrsas_alloc_evt_log_info_cmd(); teardown happens
+ * in reverse order of allocation (unload map, free memory, destroy tag).
+ */
+void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
+{
+ if (sc->el_info_phys_addr)
+ bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
+ if (sc->el_info_mem != NULL)
+ bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
+ if (sc->el_info_tag != NULL)
+ bus_dma_tag_destroy(sc->el_info_tag);
+}
+
+/**
+ * mrsas_get_seq_num: Get latest event sequence number
+ * @sc: Adapter soft state
+ * @eli: Firmware event log sequence number information.
+ * Firmware maintains a log of all events in a non-volatile area.
+ * Driver get the sequence number using DCMD
+ * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
+ *
+ * Returns 0 on success; -ENOMEM when no MFI command or DMA buffer is
+ * available. NOTE(review): the negative-errno returns are Linux style,
+ * unlike the positive ENOMEM used elsewhere in this file — callers only
+ * test for non-zero, so it works, but the convention is inconsistent.
+ */
+
+static int
+mrsas_get_seq_num(struct mrsas_softc *sc,
+ struct mrsas_evt_log_info *eli)
+{
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return -ENOMEM;
+ }
+
+ /* Build the MR_DCMD_CTRL_EVENT_GET_INFO frame: one read SGE
+ * pointing at the DMA buffer allocated above. */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
+
+ /* Synchronous: blocks until firmware completes the DCMD. */
+ mrsas_issue_blocked_cmd(sc, cmd);
+
+ /*
+ * Copy the data back into callers buffer
+ */
+ memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
+ mrsas_free_evt_log_info_cmd(sc);
+ mrsas_release_mfi_cmd(cmd);
+
+ return 0;
+}
+
+
<br>
+/**
+ * mrsas_register_aen: Register for asynchronous event notification
+ * @sc: Adapter soft state
+ * @seq_num: Starting sequence number
+ * @class_locale: Class of the event
+ * This function subscribes for events beyond the @seq_num
+ * and type @class_locale.
+ *
+ * Returns 0 on success (or when the existing registration already
+ * covers the request), -ENOMEM when no MFI command is free, 1 when the
+ * DCMD could not be issued, or the abort error code.
+ * */
+static int
+mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
+ u_int32_t class_locale_word)
+{
+ int ret_val;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ union mrsas_evt_class_locale curr_aen;
+ union mrsas_evt_class_locale prev_aen;
+
+/*
+ * If there an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything. In other words, whichever events the current
+ * AEN request is subscribing to, have already been subscribed
+ * to.
+ * If the old_cmd is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is superset of both
+ * old and current and re-issue to the FW
+ * */
+
+ curr_aen.word = class_locale_word;
+
+ if (sc->aen_cmd) {
+
+ prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
+
+/*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+ * registered, then a new registration requests for higher
+ * classes need not be sent to FW. They are automatically
+ * included.
+ * Locale numbers don't have such hierarchy. They are bitmap values
+ */
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ /* Build the superset: union of locales, minimum class. */
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ sc->aen_cmd->abort_aen = 1;
+ ret_val = mrsas_issue_blocked_abort_cmd(sc,
+ sc->aen_cmd);
+
+ if (ret_val) {
+ printf("mrsas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
+
+/*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
+ dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
+ dcmd->mbox.w[0] = seq_num;
+ sc->last_seq_num = seq_num;
+ dcmd->mbox.w[1] = curr_aen.word;
+ dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
+
+ /* Guard against a racing registration having repopulated aen_cmd
+ * while we built this frame; if so, back out quietly. */
+ if (sc->aen_cmd != NULL) {
+ mrsas_release_mfi_cmd(cmd);
+ return 0;
+ }
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ sc->aen_cmd = cmd;
+
+ /*
+ Issue the aen registration frame
+ */
+ /* NOTE(review): on failure here, sc->aen_cmd is left pointing at a
+ * cmd that was never issued and is not released — presumably a
+ * leak/stale pointer on this error path; confirm. */
+ if (mrsas_issue_dcmd(sc, cmd)){
+ device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
+ return(1);
+ }
+
+ return 0;
+}
+/**
+ * mrsas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ *
+ * Reads the newest firmware event sequence number and registers for
+ * all locales at MR_EVT_CLASS_DEBUG class starting one past it.
+ * Returns 0 on success, -1 if the sequence number could not be read,
+ * else the mrsas_register_aen() error code.
+ */
+static int mrsas_start_aen(struct mrsas_softc *sc)
+{
+ struct mrsas_evt_log_info eli;
+ union mrsas_evt_class_locale class_locale;
+
+
+ /* Get the latest sequence number from FW*/
+
+ memset(&eli, 0, sizeof(eli));
+
+ if (mrsas_get_seq_num(sc, &eli))
+ return -1;
+
+ /* Register AEN with FW for latest sequence number plus 1*/
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return mrsas_register_aen(sc, eli.newest_seq_num + 1,
+ class_locale.word);
+}
+
+/**
+ * mrsas_attach: PCI entry point
+ * input: device struct pointer
+ *
+ * Performs setup of PCI and registers, initializes mutexes and
+ * linked lists, registers interrupts and CAM, and initializes
+ * the adapter/controller to its proper state.
+ * Returns 0 on success, ENXIO on any failure (after unwinding the
+ * stages that completed via the labels at the bottom).
+ */
+static int mrsas_attach(device_t dev)
+{
+ struct mrsas_softc *sc = device_get_softc(dev);
+ uint32_t cmd, bar, error;
+
+ /* Look up our softc and initialize its fields. */
+ sc->mrsas_dev = dev;
+ sc->device_id = pci_get_device(dev);
+
+ mrsas_get_tunables(sc);
+
+ /*
+ * Set up PCI and registers
+ */
+ cmd = pci_read_config(dev, PCIR_COMMAND, 2);
+ /* NOTE(review): this tests the I/O-port enable bit, yet the driver
+ * maps a memory BAR below (SYS_RES_MEMORY) — presumably
+ * PCIM_CMD_MEMEN was intended; confirm against working hardware. */
+ if ( (cmd & PCIM_CMD_PORTEN) == 0) {
+ return (ENXIO);
+ }
+ /* Force the busmaster enable bit on. */
+ cmd |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, cmd, 2);
+
+ //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
+ /* NOTE(review): 'bar' is read but never used below — dead store. */
+ bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
+
+ sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
+ if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
+ &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
+ == NULL) {
+ device_printf(dev, "Cannot allocate PCI registers\n");
+ goto attach_fail;
+ }
+ sc->bus_tag = rman_get_bustag(sc->reg_res);
+ sc->bus_handle = rman_get_bushandle(sc->reg_res);
+
+ /* Intialize mutexes */
+ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
+ mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
+ mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
+ mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
+ /* ioctl_lock is a spin mutex; see mrsas_ioctl() for its use. */
+ mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
+ mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
+ mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
+
+ /* Intialize linked list */
+ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
+ TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
+
+ atomic_set(&sc->fw_outstanding,0);
+
+ sc->io_cmds_highwater = 0;
+
+ /* Create a /dev entry for this device. */
+ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
+ GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
+ device_get_unit(dev));
+ if (sc->mrsas_cdev)
+ sc->mrsas_cdev->si_drv1 = sc;
+
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ sc->UnevenSpanSupport = 0;
+
+ /* Initialize Firmware */
+ if (mrsas_init_fw(sc) != SUCCESS) {
+ goto attach_fail_fw;
+ }
+
+ /* Register SCSI mid-layer */
+ if ((mrsas_cam_attach(sc) != SUCCESS)) {
+ goto attach_fail_cam;
+ }
+
+ /* Register IRQs */
+ if (mrsas_setup_irq(sc) != SUCCESS) {
+ goto attach_fail_irq;
+ }
+
+ /* Enable Interrupts */
+ mrsas_enable_intr(sc);
+
+ /* Kick off the OCR (online controller reset) watchdog thread. */
+ error = mrsas_kproc_create(mrsas_ocr_thread, sc,
+ &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
+ device_get_unit(sc->mrsas_dev));
+ if (error) {
+ printf("Error %d starting rescan thread\n", error);
+ goto attach_fail_irq;
+ }
+
+ mrsas_setup_sysctl(sc);
+
+ /* Initiate AEN (Asynchronous Event Notification)*/
+
+ if (mrsas_start_aen(sc)) {
+ printf("Error: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ return (0);
+
+ /* Unwind: each label tears down the stage above it. NOTE(review):
+ * attach_fail_cam calls mrsas_cam_detach() even when cam attach
+ * itself failed, and attach_fail_irq tears down an IRQ that may not
+ * have been set up — presumably those helpers tolerate partial
+ * setup; verify. */
+fail_start_aen:
+attach_fail_irq:
+ mrsas_teardown_intr(sc);
+attach_fail_cam:
+ mrsas_cam_detach(sc);
+attach_fail_fw:
+//attach_fail_raidmap:
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+attach_fail:
+ destroy_dev(sc->mrsas_cdev);
+ if (sc->reg_res){
+ bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
+ sc->reg_res_id, sc->reg_res);
+ }
+ return (ENXIO);
+}
+
+/**
+ * mrsas_detach: De-allocates and teardown resources
+ * input: device struct pointer
+ *
+ * This function is the entry point for device disconnect and detach. It
+ * performs memory de-allocations, shutdown of the controller and various
+ * teardown and destroy resource functions. Blocks (polling once per
+ * second) until any in-flight OCR finishes and the OCR thread exits
+ * before tearing anything down.
+ */
+static int mrsas_detach(device_t dev)
+{
+ struct mrsas_softc *sc;
+ int i = 0;
+
+ sc = device_get_softc(dev);
+ /* Tell the OCR thread and ioctl path we are going away. */
+ sc->remove_in_progress = 1;
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+ /* Wait for a reset in progress to finish, logging periodically. */
+ while(sc->reset_in_progress){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for ocr to be finished\n",i);
+ }
+ pause("mr_shutdown", hz);
+ }
+ i = 0;
+ /* Then wait for the OCR thread itself to exit. */
+ while(sc->ocr_thread_active){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "mrsas_ocr thread to quit ocr %d\n",i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_shutdown", hz);
+ }
+ /* Quiesce and shut down the controller before freeing resources. */
+ mrsas_flush_cache(sc);
+ mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
+ mrsas_disable_intr(sc);
+ mrsas_cam_detach(sc);
+ mrsas_teardown_intr(sc);
+ mrsas_free_mem(sc);
+ mtx_destroy(&sc->sim_lock);
+ mtx_destroy(&sc->aen_lock);
+ mtx_destroy(&sc->pci_lock);
+ mtx_destroy(&sc->io_lock);
+ mtx_destroy(&sc->ioctl_lock);
+ mtx_destroy(&sc->mpt_cmd_pool_lock);
+ mtx_destroy(&sc->mfi_cmd_pool_lock);
+ mtx_destroy(&sc->raidmap_lock);
+ if (sc->reg_res){
+ bus_release_resource(sc->mrsas_dev,
+ SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
+ }
+ destroy_dev(sc->mrsas_cdev);
+ if (sc->sysctl_tree != NULL)
+ sysctl_ctx_free(&sc->sysctl_ctx);
+ return (0);
+}
+
+/**
+ * mrsas_free_mem: Frees allocated memory
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_detach() to free previously allocated
+ * memory. Every release is NULL-guarded, so it is also safe after a
+ * partially-failed mrsas_alloc_mem()/mrsas_init_fw(). Each DMA region is
+ * torn down in reverse order of setup: unload map, free memory, destroy tag.
+ */
+void mrsas_free_mem(struct mrsas_softc *sc)
+{
+ int i;
+ u_int32_t max_cmd;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+
+ /*
+ * Free RAID map memory
+ */
+ for (i=0; i < 2; i++)
+ {
+ if (sc->raidmap_phys_addr[i])
+ bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_mem[i] != NULL)
+ bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
+ if (sc->raidmap_tag[i] != NULL)
+ bus_dma_tag_destroy(sc->raidmap_tag[i]);
+ }
+
+ /*
+ * Free version buffer memroy
+ */
+ if (sc->verbuf_phys_addr)
+ bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
+ if (sc->verbuf_mem != NULL)
+ bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
+ if (sc->verbuf_tag != NULL)
+ bus_dma_tag_destroy(sc->verbuf_tag);
+
+
+ /*
+ * Free sense buffer memory
+ */
+ if (sc->sense_phys_addr)
+ bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
+ if (sc->sense_mem != NULL)
+ bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
+ if (sc->sense_tag != NULL)
+ bus_dma_tag_destroy(sc->sense_tag);
+
+ /*
+ * Free chain frame memory
+ */
+ if (sc->chain_frame_phys_addr)
+ bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
+ if (sc->chain_frame_mem != NULL)
+ bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
+ if (sc->chain_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->chain_frame_tag);
+
+ /*
+ * Free IO Request memory
+ */
+ if (sc->io_request_phys_addr)
+ bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
+ if (sc->io_request_mem != NULL)
+ bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
+ if (sc->io_request_tag != NULL)
+ bus_dma_tag_destroy(sc->io_request_tag);
+
+ /*
+ * Free Reply Descriptor memory
+ */
+ if (sc->reply_desc_phys_addr)
+ bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
+ if (sc->reply_desc_mem != NULL)
+ bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
+ if (sc->reply_desc_tag != NULL)
+ bus_dma_tag_destroy(sc->reply_desc_tag);
+
+ /*
+ * Free event detail memory
+ */
+ if (sc->evt_detail_phys_addr)
+ bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
+ if (sc->evt_detail_mem != NULL)
+ bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
+ if (sc->evt_detail_tag != NULL)
+ bus_dma_tag_destroy(sc->evt_detail_tag);
+
+ /*
+ * Free MFI frames
+ */
+ /* Frames are released first; the command structures themselves are
+ * freed further below in the "MFI internal command list" section. */
+ if (sc->mfi_cmd_list) {
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ mfi_cmd = sc->mfi_cmd_list[i];
+ mrsas_free_frame(sc, mfi_cmd);
+ }
+ }
+ if (sc->mficmd_frame_tag != NULL)
+ bus_dma_tag_destroy(sc->mficmd_frame_tag);
+
+ /*
+ * Free MPT internal command list
+ */
+ max_cmd = sc->max_fw_cmds;
+ if (sc->mpt_cmd_list) {
+ for (i = 0; i < max_cmd; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
+ free(sc->mpt_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
+ }
+
+ /*
+ * Free MFI internal command list
+ */
+
+ if (sc->mfi_cmd_list) {
+ for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+ free(sc->mfi_cmd_list[i], M_MRSAS);
+ }
+ free(sc->mfi_cmd_list, M_MRSAS);
+ sc->mfi_cmd_list = NULL;
+ }
+
+ /*
+ * Free request descriptor memory
+ */
+ free(sc->req_desc, M_MRSAS);
+ sc->req_desc = NULL;
+
+ /*
+ * Destroy parent tag
+ */
+ if (sc->mrsas_parent_tag != NULL)
+ bus_dma_tag_destroy(sc->mrsas_parent_tag);
+}
+
+/**
+ * mrsas_teardown_intr: Teardown interrupt
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_detach() to teardown and release
+ * bus interrupt resourse.
+ * NOTE(review): intr_handle is reset to NULL but mrsas_irq is left
+ * pointing at the released resource — asymmetric; a second call would
+ * double-release the IRQ. Confirm callers only invoke this once.
+ */
+void mrsas_teardown_intr(struct mrsas_softc *sc)
+{
+ if (sc->intr_handle)
+ bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
+ if (sc->mrsas_irq != NULL)
+ bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
+ sc->intr_handle = NULL;
+}
+
+/**
+ * mrsas_suspend: Suspend entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system suspend from the OS.
+ * Currently a stub: no controller state is saved; always returns 0.
+ */
+static int mrsas_suspend(device_t dev)
+{
+ struct mrsas_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (0);
+}
+
+/**
+ * mrsas_resume: Resume entry point
+ * input: Device struct pointer
+ *
+ * This function is the entry point for system resume from the OS.
+ * Currently a stub: no controller state is restored; always returns 0.
+ */
+static int mrsas_resume(device_t dev)
+{
+ struct mrsas_softc *sc;
+
+ sc = device_get_softc(dev);
+ return (0);
+}
+
+/**
+ * mrsas_ioctl: IOCtl commands entry point.
+ *
+ * This function is the entry point for IOCtls from the OS. It calls the
+ * appropriate function for processing depending on the command received.
+ * Refuses with ENOENT while detach/shutdown is in progress, and waits
+ * (polling once per second) for any OCR to finish before dispatching.
+ */
+static int
+mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
+{
+ struct mrsas_softc *sc;
+ int ret = 0, i = 0;
+
+ sc = (struct mrsas_softc *)(dev->si_drv1);
+
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "Driver remove or shutdown called.\n");
+ return ENOENT;
+ }
+
+ /* The spin lock only makes the reset_in_progress read atomic with
+ * respect to the OCR path; the wait below runs unlocked. */
+ mtx_lock_spin(&sc->ioctl_lock);
+ if (!sc->reset_in_progress) {
+ mtx_unlock_spin(&sc->ioctl_lock);
+ goto do_ioctl;
+ }
+
+ /* Release ioclt_lock, and wait for OCR
+ * to be finished */
+ mtx_unlock_spin(&sc->ioctl_lock);
+ while(sc->reset_in_progress){
+ i++;
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_INFO,
+ "[%2d]waiting for "
+ "OCR to be finished %d\n",i,
+ sc->ocr_thread_active);
+ }
+ pause("mr_ioctl", hz);
+ }
+
+do_ioctl:
+ /* NOTE(review): no default case — an unrecognized cmd silently
+ * returns 0 (success) rather than ENOTTY; confirm that is intended. */
+ switch (cmd) {
+ case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
+ ret = mrsas_passthru(sc, (void *)arg);
+ break;
+ case MRSAS_IOC_SCAN_BUS:
+ ret = mrsas_bus_scan(sc);
+ break;
+ }
+
+ return (ret);
+}
+
+/**
+ * mrsas_setup_irq: Set up interrupt.
+ * input: Adapter instance soft state
+ *
+ * This function sets up interrupts as a bus resource, with flags indicating
+ * resource permitting contemporaneous sharing and for resource to activate
+ * atomically. Returns 0 on success, FAIL otherwise.
+ * NOTE(review): if bus_setup_intr() fails, the IRQ resource allocated
+ * just above is not released here — presumably mrsas_teardown_intr()
+ * on the caller's error path covers it; confirm.
+ */
+static int mrsas_setup_irq(struct mrsas_softc *sc)
+{
+ sc->irq_id = 0;
+ sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
+ &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->mrsas_irq == NULL){
+ device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
+ return (FAIL);
+ }
+ /* Filter is NULL: mrsas_isr runs as an ithread handler (INTR_MPSAFE). */
+ if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
+ NULL, mrsas_isr, sc, &sc->intr_handle)) {
+ device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
+ return (FAIL);
+ }
+
+ return (0);
+}
+
+/*
+ * mrsas_isr: ISR entry point
+ * input: argument pointer
+ *
+ * This function is the interrupt service routine entry point. There
+ * are two types of interrupts, state change interrupt and response
+ * interrupt. If an interrupt is not ours, we just return.
+ */
+void mrsas_isr(void *arg)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)arg;
+ int status;
+
+ /* Clear FW state change interrupt */
+ status = mrsas_clear_intr(sc);
+
+ /* Not our interrupt */
+ if (!status)
+ return;
+
+ /* If we are resetting, bail */
+ if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
+ printf(" Entered into ISR when OCR is going active. \n");
+ mrsas_clear_intr(sc);
+ return;
+ }
+ /* Process for reply request and clear response interrupt */
+ /* mrsas_complete_cmd() clears the response interrupt itself on
+ * success; we only clear here when it reports nothing to do. */
+ if (mrsas_complete_cmd(sc) != SUCCESS)
+ mrsas_clear_intr(sc);
+
+ return;
+}
+
+/*
+ * mrsas_complete_cmd: Process reply request
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_isr() to process reply request and
+ * clear response interrupt. Processing of the reply request entails
+ * walking through the reply descriptor array for the command request
+ * pended from Firmware. We look at the Function field to determine
+ * the command type and perform the appropriate action. Before we
+ * return, we clear the response interrupt.
+ * Returns DONE when nothing was processed (or on HW critical error),
+ * 0 after completing at least one reply.
+ */
+static int mrsas_complete_cmd(struct mrsas_softc *sc)
+{
+ Mpi2ReplyDescriptorsUnion_t *desc;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct mrsas_mpt_cmd *cmd_mpt;
+ struct mrsas_mfi_cmd *cmd_mfi;
+ u_int8_t arm, reply_descript_type;
+ u_int16_t smid, num_completed;
+ u_int8_t status, extStatus;
+ union desc_value desc_val;
+ PLD_LOAD_BALANCE_INFO lbinfo;
+ u_int32_t device_id;
+ int threshold_reply_count = 0;
+
+
+ /* If we have a hardware error, not need to continue */
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+ return (DONE);
+
+ /* Resume the ring walk where the previous invocation stopped. */
+ desc = sc->reply_desc_mem;
+ desc += sc->last_reply_idx;
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ desc_val.word = desc->Words;
+ num_completed = 0;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ /* Find our reply descriptor for the command and process */
+ /* All-0xFF words mark an unused (not yet posted) descriptor. */
+ while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
+ {
+ /* SMIDs are 1-based; index into the 0-based command list. */
+ smid = reply_desc->SMID;
+ cmd_mpt = sc->mpt_cmd_list[smid -1];
+ scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function)
+ {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST : /*Fast Path IO.*/
+ /* NOTE(review): target_id indexes load_balance_info with no
+ * visible bound check here — presumably guarded at submit
+ * time; confirm. */
+ device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
+ lbinfo = &sc->load_balance_info[device_id];
+ if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
+ arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
+ atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
+ }
+ //Fall thru and complete IO
+ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
+ mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
+ mrsas_cmd_done(sc, cmd_mpt);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ atomic_dec(&sc->fw_outstanding);
+ break;
+ case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+ cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
+ mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
+ cmd_mpt->flags = 0;
+ mrsas_release_mpt_cmd(cmd_mpt);
+ break;
+ }
+
+ sc->last_reply_idx++;
+ if (sc->last_reply_idx >= sc->reply_q_depth)
+ sc->last_reply_idx = 0;
+
+ desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
+ num_completed++;
+ threshold_reply_count++;
+
+ /* Get the next reply descriptor */
+ if (!sc->last_reply_idx)
+ desc = sc->reply_desc_mem;
+ else
+ desc++;
+
+ reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+ desc_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+
+ /*
+ * Write to reply post index after completing threshold reply count
+ * and still there are more replies in reply queue pending to be
+ * completed.
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
+ sc->last_reply_idx);
+ threshold_reply_count = 0;
+ }
+ }
+
+ /* No match, just return */
+ if (num_completed == 0)
+ return (DONE);
+
+ /* Clear response interrupt */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
+
+ return(0);
+}
+
+/*
+ * mrsas_map_mpt_cmd_status: Allocate DMAable memory.
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
+ * It checks the command status and maps the appropriate CAM status for the CCB.
+ * NOTE(review): extStatus is accepted but never consulted below — dead
+ * parameter at this point; confirm whether it was meant to refine the
+ * mapping.
+ */
+void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
+{
+ struct mrsas_softc *sc = cmd->sc;
+ u_int8_t *sense_data;
+
+ switch (status) {
+ case MFI_STAT_OK:
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+ cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
+ /* NOTE(review): sense_data is the address of an embedded struct
+ * member, so this check is always true. */
+ if (sense_data) {
+ /* For now just copy 18 bytes back */
+ memcpy(sense_data, cmd->sense, 18);
+ cmd->ccb_ptr->csio.sense_len = 18;
+ cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
+ }
+ break;
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ if (cmd->ccb_ptr->ccb_h.target_lun)
+ cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
+ else
+ cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ /*send status to CAM layer to retry sending command without
+ * decrementing retry counter*/
+ cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
+ cmd->ccb_ptr->csio.scsi_status = status;
+ }
+ return;
+}
+
+/*
+ * mrsas_alloc_mem: Allocate DMAable memory.
+ * input: Adapter instance soft state
+ *
+ * This function creates the parent DMA tag and allocates DMAable memory.
+ * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
+ * into Kernel virtual address. Callback argument is physical memory address.
+ * Returns 0 on success, ENOMEM on any failure.
+ * NOTE(review): nothing allocated here is unwound on failure — this
+ * relies on the caller invoking mrsas_free_mem() (whose NULL guards make
+ * that safe) on the error path; confirm all callers do.
+ */
+static int mrsas_alloc_mem(struct mrsas_softc *sc)
+{
+ u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
+ chain_frame_size, evt_detail_size;
+
+ /*
+ * Allocate parent DMA tag
+ */
+ if (bus_dma_tag_create(NULL, /* parent */
+ 1, /* alignment */
+ 0, /* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MRSAS_MAX_IO_SIZE,/* maxsize */
+ MRSAS_MAX_SGL, /* nsegments */
+ MRSAS_MAX_IO_SIZE,/* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->mrsas_parent_tag /* tag */
+ )) {
+ device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * Allocate for version buffer
+ */
+ verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ verbuf_size, // maxsize
+ 1, // msegments
+ verbuf_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->verbuf_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
+ BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->verbuf_mem, verbuf_size);
+ if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
+ verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
+ return(ENOMEM);
+ }
+
+ /*
+ * Allocate IO Request Frames
+ */
+ io_req_size = sc->io_frames_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 16, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ io_req_size, // maxsize
+ 1, // msegments
+ io_req_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->io_request_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
+ BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->io_request_mem, io_req_size);
+ if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
+ sc->io_request_mem, io_req_size, mrsas_addr_cb,
+ &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Chain Frames
+ */
+ chain_frame_size = sc->chain_frames_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 4, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ chain_frame_size, // maxsize
+ 1, // msegments
+ chain_frame_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->chain_frame_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
+ BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->chain_frame_mem, chain_frame_size);
+ if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
+ sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
+ &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Reply Descriptor Array
+ */
+ reply_desc_size = sc->reply_alloc_sz;
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 16, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ reply_desc_size, // maxsize
+ 1, // msegments
+ reply_desc_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->reply_desc_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
+ BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
+ sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
+ &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate Sense Buffer Array. Keep in lower 4GB
+ */
+ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 64, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ sense_size, // maxsize
+ 1, // nsegments
+ sense_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->sense_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
+ BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
+ sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
+ BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
+ return (ENOMEM);
+ }
+
+ /*
+ * Allocate for Event detail structure
+ */
+ evt_detail_size = sizeof(struct mrsas_evt_detail);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ evt_detail_size, // maxsize
+ 1, // msegments
+ evt_detail_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->evt_detail_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
+ BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+ bzero(sc->evt_detail_mem, evt_detail_size);
+ if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
+ sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
+ &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
+ return (ENOMEM);
+ }
+
+
+ /*
+ * Create a dma tag for data buffers; size will be the maximum
+ * possible I/O size (280kB).
+ */
+ /* Unlike the tags above this one uses busdma_lock_mutex with
+ * io_lock, since data maps are loaded from the I/O path. */
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 1, // alignment
+ 0, // boundary
+ BUS_SPACE_MAXADDR, // lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ MRSAS_MAX_IO_SIZE, // maxsize
+ MRSAS_MAX_SGL, // nsegments
+ MRSAS_MAX_IO_SIZE, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ busdma_lock_mutex, // lockfunc
+ &sc->io_lock, // lockfuncarg
+ &sc->data_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
+ return(ENOMEM);
+ }
+
+ return(0);
+}
+
+/*
+ * mrsas_addr_cb: Callback function of bus_dmamap_load()
+ * input: callback argument,
+ * machine dependent type that describes DMA segments,
+ * number of segments,
+ * error code.
+ *
+ * This function is for the driver to receive mapping information resultant
+ * of the bus_dmamap_load(). The information is actually not being used,
+ * but the address is saved anyway.
+ *
+ * NOTE(review): only the first segment's address is recorded and the
+ * error/nsegs arguments are ignored — presumably all callers use
+ * single-segment tags (nsegments == 1); confirm against the tag setup.
+ */
+void
+mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+ /* Hand back the bus address of the first (only expected) segment. */
+ *addr = segs[0].ds_addr;
+}
+
+/*
+ * mrsas_setup_raidmap: Set up RAID map.
+ * input: Adapter instance soft state
+ *
+ * Allocate DMA memory for the RAID maps and perform setup.
+ * Returns 0 on success, ENOMEM on any allocation/mapping failure.
+ *
+ * NOTE(review): on failure this returns without unwinding allocations
+ * made in earlier loop iterations — presumably cleaned up in detach;
+ * confirm.
+ */
+static int mrsas_setup_raidmap(struct mrsas_softc *sc)
+{
+ /* FW map size: base struct plus one span map per additional LD */
+ sc->map_sz = sizeof(MR_FW_RAID_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
+
+ /* Two map buffers are kept — presumably current/pending copies
+  * for map-sync; TODO confirm which index FW vs. driver owns. */
+ for (int i=0; i < 2; i++)
+ {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
+ 4, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ sc->map_sz, // maxsize
+ 1, // nsegments
+ sc->map_sz, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->raidmap_tag[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
+ sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
+ BUS_DMA_NOWAIT)){
+ device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
+ return (ENOMEM);
+ }
+ /* NOTE(review): effectively dead — a successful bus_dmamem_alloc
+  * above already guarantees raidmap_mem[i] != NULL. */
+ if (!sc->raidmap_mem[i]) {
+ device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
+ return (ENOMEM);
+ }
+ }
+
+ /* Pull the map from FW; if that succeeds, push a sync back. */
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ return (0);
+}
+
+/**
+ * mrsas_init_fw: Initialize Firmware
+ * input: Adapter soft state
+ *
+ * Calls transition_to_ready() to make sure Firmware is in operational
+ * state and calls mrsas_init_adapter() to send IOC_INIT command to
+ * Firmware. It issues internal commands to get the controller info
+ * after the IOC_INIT command response is received by Firmware.
+ * Note: code relating to get_pdlist, get_ld_list and max_sectors
+ * are currently not being used, it is left here as placeholder.
+ * Returns 0 on success, non-zero on failure.
+ */
+static int mrsas_init_fw(struct mrsas_softc *sc)
+{
+ u_int32_t max_sectors_1;
+ u_int32_t max_sectors_2;
+ u_int32_t tmp_sectors;
+ struct mrsas_ctrl_info *ctrl_info;
+
+ int ret, ocr = 0;
+
+
+ /* Make sure Firmware is ready */
+ ret = mrsas_transition_to_ready(sc, ocr);
+ if (ret != SUCCESS) {
+ return(ret);
+ }
+
+ /* Get operational params, sge flags, send init cmd to ctlr */
+ if (mrsas_init_adapter(sc) != SUCCESS){
+ device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
+ return(1);
+ }
+
+ /* Allocate internal commands for pass-thru */
+ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
+ device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
+ return(1);
+ }
+
+ if (mrsas_setup_raidmap(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
+ return(1);
+ }
+
+ /* For pass-thru, get PD/LD list and controller info */
+ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ mrsas_get_pd_list(sc);
+
+ /* 0xff marks an unused LD id slot */
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+ mrsas_get_ld_list(sc);
+
+ /* M_NOWAIT: may return NULL; a NULL ctrl_info is tolerated below
+  * (the driver then falls back to the SGE-derived sector cap). */
+ ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has two
+ * limits on max sectors. Driver should use the minimum of these two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+ * Note that older firmwares ( < FW ver 30) didn't report information
+ * to calculate max_sectors_1. So the number ended up as zero always.
+ */
+ tmp_sectors = 0;
+ if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ ctrl_info->max_strips_per_io;
+ max_sectors_2 = ctrl_info->max_request_size;
+ tmp_sectors = min(max_sectors_1 , max_sectors_2);
+ sc->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ sc->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if(sc->UnevenSpanSupport) {
+ device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
+ sc->UnevenSpanSupport);
+ /* Fast-path I/O only if the RAID map validates. */
+ if (MR_ValidateMapInfo(sc))
+ sc->fast_path_io = 1;
+ else
+ sc->fast_path_io = 0;
+
+ }
+ }
+ /* Default cap derived from SGE budget, in 512-byte sectors. */
+ sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
+
+ /* Clamp to the FW-reported limit when one was obtained. */
+ if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
+ sc->max_sectors_per_req = tmp_sectors;
+
+ if (ctrl_info)
+ free(ctrl_info, M_MRSAS);
+
+ return(0);
+}
+
+/**
+ * mrsas_init_adapter: Initializes the adapter/controller
+ * input: Adapter soft state
+ *
+ * Prepares for the issuing of the IOC Init cmd to FW for initializing the
+ * ROC/controller. The FW register is read to determine the number of
+ * commands that is supported. All memory allocations for IO is based on
+ * max_cmd. Appropriate calculations are performed in this function.
+ * Returns SUCCESS (0) or the first failing sub-step's error code.
+ */
+int mrsas_init_adapter(struct mrsas_softc *sc)
+{
+ uint32_t status;
+ u_int32_t max_cmd;
+ int ret;
+
+ /* Read FW status register */
+ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+
+ /* Get operational params from status register */
+ sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
+
+ /* Decrement the max supported by 1, to correlate with FW */
+ sc->max_fw_cmds = sc->max_fw_cmds-1;
+ max_cmd = sc->max_fw_cmds;
+
+ /* Determine allocation size of command frames */
+ /* Reply queue depth: 2*max_cmd + 1, rounded up to a multiple of 16. */
+ sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
+ sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
+ sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
+ /* One extra IO frame beyond max_cmd, plus one leading frame. */
+ sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
+ sc->chain_frames_alloc_sz = 1024 * max_cmd;
+ /* SGE capacity of the main frame after the fixed request header. */
+ sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+ sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
+ /* -2: reserve entries for the chain element and terminator. */
+ sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
+
+ /* Used for pass thru MFI frame (DCMD) */
+ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+ sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(MPI2_SGE_IO_UNION))/16;
+
+ sc->last_reply_idx = 0;
+
+ ret = mrsas_alloc_mem(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+ ret = mrsas_alloc_mpt_cmds(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+ ret = mrsas_ioc_init(sc);
+ if (ret != SUCCESS)
+ return(ret);
+
+
+ return(0);
+}
+
+/**
+ * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
+ * input: Adapter soft state
+ *
+ * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
+ * Layout: the first 1024 bytes hold the MFI init frame; the MPI2
+ * IOC INIT request is placed at offset 1024 (see mrsas_ioc_init()).
+ * Returns 0 on success, ENOMEM on failure.
+ */
+int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
+{
+ int ioc_init_size;
+
+ /* Allocate IOC INIT command */
+ ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ioc_init_size, // maxsize
+ 1, // msegments
+ ioc_init_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->ioc_init_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
+ BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+ bzero(sc->ioc_init_mem, ioc_init_size);
+ if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
+ sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
+ &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+/**
+ * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
+ * input: Adapter soft state
+ *
+ * Deallocates memory of the IOC Init cmd. Each step is guarded so the
+ * function is safe to call after a partial mrsas_alloc_ioc_cmd() failure.
+ */
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
+{
+ if (sc->ioc_init_phys_mem)
+ bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
+ if (sc->ioc_init_mem != NULL)
+ bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
+ if (sc->ioc_init_tag != NULL)
+ bus_dma_tag_destroy(sc->ioc_init_tag);
+}
+
+/**
+ * mrsas_ioc_init: Sends IOC Init command to FW
+ * input: Adapter soft state
+ *
+ * Issues the IOC Init cmd to FW to initialize the ROC/controller.
+ * The MPI2 IOC INIT message is wrapped in an MFI init frame and fired
+ * as an MFA descriptor; completion is detected by polling cmd_status.
+ * Returns 0 on success, 1 on timeout or FW-reported failure.
+ */
+int mrsas_ioc_init(struct mrsas_softc *sc)
+{
+ struct mrsas_init_frame *init_frame;
+ pMpi2IOCInitRequest_t IOCInitMsg;
+ MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
+ bus_addr_t phys_addr;
+ int i, retcode = 0;
+
+ /* Allocate memory for the IOC INIT command */
+ if (mrsas_alloc_ioc_cmd(sc)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
+ return(1);
+ }
+
+ /* MPI2 message sits 1024 bytes past the MFI frame (see alloc). */
+ IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
+ IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMsg->MsgVersion = MPI2_VERSION;
+ IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
+ IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
+ IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
+ IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
+ IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
+
+ /* 0xFF = "no response yet"; FW overwrites it on completion. */
+ init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ if (sc->verbuf_mem) {
+ snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
+ MRSAS_VERSION);
+ init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
+ init_frame->driver_ver_hi = 0;
+ }
+
+ /* Point the MFI frame at the embedded MPI2 message. */
+ phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
+ init_frame->queue_info_new_phys_addr_lo = phys_addr;
+ init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
+
+ req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
+ req_desc.MFAIo.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ mrsas_disable_intr(sc);
+ mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
+ mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
+
+ /*
+ * Poll response timer to wait for Firmware response. While this
+ * timer with the DELAY call could block CPU, the time interval for
+ * this is only 1 millisecond.
+ */
+ if (init_frame->cmd_status == 0xFF) {
+ for (i=0; i < (max_wait * 1000); i++){
+ if (init_frame->cmd_status == 0xFF)
+ DELAY(1000);
+ else
+ break;
+ }
+ }
+
+ if (init_frame->cmd_status == 0)
+ mrsas_dprint(sc, MRSAS_OCR,
+ "IOC INIT response received from FW.\n");
+ else
+ {
+ if (init_frame->cmd_status == 0xFF)
+ device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
+ else
+ device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
+ retcode = 1;
+ }
+
+ mrsas_free_ioc_cmd(sc);
+ return (retcode);
+}
+
+/**
+ * mrsas_alloc_mpt_cmds: Allocates the command packets
+ * input: Adapter instance soft state
+ *
+ * This function allocates the internal commands for IOs. Each command that is
+ * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
+ * An array is allocated with mrsas_mpt_cmd context. The free commands are
+ * maintained in a linked list (cmd pool). SMID value range is from 1 to
+ * max_fw_cmds.
+ * Returns 0 on success, ENOMEM/FAIL on allocation failure.
+ */
+int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
+{
+ int i, j;
+ u_int32_t max_cmd;
+ struct mrsas_mpt_cmd *cmd;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+ u_int32_t offset, chain_offset, sense_offset;
+ bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
+ u_int8_t *io_req_base, *chain_frame_base, *sense_base;
+
+ max_cmd = sc->max_fw_cmds;
+
+ sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
+ if (!sc->req_desc) {
+ device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
+ return(ENOMEM);
+ }
+ memset(sc->req_desc, 0, sc->request_alloc_sz);
+
+ /*
+ * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
+ * dynamic array first and then allocate individual commands.
+ */
+ sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list) {
+ device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
+ return(ENOMEM);
+ }
+ memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
+ for (i = 0; i < max_cmd; i++) {
+ sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
+ M_MRSAS, M_NOWAIT);
+ if (!sc->mpt_cmd_list[i]) {
+ /* Unwind the commands allocated so far. */
+ for (j = 0; j < i; j++)
+ free(sc->mpt_cmd_list[j],M_MRSAS);
+ free(sc->mpt_cmd_list, M_MRSAS);
+ sc->mpt_cmd_list = NULL;
+ return(ENOMEM);
+ }
+ }
+
+ /* Frame 0 is skipped; per-command frames start at frame 1. */
+ io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
+ chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
+ sense_base = (u_int8_t*)sc->sense_mem;
+ sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
+ for (i = 0; i < max_cmd; i++) {
+ cmd = sc->mpt_cmd_list[i];
+ offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ chain_offset = 1024 * i;
+ sense_offset = MRSAS_SENSE_LEN * i;
+ memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
+ /* SMID = array index + 1 (SMID 0 is reserved). */
+ cmd->index = i + 1;
+ cmd->ccb_ptr = NULL;
+ callout_init(&cmd->cm_callout, 0);
+ /* ULONG_MAX marks "not paired with an MFI sync command". */
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ cmd->sc = sc;
+ cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
+ memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+ cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
+ cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
+ cmd->sense = sense_base + sense_offset;
+ cmd->sense_phys_addr = sense_base_phys + sense_offset;
+ if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
+ /* NOTE(review): returns without unwinding earlier dmamaps
+  * or the cmd list — presumably cleaned up in detach; confirm. */
+ return(FAIL);
+ }
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ }
+
+ /* Initialize reply descriptor array to 0xFFFFFFFF */
+ reply_desc = sc->reply_desc_mem;
+ for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+ return(0);
+}
+
+/**
+ * mrsas_fire_cmd: Sends command to FW
+ * input: Adapter soft state
+ * request descriptor address low
+ * request descriptor address high
+ *
+ * This functions fires the command to Firmware by writing to the
+ * inbound_low_queue_port and inbound_high_queue_port.
+ * pci_lock makes the low/high register pair a single atomic post so
+ * concurrent submitters cannot interleave their halves.
+ */
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi)
+{
+ mtx_lock(&sc->pci_lock);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
+ req_desc_lo);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
+ req_desc_hi);
+ mtx_unlock(&sc->pci_lock);
+}
+
+/**
+ * mrsas_transition_to_ready: Move FW to Ready state
+ * input: Adapter instance soft state
+ * ocr: non-zero when called from the OCR path (FAULT is then tolerated)
+ *
+ * During the initialization, FW passes can potentially be in any one of
+ * several possible states. If the FW in operational, waiting-for-handshake
+ * states, driver must take steps to bring it to ready state. Otherwise, it
+ * has to wait for the ready state.
+ * Returns 0 once FW is READY, -ENODEV on unrecoverable/stuck states.
+ */
+int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
+{
+ int i;
+ u_int8_t max_wait;
+ u_int32_t val, fw_state;
+ u_int32_t cur_state;
+ u_int32_t abs_state, curr_abs_state;
+
+ val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = val & MFI_STATE_MASK;
+ max_wait = MRSAS_RESET_WAIT_TIME;
+
+ if (fw_state != MFI_STATE_READY)
+ device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+ /* Full register value; used below to detect any state change. */
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
+ switch (fw_state) {
+ case MFI_STATE_FAULT:
+ device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
+ /* During OCR a FAULT is expected: wait it out below. */
+ if (ocr) {
+ cur_state = MFI_STATE_FAULT;
+ break;
+ }
+ else
+ return -ENODEV;
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /* Set the CLR bit in inbound doorbell */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_INIT_HOTPLUG);
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+ case MFI_STATE_OPERATIONAL:
+ /* Bring it to READY state; assuming max wait 10 secs */
+ mrsas_disable_intr(sc);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
+ /* Busy-wait until the doorbell reset bit clears. */
+ for (i=0; i < max_wait * 1000; i++) {
+ if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
+ DELAY(1000);
+ else
+ break;
+ }
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+ case MFI_STATE_UNDEFINED:
+ /* This state should not last for more than 2 seconds */
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+ case MFI_STATE_BB_INIT:
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+ case MFI_STATE_FW_INIT:
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+ case MFI_STATE_FW_INIT_2:
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+ case MFI_STATE_DEVICE_SCAN:
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+ case MFI_STATE_FLUSH_CACHE:
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+ default:
+ device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad))& MFI_STATE_MASK);
+ curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ if (abs_state == curr_abs_state)
+ DELAY(1000);
+ else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+ }
+ mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
+ return 0;
+}
+
+/**
+ * mrsas_get_mfi_cmd: Get a cmd from free command pool
+ * input: Adapter soft state
+ *
+ * This function removes an MFI command from the command list.
+ * Returns NULL when the pool is empty; callers must check.
+ */
+struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
+{
+ struct mrsas_mfi_cmd *cmd = NULL;
+
+ /* mfi_cmd_pool_lock serializes pool access with release(). */
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
+ cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
+ TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
+ }
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
+
+ return cmd;
+}
+
+/**
+ * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
+ * input: Adapter Context.
+ *
+ * This function will check FW status register and flag
+ * do_timeout_reset flag. It will do OCR/Kill adapter if
+ * FW is in fault state or IO timed out has trigger reset.
+ * Runs until remove_in_progress is set; exits via mrsas_kproc_exit().
+ */
+static void
+mrsas_ocr_thread(void *arg)
+{
+ struct mrsas_softc *sc;
+ u_int32_t fw_status, fw_state;
+
+ sc = (struct mrsas_softc *)arg;
+
+ mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
+
+ sc->ocr_thread_active = 1;
+ mtx_lock(&sc->sim_lock);
+ for (;;) {
+ /* Sleep for 1 second and check the queue status*/
+ /* (msleep drops sim_lock while sleeping; ocr_chan allows
+  *  an early wakeup, e.g. on shutdown or forced reset) */
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
+ "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Exit due to shutdown from %s\n", __func__);
+ break;
+ }
+ fw_status = mrsas_read_reg(sc,
+ offsetof(mrsas_reg_set, outbound_scratch_pad));
+ fw_state = fw_status & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
+ device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
+ sc->do_timedout_reset?"IO Timeout":
+ "FW fault detected");
+ /* ioctl_lock guards reset bookkeeping seen by ioctl path. */
+ mtx_lock_spin(&sc->ioctl_lock);
+ sc->reset_in_progress = 1;
+ sc->reset_count++;
+ mtx_unlock_spin(&sc->ioctl_lock);
+ /* Freeze CAM queues while the controller is reset. */
+ mrsas_xpt_freeze(sc);
+ mrsas_reset_ctrl(sc);
+ mrsas_xpt_release(sc);
+ sc->reset_in_progress = 0;
+ sc->do_timedout_reset = 0;
+ }
+ }
+ mtx_unlock(&sc->sim_lock);
+ sc->ocr_thread_active = 0;
+ mrsas_kproc_exit(0);
+}
+
+/**
+ * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
+ * input: Adapter Context.
+ *
+ * This function clears the reply descriptors so that, after OCR,
+ * neither the driver nor FW sees stale completion history.
+ */
+void mrsas_reset_reply_desc(struct mrsas_softc *sc)
+{
+ int i;
+ pMpi2ReplyDescriptorsUnion_t reply_desc;
+
+ sc->last_reply_idx = 0;
+ reply_desc = sc->reply_desc_mem;
+ /* 0xFFFFFFFF... marks an unused reply descriptor slot. */
+ for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
+ reply_desc->Words = MRSAS_ULONG_MAX;
+ }
+}
+
+/**
+ * mrsas_reset_ctrl Core function to OCR/Kill adapter.
+ * input: Adapter Context.
+ *
+ * This function will run from thread context so that it can sleep.
+ * 1. Do not handle OCR if FW is in HW critical error.
+ * 2. Wait for outstanding command to complete for 180 seconds.
+ * 3. If #2 does not find any outstanding command Controller is in working
+ * state, so skip OCR.
+ * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
+ * 4. Start of the OCR, return all SCSI command back to CAM layer which has
+ * ccb_ptr.
+ * 5. Post OCR, Re-fire Management command and move Controller to Operation
+ * state.
+ * Returns SUCCESS or FAIL.
+ */
+int mrsas_reset_ctrl(struct mrsas_softc *sc)
+{
+ int retval = SUCCESS, i, j, retry = 0;
+ u_int32_t host_diag, abs_state, status_reg, reset_adapter;
+ union ccb *ccb;
+ struct mrsas_mfi_cmd *mfi_cmd;
+ struct mrsas_mpt_cmd *mpt_cmd;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
+ device_printf(sc->mrsas_dev,
+ "mrsas: Hardware critical error, returning FAIL.\n");
+ return FAIL;
+ }
+
+ /* Flag the reset so the submission path stops posting new IO. */
+ set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
+ mrsas_disable_intr(sc);
+ DELAY(1000 * 1000);
+
+ /* First try waiting for commands to complete */
+ if (mrsas_wait_for_outstanding(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "resetting adapter from %s.\n",
+ __func__);
+ /* Now return commands back to the CAM layer */
+ for (i = 0 ; i < sc->max_fw_cmds; i++) {
+ mpt_cmd = sc->mpt_cmd_list[i];
+ if (mpt_cmd->ccb_ptr) {
+ ccb = (union ccb *)(mpt_cmd->ccb_ptr);
+ ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+ mrsas_cmd_done(sc, mpt_cmd);
+ atomic_dec(&sc->fw_outstanding);
+ }
+ }
+
+ status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad));
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (sc->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
+ retval = FAIL;
+ goto out;
+ }
+
+ /* Now try to reset the chip */
+ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
+ /* Magic write sequence unlocks the diag register. */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_FLUSH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_1ST_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_2ND_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_3RD_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_4TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_5TH_KEY_VALUE);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
+ MPI2_WRSEQ_6TH_KEY_VALUE);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 100) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Host diag unlock failed!\n");
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
+ host_diag | HOST_DIAG_RESET_ADAPTER);
+ DELAY(3000 * 1000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ DELAY(100 * 1000);
+ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ fusion_host_diag));
+ if (retry++ == 1000) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Diag reset adapter never cleared!\n");
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ /* Wait for FW to pass its early-boot states. */
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
+ DELAY(100 * 1000);
+ abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
+ " state = 0x%x\n", abs_state);
+ continue;
+ }
+
+ /* Wait for FW to become ready */
+ if (mrsas_transition_to_ready(sc, 1)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "mrsas: Failed to transition controller to ready.\n");
+ continue;
+ }
+
+ mrsas_reset_reply_desc(sc);
+ if (mrsas_ioc_init(sc)) {
+ mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
+ continue;
+ }
+
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+
+ /* Re-fire management commands */
+ for (j = 0 ; j < sc->max_fw_cmds; j++) {
+ mpt_cmd = sc->mpt_cmd_list[j];
+ if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
+ mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
+ /* Map-get DCMDs are dropped (a fresh sync follows below). */
+ if (mfi_cmd->frame->dcmd.opcode ==
+ MR_DCMD_LD_MAP_GET_INFO) {
+ mrsas_release_mfi_cmd(mfi_cmd);
+ mrsas_release_mpt_cmd(mpt_cmd);
+ } else {
+ req_desc = mrsas_get_request_desc(sc,
+ mfi_cmd->cmd_id.context.smid - 1);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Re-fire command DCMD opcode 0x%x index %d\n ",
+ mfi_cmd->frame->dcmd.opcode, j);
+ if (!req_desc)
+ device_printf(sc->mrsas_dev,
+ "Cannot build MPT cmd.\n");
+ else
+ mrsas_fire_cmd(sc, req_desc->addr.u.low,
+ req_desc->addr.u.high);
+ }
+ }
+ }
+
+ /* Reset load balance info */
+ memset(sc->load_balance_info, 0,
+ sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
+
+ /* Fetch a fresh RAID map and re-arm the map-sync DCMD. */
+ if (!mrsas_get_map_info(sc))
+ mrsas_sync_map_info(sc);
+
+ /* Adapter reset completed successfully */
+ device_printf(sc->mrsas_dev, "Reset successful\n");
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
+ mrsas_kill_hba(sc);
+ retval = FAIL;
+ } else {
+ /* All IO drained — controller is healthy, no reset needed. */
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_enable_intr(sc);
+ sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ }
+out:
+ /* NOTE(review): on the success/skip paths this clear_bit is a
+  * harmless repeat of the one done above. */
+ clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Reset Exit with %d.\n", retval);
+ return retval;
+}
+
+/**
+ * mrsas_kill_hba Kill HBA when OCR is not supported.
+ * input: Adapter Context.
+ *
+ * This function will kill HBA when OCR is not supported.
+ * Writes MFI_STOP_ADP to the doorbell, then reads it back to flush
+ * the posted write.
+ */
+void mrsas_kill_hba (struct mrsas_softc *sc)
+{
+ mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
+ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
+ MFI_STOP_ADP);
+ /* Flush */
+ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
+}
+
+/**
+ * mrsas_wait_for_outstanding Wait for outstanding commands
+ * input: Adapter Context.
+ *
+ * This function will wait for 180 seconds for outstanding
+ * commands to be completed.
+ * Returns 0 when all commands drained (controller healthy, OCR can be
+ * skipped); 1 when a reset is required (FW fault, shutdown in progress,
+ * or commands still pending after the timeout).
+ */
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
+{
+ int i, outstanding, retval = 0;
+ u_int32_t fw_state;
+
+ for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
+ if (sc->remove_in_progress) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Driver remove or shutdown called.\n");
+ retval = 1;
+ goto out;
+ }
+ /* Check if firmware is in fault state */
+ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad)) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "Found FW in FAULT state, will reset adapter.\n");
+ retval = 1;
+ goto out;
+ }
+ outstanding = atomic_read(&sc->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ /* Periodically log progress and drain completed replies. */
+ if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
+ mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
+ "commands to complete\n",i,outstanding);
+ mrsas_complete_cmd(sc);
+ }
+ DELAY(1000 * 1000);
+ }
+
+ if (atomic_read(&sc->fw_outstanding)) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ " pending commands remain after waiting,"
+ " will reset adapter.\n");
+ retval = 1;
+ }
+out:
+ return retval;
+}
+
+/**
+ * mrsas_release_mfi_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free cmd pool
+ *
+ * This function returns the MFI command to the command list.
+ * Clears the per-use fields before re-queueing.
+ */
+void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
+{
+ struct mrsas_softc *sc = cmd->sc;
+
+ mtx_lock(&sc->mfi_cmd_pool_lock);
+ cmd->ccb_ptr = NULL;
+ cmd->cmd_id.frame_count = 0;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mfi_cmd_pool_lock);
+
+ return;
+}
+
+/**
+ * mrsas_get_ctrl_info - Returns FW's controller structure
+ * input: Adapter soft state
+ * Controller information structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ * Returns 0 on success, -ENOMEM or 1 on failure.
+ */
+static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
+ struct mrsas_ctrl_info *ctrl_info)
+{
+ int retcode = 0;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+
+ cmd = mrsas_get_mfi_cmd(sc);
+
+ if (!cmd) {
+ device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ /* DMAable bounce buffer for the DCMD result. */
+ if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
+ mrsas_release_mfi_cmd(cmd);
+ return -ENOMEM;
+ }
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
+ dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+ dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
+ dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
+
+ /* Copy the result out of the bounce buffer on success. */
+ if (!mrsas_issue_polled(sc, cmd))
+ memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ else
+ retcode = 1;
+
+ mrsas_free_ctlr_info_cmd(sc);
+ mrsas_release_mfi_cmd(cmd);
+ return(retcode);
+}
+
+/**
+ * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
+ * input: Adapter soft state
+ *
+ * Allocates DMAable memory for the controller info internal command.
+ * Returns 0 on success, ENOMEM on failure.
+ */
+int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+ int ctlr_info_size;
+
+ /* Allocate get controller info command */
+ ctlr_info_size = sizeof(struct mrsas_ctrl_info);
+ if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+ 1, 0, // algnmnt, boundary
+ BUS_SPACE_MAXADDR_32BIT,// lowaddr
+ BUS_SPACE_MAXADDR, // highaddr
+ NULL, NULL, // filter, filterarg
+ ctlr_info_size, // maxsize
+ 1, // msegments
+ ctlr_info_size, // maxsegsize
+ BUS_DMA_ALLOCNOW, // flags
+ NULL, NULL, // lockfunc, lockarg
+ &sc->ctlr_info_tag)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
+ BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
+ device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+ if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
+ sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
+ &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
+ return (ENOMEM);
+ }
+
+ memset(sc->ctlr_info_mem, 0, ctlr_info_size);
+ return (0);
+}
+
+/**
+ * mrsas_free_ctlr_info_cmd:  Free memory for controller info command
+ * input:                     Adapter soft state
+ *
+ * Releases the controller-info DMA resources in the required
+ * unload -> free -> destroy order.  Each step is guarded, so calling
+ * this on a partially-initialized state (e.g. after a failure inside
+ * mrsas_alloc_ctlr_info_cmd()) is safe.
+ */
+void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
+{
+    if (sc->ctlr_info_phys_addr)
+        bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
+    if (sc->ctlr_info_mem != NULL)
+        bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
+    if (sc->ctlr_info_tag != NULL)
+        bus_dma_tag_destroy(sc->ctlr_info_tag);
+}
+
+/**
+ * mrsas_issue_polled:  Issues a polling command
+ * inputs:              Adapter soft state
+ *                      Command packet to be issued
+ *
+ * Posts an internal MFI command and busy-waits for completion.  MFI
+ * requires cmd_status to be preset to 0xFF before posting; the firmware
+ * overwrites it on completion.  The maximum wait is
+ * MRSAS_INTERNAL_CMD_WAIT_TIME seconds, polled in 1 ms DELAY() steps.
+ *
+ * Returns 0 when the firmware reports status 0; 1 on post failure,
+ * timeout, or a non-zero firmware status.
+ */
+int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    struct mrsas_header *frame_hdr = &cmd->frame->hdr;
+    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+    int i, retcode = 0;
+
+    frame_hdr->cmd_status = 0xFF;
+    /* Completion is detected here by polling, not via the reply queue. */
+    frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+    /* Issue the frame using inbound queue port */
+    if (mrsas_issue_dcmd(sc, cmd)) {
+        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+        return(1);
+    }
+
+    /*
+     * Poll response timer to wait for Firmware response.  While this
+     * timer with the DELAY call could block CPU, the time interval for
+     * this is only 1 millisecond.
+     * NOTE(review): cmd_status is rewritten by the firmware via DMA;
+     * presumably the frame memory is mapped coherent -- confirm.
+     */
+    if (frame_hdr->cmd_status == 0xFF) {
+        for (i=0; i < (max_wait * 1000); i++){
+            if (frame_hdr->cmd_status == 0xFF)
+                DELAY(1000);
+            else
+                break;
+        }
+    }
+    /* 0xFF here means the FW never touched the status: a timeout. */
+    if (frame_hdr->cmd_status != 0)
+    {
+        if (frame_hdr->cmd_status == 0xFF)
+            device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
+        else
+            device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
+        retcode = 1;
+    }
+    return(retcode);
+}
+
+/**
+ * mrsas_issue_dcmd:  Issues a MFI Pass thru cmd
+ * input:             Adapter soft state
+ *                    mfi cmd pointer
+ *
+ * Called by mrsas_issue_blocked_cmd() and mrsas_issue_polled(): wraps the
+ * MFI frame in an MPT request descriptor and fires it at the firmware.
+ *
+ * Returns 0 on success, 1 if the MPT command could not be built.
+ */
+int
+mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    MRSAS_REQUEST_DESCRIPTOR_UNION *desc;
+
+    desc = mrsas_build_mpt_cmd(sc, cmd);
+    if (desc == NULL) {
+        device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
+        return (1);
+    }
+
+    mrsas_fire_cmd(sc, desc->addr.u.low, desc->addr.u.high);
+    return (0);
+}
+
+/**
+ * mrsas_build_mpt_cmd:  Calls helper function to build Passthru cmd
+ * input:                Adapter soft state
+ *                       mfi cmd to build
+ *
+ * Builds the MPT-MFI passthru command, then fetches the request
+ * descriptor for the resulting SMID and fills in its SCSI-IO fields.
+ *
+ * Returns the prepared request descriptor, or NULL on failure.
+ */
+MRSAS_REQUEST_DESCRIPTOR_UNION *
+mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    MRSAS_REQUEST_DESCRIPTOR_UNION *rdesc;
+    u_int16_t smid;
+
+    if (mrsas_build_mptmfi_passthru(sc, cmd)) {
+        device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
+        return (NULL);
+    }
+
+    /* SMID was recorded by the passthru builder; descriptors are 0-based. */
+    smid = cmd->cmd_id.context.smid;
+    rdesc = mrsas_get_request_desc(sc, smid - 1);
+    if (rdesc == NULL)
+        return (NULL);
+
+    rdesc->addr.Words = 0;
+    rdesc->SCSIIO.RequestFlags =
+        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+    rdesc->SCSIIO.SMID = smid;
+
+    return (rdesc);
+}
+
+/**
+ * mrsas_build_mptmfi_passthru:  Builds a MPT MFI Passthru command
+ * input:                        Adapter soft state
+ *                               mfi cmd pointer
+ *
+ * Wraps an MFI frame inside an MPT pass-through I/O request: a fresh MPT
+ * command is taken from the pool, the MFI frame's physical address is
+ * hung off the request's IEEE SGE chain, and the MFI command records the
+ * MPT SMID so the two can be matched at completion time.
+ *
+ * Returns 0 on success, 1 if no MPT command could be obtained.
+ */
+u_int8_t
+mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
+{
+    MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+    PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
+    struct mrsas_mpt_cmd *mpt_cmd;
+    struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
+
+    mpt_cmd = mrsas_get_mpt_cmd(sc);
+    if (!mpt_cmd)
+        return(1);
+
+    /* Save the smid. To be used for returning the cmd */
+    mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
+
+    /* Back-link so the completion path can find the MFI command. */
+    mpt_cmd->sync_cmd_idx = mfi_cmd->index;
+
+    /*
+     * For cmds where the flag is set, store the flag and check
+     * on completion. For cmds with this flag, don't call
+     * mrsas_complete_cmd.
+     */
+
+    if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+        mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+    io_req = mpt_cmd->io_request;
+
+    /* Invader/Fury require the last main-message SGE flags cleared. */
+    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+        pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
+        sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+        sgl_ptr_end->Flags = 0;
+    }
+
+    mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+    io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+    io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
+    io_req->ChainOffset = sc->chain_offset_mfi_pthru;
+
+    /* The chain element points at the MFI frame itself. */
+    mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+
+    mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+        MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+    mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
+
+    return(0);
+}
+
+/**
+ * mrsas_issue_blocked_cmd:  Synchronous wrapper around regular FW cmds
+ * input:                    Adapter soft state
+ *                           Command to be issued
+ *
+ * Posts the command and sleeps until the ISR completion path
+ * (mrsas_wakeup()) replaces the ECONNREFUSED sentinel in cmd_status.
+ * The sleep/wakeup channel is the address of sc->chan itself; each
+ * tsleep() times out after hz ticks, so total_time counts roughly one
+ * second per iteration, up to MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
+ * Used for issuing internal and ioctl commands.
+ *
+ * Returns 0 on completion, 1 on post failure or timeout.
+ */
+int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+    unsigned long total_time = 0;
+    int retcode = 0;
+
+    /* Initialize cmd_status: sentinel meaning "not yet completed". */
+    cmd->cmd_status = ECONNREFUSED;
+
+    /* Build MPT-MFI command for issue to FW */
+    if (mrsas_issue_dcmd(sc, cmd)){
+        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
+        return(1);
+    }
+
+    /*
+     * NOTE(review): this stores the address of the on-stack 'cmd'
+     * argument, but the stored value is never dereferenced -- the
+     * actual sleep channel below is &sc->chan itself.
+     */
+    sc->chan = (void*)&cmd;
+
+    /* The following is for debug only... */
+    //device_printf(sc->mrsas_dev,"DCMD issued to FW, about to sleep-wait...\n");
+    //device_printf(sc->mrsas_dev,"sc->chan = %p\n", sc->chan);
+
+    while (1) {
+        if (cmd->cmd_status == ECONNREFUSED){
+            tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+        }
+        else
+            break;
+        total_time++;
+        if (total_time >= max_wait) {
+            device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
+            retcode = 1;
+            break;
+        }
+    }
+    return(retcode);
+}
+
+/**
+ * mrsas_complete_mptmfi_passthru:  Completes a command
+ * input:   sc:     Adapter soft state
+ *          cmd:    Command to be completed
+ *          status: cmd completion status
+ *
+ * Called from mrsas_complete_cmd() after an interrupt is received from
+ * Firmware and io_request->Function is
+ * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.  Dispatches on the MFI command
+ * opcode: wakes synchronous waiters, handles RAID-map update completions,
+ * AEN returns, and abort completions.
+ */
+void
+mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
+    u_int8_t status)
+{
+    struct mrsas_header *hdr = &cmd->frame->hdr;
+    /* Status as written back into the frame by the firmware. */
+    u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
+
+    /* Reset the retry counter for future re-tries */
+    cmd->retry_for_fw_reset = 0;
+
+    if (cmd->ccb_ptr)
+        cmd->ccb_ptr = NULL;
+
+    switch (hdr->cmd) {
+    case MFI_CMD_INVALID:
+        device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
+        break;
+    case MFI_CMD_PD_SCSI_IO:
+    case MFI_CMD_LD_SCSI_IO:
+        /*
+         * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+         * issued either through an IO path or an IOCTL path. If it
+         * was via IOCTL, we will send it to internal completion.
+         */
+        if (cmd->sync_cmd) {
+            cmd->sync_cmd = 0;
+            mrsas_wakeup(sc, cmd);
+            break;
+        }
+        /* FALLTHROUGH: non-sync SCSI IO shares the DCMD handling below. */
+    case MFI_CMD_SMP:
+    case MFI_CMD_STP:
+    case MFI_CMD_DCMD:
+        /* Check for LD map update */
+        if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
+            (cmd->frame->dcmd.mbox.b[1] == 1)) {
+            /* Disable fast path until the new map is validated. */
+            sc->fast_path_io = 0;
+            mtx_lock(&sc->raidmap_lock);
+            if (cmd_status != 0) {
+                if (cmd_status != MFI_STAT_NOT_FOUND)
+                    device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
+                else {
+                    mrsas_release_mfi_cmd(cmd);
+                    mtx_unlock(&sc->raidmap_lock);
+                    break;
+                }
+            }
+            else
+                /* Success: flip to the buffer the FW just filled. */
+                sc->map_id++;
+            mrsas_release_mfi_cmd(cmd);
+            if (MR_ValidateMapInfo(sc))
+                sc->fast_path_io = 0;
+            else
+                sc->fast_path_io = 1;
+            /* Re-arm the pending map-sync DCMD for the next change. */
+            mrsas_sync_map_info(sc);
+            mtx_unlock(&sc->raidmap_lock);
+            break;
+        }
+#if 0 //currently not supporting event handling, so commenting out
+        if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+            cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+            mrsas_poll_wait_aen = 0;
+        }
+#endif
+        /* See if got an event notification */
+        if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+            mrsas_complete_aen(sc, cmd);
+        else
+            mrsas_wakeup(sc, cmd);
+        break;
+    case MFI_CMD_ABORT:
+        /* Command issued to abort another cmd return */
+        mrsas_complete_abort(sc, cmd);
+        break;
+    default:
+        device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
+        break;
+    }
+}
+
+/**
+ * mrsas_wakeup:  Completes an internal command
+ * input:         Adapter soft state
+ *                Command to be completed
+ *
+ * Copies the firmware status out of the frame into cmd->cmd_status
+ * (mapping the driver's ECONNREFUSED "still pending" sentinel to 0) and
+ * wakes the thread sleeping in mrsas_issue_blocked_cmd() on the
+ * &sc->chan channel.
+ */
+void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    cmd->cmd_status = cmd->frame->io.cmd_status;
+
+    /* ECONNREFUSED is the driver's pending sentinel, not a FW status. */
+    if (cmd->cmd_status == ECONNREFUSED)
+        cmd->cmd_status = 0;
+
+    /* For debug only ... */
+    //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
+
+    /* NOTE(review): the value stored in sc->chan is never read; the
+     * wakeup channel is the address &sc->chan itself. */
+    sc->chan = (void*)&cmd;
+    wakeup_one((void *)&sc->chan);
+    return;
+}
+
+/**
+ * mrsas_shutdown_ctlr:  Instructs FW to shutdown the controller
+ * input:                Adapter soft state
+ *                       Shutdown/Hibernate opcode
+ *
+ * Aborts any outstanding AEN and map-update commands, then issues a
+ * blocking DCMD with the given opcode to have the firmware shut the
+ * controller down.  Silently returns when the adapter is in a critical
+ * error state or no MFI frame is available.
+ */
+static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
+{
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+
+    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+        return;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (cmd == NULL) {
+        device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
+        return;
+    }
+
+    /* Cancel the long-lived pended commands before shutting down. */
+    if (sc->aen_cmd)
+        mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
+    if (sc->map_update_cmd)
+        mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
+
+    dcmd = &cmd->frame->dcmd;
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    /* Shutdown DCMD carries no data, only the opcode. */
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0x0;
+    dcmd->sge_count = 0;
+    dcmd->flags = MFI_FRAME_DIR_NONE;
+    dcmd->timeout = 0;
+    dcmd->pad_0 = 0;
+    dcmd->data_xfer_len = 0;
+    dcmd->opcode = opcode;
+
+    device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
+
+    mrsas_issue_blocked_cmd(sc, cmd);
+    mrsas_release_mfi_cmd(cmd);
+}
+
+/**
+ * mrsas_flush_cache:  Requests FW to flush all its caches
+ * input:              Adapter soft state
+ *
+ * Issues MR_DCMD_CTRL_CACHE_FLUSH as a blocking internal DCMD, asking
+ * the firmware to flush both the controller cache and the disk caches.
+ * Silently returns when the adapter is in a critical error state or no
+ * MFI frame is available.
+ */
+static void mrsas_flush_cache(struct mrsas_softc *sc)
+{
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+
+    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
+        return;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (cmd == NULL) {
+        device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
+        return;
+    }
+
+    dcmd = &cmd->frame->dcmd;
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    /* Data-less DCMD; which caches to flush is selected via the mailbox. */
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0x0;
+    dcmd->sge_count = 0;
+    dcmd->flags = MFI_FRAME_DIR_NONE;
+    dcmd->timeout = 0;
+    dcmd->pad_0 = 0;
+    dcmd->data_xfer_len = 0;
+    dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+    dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+    mrsas_issue_blocked_cmd(sc, cmd);
+    mrsas_release_mfi_cmd(cmd);
+}
+
+/**
+ * mrsas_get_map_info:  Load and validate RAID map
+ * input:               Adapter instance soft state
+ *
+ * Fetches the firmware LD map via mrsas_get_ld_map_info() and checks it
+ * with MR_ValidateMapInfo(); fast-path I/O is enabled only when both
+ * steps succeed.  Returns 0 if successful, 1 otherwise.
+ */
+static int mrsas_get_map_info(struct mrsas_softc *sc)
+{
+    /* Pessimistically disable fast path until the map proves valid. */
+    sc->fast_path_io = 0;
+
+    if (mrsas_get_ld_map_info(sc) != 0)
+        return 1;
+    if (MR_ValidateMapInfo(sc) != 0)
+        return 1;
+
+    sc->fast_path_io = 1;
+    return 0;
+}
+
+/**
+ * mrsas_get_ld_map_info:  Get FW's ld_map structure
+ * input:                  Adapter instance soft state
+ *
+ * Issues a polled MR_DCMD_LD_MAP_GET_INFO DCMD that asks the firmware to
+ * DMA its current RAID map into the raidmap buffer selected by
+ * (map_id & 1).
+ *
+ * Returns 0 on success.  NOTE(review): failure paths mix conventions --
+ * ENOMEM for a missing buffer but 1 for a DCMD failure; callers only
+ * test for non-zero, so this is benign, but worth unifying.
+ */
+static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
+{
+    int retcode = 0;
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+    MR_FW_RAID_MAP_ALL *map;
+    bus_addr_t map_phys_addr = 0;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
+        return 1;
+    }
+
+    dcmd = &cmd->frame->dcmd;
+
+    /* Double-buffered map: (map_id & 1) selects the buffer to fill. */
+    map = sc->raidmap_mem[(sc->map_id & 1)];
+    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
+    if (!map) {
+        device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
+        mrsas_release_mfi_cmd(cmd);
+        return (ENOMEM);
+    }
+    memset(map, 0, sizeof(*map));
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0xFF;
+    dcmd->sge_count = 1;
+    dcmd->flags = MFI_FRAME_DIR_READ;
+    dcmd->timeout = 0;
+    dcmd->pad_0 = 0;
+    dcmd->data_xfer_len = sc->map_sz;
+    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+    dcmd->sgl.sge32[0].length = sc->map_sz;
+    if (!mrsas_issue_polled(sc, cmd))
+        retcode = 0;
+    else
+    {
+        device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
+        retcode = 1;
+    }
+    mrsas_release_mfi_cmd(cmd);
+    return(retcode);
+}
+
+/**
+ * mrsas_sync_map_info:  Pushes the driver's LD sync map to firmware
+ * input:                Adapter instance soft state
+ *
+ * Builds an MR_LD_TARGET_SYNC array (target id + sequence number per LD)
+ * in the raidmap buffer not currently in use, and issues
+ * MR_DCMD_LD_MAP_GET_INFO with the PEND flag so the firmware completes
+ * the DCMD only when the map next changes.  The in-flight command is
+ * remembered in sc->map_update_cmd.
+ *
+ * Returns 0 on successful issue, 1 on failure (the MFI command is
+ * released and map_update_cmd cleared so nothing is leaked).
+ */
+static int mrsas_sync_map_info(struct mrsas_softc *sc)
+{
+    int retcode = 0, i;
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+    uint32_t num_lds;
+    MR_LD_TARGET_SYNC *target_map = NULL;
+    MR_FW_RAID_MAP_ALL *map;
+    MR_LD_RAID *raid;
+    MR_LD_TARGET_SYNC *ld_sync;
+    bus_addr_t map_phys_addr = 0;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
+        return 1;
+    }
+
+    map = sc->raidmap_mem[sc->map_id & 1];
+    num_lds = map->raidMap.ldCount;
+
+    dcmd = &cmd->frame->dcmd;
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    /* Build the sync map in the buffer the FW is NOT currently using. */
+    target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
+    memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
+
+    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
+
+    ld_sync = (MR_LD_TARGET_SYNC *)target_map;
+
+    for (i = 0; i < num_lds; i++, ld_sync++) {
+        raid = MR_LdRaidGet(i, map);
+        ld_sync->targetId = MR_GetLDTgtId(i, map);
+        ld_sync->seqNum = raid->seqNum;
+    }
+
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0xFF;
+    dcmd->sge_count = 1;
+    dcmd->flags = MFI_FRAME_DIR_WRITE;
+    dcmd->timeout = 0;
+    dcmd->pad_0 = 0;
+    dcmd->data_xfer_len = sc->map_sz;
+    dcmd->mbox.b[0] = num_lds;
+    /* PEND flag: FW holds the DCMD until the RAID map changes. */
+    dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
+    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
+    dcmd->sgl.sge32[0].length = sc->map_sz;
+
+    sc->map_update_cmd = cmd;
+    if (mrsas_issue_dcmd(sc, cmd)) {
+        device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
+        /* BUG FIX: undo bookkeeping and release the frame; the original
+         * leaked cmd and left map_update_cmd pointing at it. */
+        sc->map_update_cmd = NULL;
+        mrsas_release_mfi_cmd(cmd);
+        return(1);
+    }
+    return(retcode);
+}
+
+/**
+ * mrsas_get_pd_list:  Returns FW's PD list structure
+ * input:              Adapter soft state
+ *
+ * Issues an internal DCMD to fetch the firmware's physical-drive list
+ * and mirrors the entries exposed to the host into sc->pd_list.
+ *
+ * Returns 0 on success, 1 on DCMD failure, ENOMEM on allocation failure.
+ */
+static int mrsas_get_pd_list(struct mrsas_softc *sc)
+{
+    int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+    struct MR_PD_LIST *pd_list_mem;
+    struct MR_PD_ADDRESS *pd_addr;
+    bus_addr_t pd_list_phys_addr = 0;
+    struct mrsas_tmp_dcmd *tcmd;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
+        return 1;
+    }
+
+    dcmd = &cmd->frame->dcmd;
+
+    /*
+     * BUG FIX: M_NOWAIT allocations can return NULL; the original passed
+     * an unchecked, uninitialized tcmd to the DMA helper.  M_ZERO makes
+     * mrsas_free_tmp_dcmd() safe on a partially-set-up tcmd.
+     */
+    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT | M_ZERO);
+    if (tcmd == NULL) {
+        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
+        mrsas_release_mfi_cmd(cmd);
+        return(ENOMEM);
+    }
+    pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+    if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
+        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
+        mrsas_free_tmp_dcmd(tcmd);      /* unwind partial DMA state */
+        free(tcmd, M_MRSAS);            /* BUG FIX: was leaked here */
+        mrsas_release_mfi_cmd(cmd);
+        return(ENOMEM);
+    }
+    pd_list_mem = tcmd->tmp_dcmd_mem;
+    pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    /* Only drives exposed to the host are of interest. */
+    dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+    dcmd->mbox.b[1] = 0;
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0xFF;
+    dcmd->sge_count = 1;
+    dcmd->flags = MFI_FRAME_DIR_READ;
+    dcmd->timeout = 0;
+    dcmd->pad_0 = 0;
+    dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+    dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
+    dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
+    dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
+
+    if (!mrsas_issue_polled(sc, cmd))
+        retcode = 0;
+    else
+        retcode = 1;
+
+    /* Get the instance PD list */
+    pd_count = MRSAS_MAX_PD;
+    pd_addr = pd_list_mem->addr;
+    if (retcode == 0 && pd_list_mem->count < pd_count) {
+        memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+        for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
+            sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
+            sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
+            sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
+            pd_addr++;
+        }
+    }
+
+    /* Use mutex/spinlock if pd_list component size increase more than 32 bit. */
+    memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+    mrsas_free_tmp_dcmd(tcmd);
+    mrsas_release_mfi_cmd(cmd);
+    free(tcmd, M_MRSAS);
+    return(retcode);
+}
+
+/**
+ * mrsas_get_ld_list:  Returns FW's LD list structure
+ * input:              Adapter soft state
+ *
+ * Issues an internal DCMD to fetch the firmware's logical-drive list and
+ * records the target id of each non-offline LD in sc->ld_ids.
+ *
+ * Returns 0 on success, 1 on DCMD failure, ENOMEM on allocation failure.
+ */
+static int mrsas_get_ld_list(struct mrsas_softc *sc)
+{
+    int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_dcmd_frame *dcmd;
+    struct MR_LD_LIST *ld_list_mem;
+    bus_addr_t ld_list_phys_addr = 0;
+    struct mrsas_tmp_dcmd *tcmd;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
+        return 1;
+    }
+
+    dcmd = &cmd->frame->dcmd;
+
+    /*
+     * BUG FIX: M_NOWAIT allocations can return NULL; the original passed
+     * an unchecked, uninitialized tcmd to the DMA helper.  M_ZERO makes
+     * mrsas_free_tmp_dcmd() safe on a partially-set-up tcmd.
+     */
+    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT | M_ZERO);
+    if (tcmd == NULL) {
+        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
+        mrsas_release_mfi_cmd(cmd);
+        return(ENOMEM);
+    }
+    ld_list_size = sizeof(struct MR_LD_LIST);
+    if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
+        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
+        mrsas_free_tmp_dcmd(tcmd);      /* unwind partial DMA state */
+        free(tcmd, M_MRSAS);            /* BUG FIX: was leaked here */
+        mrsas_release_mfi_cmd(cmd);
+        return(ENOMEM);
+    }
+    ld_list_mem = tcmd->tmp_dcmd_mem;
+    ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
+
+    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+    dcmd->cmd = MFI_CMD_DCMD;
+    dcmd->cmd_status = 0xFF;
+    dcmd->sge_count = 1;
+    dcmd->flags = MFI_FRAME_DIR_READ;
+    dcmd->timeout = 0;
+    dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+    dcmd->opcode = MR_DCMD_LD_GET_LIST;
+    dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
+    dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+    dcmd->pad_0 = 0;
+
+    if (!mrsas_issue_polled(sc, cmd))
+        retcode = 0;
+    else
+        retcode = 1;
+
+    /* Get the instance LD list */
+    if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
+        sc->CurLdCount = ld_list_mem->ldCount;
+        memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
+        for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
+            /* state 0 means the LD is offline; skip it. */
+            if (ld_list_mem->ldList[ld_index].state != 0) {
+                ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+                sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
+            }
+        }
+    }
+
+    mrsas_free_tmp_dcmd(tcmd);
+    mrsas_release_mfi_cmd(cmd);
+    free(tcmd, M_MRSAS);
+    return(retcode);
+}
+
+/**
+ * mrsas_alloc_tmp_dcmd:  Allocates memory for temporary command
+ * input:                 Adapter soft state
+ *                        Temp command
+ *                        Size of allocation
+ *
+ * Allocates DMAable memory for a temporary internal command: creates a
+ * busdma tag of the requested size, allocates a buffer, and loads the
+ * map to obtain tcmd->tmp_dcmd_phys_addr.  The memory is zeroed on
+ * successful load.
+ *
+ * Returns 0 on success, ENOMEM on any step's failure.  On failure the
+ * resources created so far remain in tcmd; mrsas_free_tmp_dcmd() can
+ * reclaim them, provided tcmd's fields started out zeroed.
+ */
+int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
+    int size)
+{
+    if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+            1, 0,                   // algnmnt, boundary
+            BUS_SPACE_MAXADDR_32BIT,// lowaddr (32-bit SGE addresses)
+            BUS_SPACE_MAXADDR,      // highaddr
+            NULL, NULL,             // filter, filterarg
+            size,                   // maxsize
+            1,                      // msegments (single contiguous segment)
+            size,                   // maxsegsize
+            BUS_DMA_ALLOCNOW,       // flags
+            NULL, NULL,             // lockfunc, lockarg
+            &tcmd->tmp_dcmd_tag)) {
+        device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
+        return (ENOMEM);
+    }
+    if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
+            BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
+        device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
+        return (ENOMEM);
+    }
+    /* Load synchronously; mrsas_addr_cb stores the bus address. */
+    if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
+            tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
+            &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
+        device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
+        return (ENOMEM);
+    }
+
+    memset(tcmd->tmp_dcmd_mem, 0, size);
+    return (0);
+}
+
+/**
+ * mrsas_free_tmp_dcmd:  Free memory for temporary command
+ * input:                temporary dcmd pointer
+ *
+ * Releases the temporary command's DMA resources in the required
+ * unload -> free -> destroy order.  Each step is guarded, so calling
+ * this on a partially-initialized (zeroed) tmp_dcmd is safe.  Does NOT
+ * free the struct mrsas_tmp_dcmd itself; that is the caller's job.
+ */
+void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
+{
+    if (tmp->tmp_dcmd_phys_addr)
+        bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
+    if (tmp->tmp_dcmd_mem != NULL)
+        bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
+    if (tmp->tmp_dcmd_tag != NULL)
+        bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
+}
+
+/**
+ * mrsas_issue_blocked_abort_cmd:  Aborts previously issued cmd
+ * input:                          Adapter soft state
+ *                                 Previously issued cmd to be aborted
+ *
+ * Used to abort outstanding commands such as the AEN registration and
+ * RAID map sync DCMDs.  The MFI_CMD_ABORT frame is posted and the caller
+ * sleeps (see mrsas_complete_abort()) for up to
+ * MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
+ *
+ * Returns 0 on success, 1 on post failure or timeout.
+ */
+static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
+    struct mrsas_mfi_cmd *cmd_to_abort)
+{
+    struct mrsas_mfi_cmd *cmd;
+    struct mrsas_abort_frame *abort_fr;
+    u_int8_t retcode = 0;
+    unsigned long total_time = 0;
+    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
+
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
+        return(1);
+    }
+
+    abort_fr = &cmd->frame->abort;
+
+    /* Prepare and issue the abort frame */
+    abort_fr->cmd = MFI_CMD_ABORT;
+    abort_fr->cmd_status = 0xFF;
+    abort_fr->flags = 0;
+    abort_fr->abort_context = cmd_to_abort->index;
+    abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
+    abort_fr->abort_mfi_phys_addr_hi = 0;
+
+    cmd->sync_cmd = 1;
+    cmd->cmd_status = 0xFF;     /* sentinel: abort not yet completed */
+
+    if (mrsas_issue_dcmd(sc, cmd)) {
+        device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
+        /* BUG FIX: the original returned without releasing the frame. */
+        cmd->sync_cmd = 0;
+        mrsas_release_mfi_cmd(cmd);
+        return(1);
+    }
+
+    /* Wait for this cmd to complete */
+    sc->chan = (void*)&cmd;
+    while (1) {
+        if (cmd->cmd_status == 0xFF){
+            tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
+        }
+        else
+            break;
+        total_time++;
+        if (total_time >= max_wait) {
+            device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
+            retcode = 1;
+            break;
+        }
+    }
+
+    cmd->sync_cmd = 0;
+    mrsas_release_mfi_cmd(cmd);
+    return(retcode);
+}
+
+/**
+ * mrsas_complete_abort:  Completes aborting a command
+ * input:                 Adapter soft state
+ *                        Cmd that was issued to abort another cmd
+ *
+ * Called from mrsas_complete_mptmfi_passthru() when an MFI_CMD_ABORT
+ * frame returns: clears the sync flag and status, then wakes the thread
+ * sleeping in mrsas_issue_blocked_abort_cmd() on the &sc->chan channel.
+ */
+void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    if (!cmd->sync_cmd)
+        return;
+
+    cmd->sync_cmd = 0;
+    cmd->cmd_status = 0;
+    sc->chan = (void*)&cmd;
+    wakeup_one((void *)&sc->chan);
+}
+
+/**
+ * mrsas_aen_handler:  Callback function for AEN processing from thread context
+ * input:              Adapter soft state
+ *
+ * Runs from the ev_tq taskqueue after mrsas_complete_aen(): reacts to the
+ * last event code by refreshing the PD/LD lists and rescanning the
+ * matching CAM SIM, then re-registers for the next AEN at seq_num + 1.
+ */
+void mrsas_aen_handler(struct mrsas_softc *sc)
+{
+    union mrsas_evt_class_locale class_locale;
+    int doscan = 0;
+    u_int32_t seq_num;
+    int error;
+
+    /*
+     * BUG FIX: the original reported this error with
+     * device_printf(sc->mrsas_dev, ...) while sc was NULL, dereferencing
+     * the very pointer being validated.
+     */
+    if (sc == NULL) {
+        printf("mrsas: invalid instance!\n");
+        return;
+    }
+
+    if (sc->evt_detail_mem) {
+        switch (sc->evt_detail_mem->code) {
+        case MR_EVT_PD_INSERTED:
+            mrsas_get_pd_list(sc);
+            mrsas_bus_scan_sim(sc, sc->sim_1);
+            doscan = 0;
+            break;
+        case MR_EVT_PD_REMOVED:
+            mrsas_get_pd_list(sc);
+            mrsas_bus_scan_sim(sc, sc->sim_1);
+            doscan = 0;
+            break;
+        case MR_EVT_LD_OFFLINE:
+        case MR_EVT_CFG_CLEARED:
+        case MR_EVT_LD_DELETED:
+            mrsas_bus_scan_sim(sc, sc->sim_0);
+            doscan = 0;
+            break;
+        case MR_EVT_LD_CREATED:
+            mrsas_get_ld_list(sc);
+            mrsas_bus_scan_sim(sc, sc->sim_0);
+            doscan = 0;
+            break;
+        case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+        case MR_EVT_FOREIGN_CFG_IMPORTED:
+        case MR_EVT_LD_STATE_CHANGE:
+            /* Ambiguous scope: rescan both SIMs below. */
+            doscan = 1;
+            break;
+        default:
+            doscan = 0;
+            break;
+        }
+    } else {
+        device_printf(sc->mrsas_dev, "invalid evt_detail\n");
+        return;
+    }
+    if (doscan) {
+        mrsas_get_pd_list(sc);
+        mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
+        mrsas_bus_scan_sim(sc, sc->sim_1);
+        mrsas_get_ld_list(sc);
+        mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
+        mrsas_bus_scan_sim(sc, sc->sim_0);
+    }
+
+    seq_num = sc->evt_detail_mem->seq_num + 1;
+
+    // Register AEN with FW for latest sequence number plus 1
+    class_locale.members.reserved = 0;
+    class_locale.members.locale = MR_EVT_LOCALE_ALL;
+    class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+    /* A registration is already outstanding; nothing more to do. */
+    if (sc->aen_cmd != NULL )
+        return ;
+
+    mtx_lock(&sc->aen_lock);
+    error = mrsas_register_aen(sc, seq_num,
+        class_locale.word);
+    mtx_unlock(&sc->aen_lock);
+
+    if (error)
+        device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
+
+}
+
+
+/**
+ * mrsas_complete_aen:  Completes AEN command
+ * input:               Adapter soft state
+ *                      The AEN cmd returned by firmware
+ *
+ * Called from the ISR path when the registered AEN DCMD returns.  Clears
+ * the abort flag when this is merely an aborted re-registration, releases
+ * the MFI frame, and hands further event processing to the ev_tq
+ * taskqueue (callback "mrsas_aen_handler") unless the driver is detaching.
+ */
+void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    /*
+     * Don't signal app if it is just an aborted previously registered
+     * aen (app notification itself remains a TODO).
+     */
+    if (cmd->abort_aen || sc->remove_in_progress != 0)
+        cmd->abort_aen = 0;
+
+    sc->aen_cmd = NULL;
+    mrsas_release_mfi_cmd(cmd);
+
+    if (sc->remove_in_progress == 0)
+        taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
+}
+
+/* Newbus glue: device interface methods implemented by this driver. */
+static device_method_t mrsas_methods[] = {
+    DEVMETHOD(device_probe, mrsas_probe),
+    DEVMETHOD(device_attach, mrsas_attach),
+    DEVMETHOD(device_detach, mrsas_detach),
+    DEVMETHOD(device_suspend, mrsas_suspend),
+    DEVMETHOD(device_resume, mrsas_resume),
+    DEVMETHOD(bus_print_child, bus_generic_print_child),
+    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
+    { 0, 0 }    /* terminator */
+};
+
+/* Driver declaration: name, method table, per-instance softc size. */
+static driver_t mrsas_driver = {
+    "mrsas",
+    mrsas_methods,
+    sizeof(struct mrsas_softc)
+};
+
+static devclass_t mrsas_devclass;
+/* Attach to the PCI bus; CAM provides the SCSI transport layer. */
+DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
+MODULE_DEPEND(mrsas, cam, 1,1,1);
+
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
new file mode 100644
index 0000000000000..6ec78915ff748
--- /dev/null
+++ b/sys/dev/mrsas/mrsas.h
@@ -0,0 +1,2464 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Authors: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef MRSAS_H
+#define MRSAS_H
+
+#include <sys/param.h> /* defines used in kernel.h */
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/errno.h>
+#include <sys/kernel.h> /* types used in module initialization */
+#include <sys/conf.h> /* cdevsw struct */
+#include <sys/uio.h> /* uio struct */
+#include <sys/malloc.h>
+#include <sys/bus.h> /* structs, prototypes for pci bus stuff */
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <dev/pci/pcivar.h> /* For pci_get macros! */
+#include <dev/pci/pcireg.h>
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/stat.h>
+#include <sys/taskqueue.h>
+#include <sys/poll.h>
+#include <sys/selinfo.h>
+
+/*
+ * Device IDs and PCI
+ */
+#define MRSAS_TBOLT 0x005b
+#define MRSAS_INVADER 0x005d
+#define MRSAS_FURY 0x005f
+#define MRSAS_PCI_BAR0 0x10
+#define MRSAS_PCI_BAR1 0x14
+#define MRSAS_PCI_BAR2 0x1C
+
+/*
+ * Firmware State Defines
+ */
+#define MRSAS_FWSTATE_MAXCMD_MASK 0x0000FFFF
+#define MRSAS_FWSTATE_SGE_MASK 0x00FF0000
+#define MRSAS_FW_STATE_CHNG_INTERRUPT 1
+
+/*
+ * Message Frame Defines
+ */
+#define MRSAS_SENSE_LEN 96
+#define MRSAS_FUSION_MAX_RESET_TRIES 3
+
+/*
+ * Miscellaneous Defines
+ */
+#define BYTE_ALIGNMENT 1
+#define MRSAS_MAX_NAME_LENGTH 32
+#define MRSAS_VERSION "06.704.01.00-fbsd"
+#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
+#define MRSAS_DEFAULT_TIMEOUT 0x14 //temp
+#define DONE 0
+#define MRSAS_PAGE_SIZE 4096
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IO_TIMEOUT 180000 /* 180 second timeout */
+#define MRSAS_LDIO_QUEUE_DEPTH 70 /* 70 percent as default */
+#define THRESHOLD_REPLY_COUNT 50
+
+/*
+ Boolean types
+*/
+#if (__FreeBSD_version < 901000)
+ typedef enum _boolean { false, true } boolean;
+#endif
+enum err { SUCCESS, FAIL };
+
+MALLOC_DECLARE(M_MRSAS);
+SYSCTL_DECL(_hw_mrsas);
+
+#define MRSAS_INFO (1 << 0)
+#define MRSAS_TRACE (1 << 1)
+#define MRSAS_FAULT (1 << 2)
+#define MRSAS_OCR (1 << 3)
+#define MRSAS_TOUT MRSAS_OCR
+#define MRSAS_AEN (1 << 4)
+#define MRSAS_PRL11 (1 << 5)
+
+#define mrsas_dprint(sc, level, msg, args...) \
+do { \
+ if (sc->mrsas_debug & level) \
+ device_printf(sc->mrsas_dev, msg, ##args); \
+} while (0)
+
+
+/****************************************************************************
 * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ ****************************************************************************/
+
+typedef struct _RAID_CONTEXT {
+ u_int8_t Type:4; // 0x00
+ u_int8_t nseg:4; // 0x00
+ u_int8_t resvd0; // 0x01
+ u_int16_t timeoutValue; // 0x02 -0x03
+ u_int8_t regLockFlags; // 0x04
+ u_int8_t resvd1; // 0x05
+ u_int16_t VirtualDiskTgtId; // 0x06 -0x07
+ u_int64_t regLockRowLBA; // 0x08 - 0x0F
+ u_int32_t regLockLength; // 0x10 - 0x13
+ u_int16_t nextLMId; // 0x14 - 0x15
+ u_int8_t exStatus; // 0x16
+ u_int8_t status; // 0x17 status
+ u_int8_t RAIDFlags; // 0x18 resvd[7:6],ioSubType[5:4],resvd[3:1],preferredCpu[0]
+ u_int8_t numSGE; // 0x19 numSge; not including chain entries
+ u_int16_t configSeqNum; // 0x1A -0x1B
+ u_int8_t spanArm; // 0x1C span[7:5], arm[4:0]
+ u_int8_t resvd2[3]; // 0x1D-0x1f
+} RAID_CONTEXT;
+
+
+/*************************************************************************
+ * MPI2 Defines
+ ************************************************************************/
+
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+#ifndef MPI2_POINTER
+#define MPI2_POINTER *
+#endif
+
+
+/***************************************
+ * MPI2 Structures
+ ***************************************/
+
+typedef struct _MPI25_IEEE_SGE_CHAIN64
+{
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+} MPI25_IEEE_SGE_CHAIN64, MPI2_POINTER PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t, MPI2_POINTER pMpi25IeeeSgeChain64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION
+{
+ u_int32_t FlagsLength;
+ union
+ {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t, MPI2_POINTER pMpi2SGESimpleUnion_t;
+
+typedef struct
+{
+ u_int8_t CDB[20]; /* 0x00 */
+ u_int32_t PrimaryReferenceTag; /* 0x14 */
+ u_int16_t PrimaryApplicationTag; /* 0x18 */
+ u_int16_t PrimaryApplicationTagMask; /* 0x1A */
+ u_int32_t TransferLength; /* 0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, MPI2_POINTER pMpi2ScsiIoCdbEedp32_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION
+{
+ u_int16_t Length;
+ u_int8_t NextChainOffset;
+ u_int8_t Flags;
+ union
+ {
+ u_int32_t Address32;
+ u_int64_t Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t, MPI2_POINTER pMpi2SGEChainUnion_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE32
+{
+ u_int32_t Address;
+ u_int32_t FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, MPI2_POINTER pMpi2IeeeSgeSimple32_t;
+typedef struct _MPI2_IEEE_SGE_SIMPLE64
+{
+ u_int64_t Address;
+ u_int32_t Length;
+ u_int16_t Reserved1;
+ u_int8_t Reserved2;
+ u_int8_t Flags;
+} MPI2_IEEE_SGE_SIMPLE64, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, MPI2_POINTER pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION
+{
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t, MPI2_POINTER pMpi2IeeeSgeSimpleUnion_t;
+
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION
+{
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION, MPI2_POINTER PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t, MPI2_POINTER pMpi2IeeeSgeChainUnion_t;
+
+typedef union _MPI2_SGE_IO_UNION
+{
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, MPI2_POINTER PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, MPI2_POINTER pMpi2SGEIOUnion_t;
+
+typedef union
+{
+ u_int8_t CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, MPI2_POINTER PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, MPI2_POINTER pMpi2ScsiIoCdb_t;
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+typedef struct _MPI2_RAID_SCSI_IO_REQUEST
+{
+ u_int16_t DevHandle; /* 0x00 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved1; /* 0x04 */
+ u_int8_t Reserved2; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved3; /* 0x0A */
+ u_int32_t SenseBufferLowAddress; /* 0x0C */
+ u_int16_t SGLFlags; /* 0x10 */
+ u_int8_t SenseBufferLength; /* 0x12 */
+ u_int8_t Reserved4; /* 0x13 */
+ u_int8_t SGLOffset0; /* 0x14 */
+ u_int8_t SGLOffset1; /* 0x15 */
+ u_int8_t SGLOffset2; /* 0x16 */
+ u_int8_t SGLOffset3; /* 0x17 */
+ u_int32_t SkipCount; /* 0x18 */
+ u_int32_t DataLength; /* 0x1C */
+ u_int32_t BidirectionalDataLength; /* 0x20 */
+ u_int16_t IoFlags; /* 0x24 */
+ u_int16_t EEDPFlags; /* 0x26 */
+ u_int32_t EEDPBlockSize; /* 0x28 */
+ u_int32_t SecondaryReferenceTag; /* 0x2C */
+ u_int16_t SecondaryApplicationTag; /* 0x30 */
+ u_int16_t ApplicationTagTranslationMask; /* 0x32 */
+ u_int8_t LUN[8]; /* 0x34 */
+ u_int32_t Control; /* 0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ RAID_CONTEXT RaidContext; /* 0x60 */
+ MPI2_SGE_IO_UNION SGL; /* 0x80 */
+} MRSAS_RAID_SCSI_IO_REQUEST, MPI2_POINTER PTR_MRSAS_RAID_SCSI_IO_REQUEST,
+ MRSASRaidSCSIIORequest_t, MPI2_POINTER pMRSASRaidSCSIIORequest_t;
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+typedef struct _MRSAS_RAID_MFA_IO_DESCRIPTOR {
+ u_int32_t RequestFlags : 8;
+ u_int32_t MessageAddress1 : 24; /* bits 31:8*/
+ u_int32_t MessageAddress2; /* bits 61:32 */
+} MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR,*PMRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR;
+
+/* Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DescriptorTypeDependent; /* 0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t, MPI2_POINTER pMpi2DefaultRequestDescriptor_t;
+
+/* High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ MPI2_POINTER pMpi2HighPriorityRequestDescriptor_t;
+
+/* SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t DevHandle; /* 0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t, MPI2_POINTER pMpi2SCSIIORequestDescriptor_t;
+
+/* SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ MPI2_POINTER pMpi2SCSITargetRequestDescriptor_t;
+
+/* RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR
+{
+ u_int8_t RequestFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t LMID; /* 0x04 */
+ u_int16_t Reserved; /* 0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/* union of Request Descriptors */
+typedef union _MRSAS_REQUEST_DESCRIPTOR_UNION
+{
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MRSAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
+ u_int64_t Words;
+ } addr;
+} MRSAS_REQUEST_DESCRIPTOR_UNION;
+
+/* Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t DescriptorTypeDependent1; /* 0x02 */
+ u_int32_t DescriptorTypeDependent2; /* 0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t, MPI2_POINTER pMpi2DefaultReplyDescriptor_t;
+
+/* Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t ReplyFrameAddress; /* 0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR, MPI2_POINTER PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t, MPI2_POINTER pMpi2AddressReplyDescriptor_t;
+
+/* SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int16_t TaskTag; /* 0x04 */
+ u_int16_t Reserved1; /* 0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/* TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int8_t SequenceNumber; /* 0x04 */
+ u_int8_t Reserved1; /* 0x05 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/* Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int8_t VP_ID; /* 0x02 */
+ u_int8_t Flags; /* 0x03 */
+ u_int16_t InitiatorDevHandle; /* 0x04 */
+ u_int16_t IoIndex; /* 0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ MPI2_POINTER pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/* RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+{
+ u_int8_t ReplyFlags; /* 0x00 */
+ u_int8_t MSIxIndex; /* 0x01 */
+ u_int16_t SMID; /* 0x02 */
+ u_int32_t Reserved; /* 0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ MPI2_POINTER PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ MPI2_POINTER pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/* union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION
+{
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ u_int64_t Words;
+} MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+
+typedef struct {
+ volatile unsigned int val;
+} atomic_t;
+
+#define atomic_read(v) atomic_load_acq_int(&(v)->val)
+#define atomic_set(v,i) atomic_store_rel_int(&(v)->val, i)
+#define atomic_dec(v) atomic_fetchadd_int(&(v)->val, -1)
+#define atomic_inc(v) atomic_fetchadd_int(&(v)->val, 1)
+
+/* IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST
+{
+ u_int8_t WhoInit; /* 0x00 */
+ u_int8_t Reserved1; /* 0x01 */
+ u_int8_t ChainOffset; /* 0x02 */
+ u_int8_t Function; /* 0x03 */
+ u_int16_t Reserved2; /* 0x04 */
+ u_int8_t Reserved3; /* 0x06 */
+ u_int8_t MsgFlags; /* 0x07 */
+ u_int8_t VP_ID; /* 0x08 */
+ u_int8_t VF_ID; /* 0x09 */
+ u_int16_t Reserved4; /* 0x0A */
+ u_int16_t MsgVersion; /* 0x0C */
+ u_int16_t HeaderVersion; /* 0x0E */
+ u_int32_t Reserved5; /* 0x10 */
+ u_int16_t Reserved6; /* 0x14 */
+ u_int8_t Reserved7; /* 0x16 */
+ u_int8_t HostMSIxVectors; /* 0x17 */
+ u_int16_t Reserved8; /* 0x18 */
+ u_int16_t SystemRequestFrameSize; /* 0x1A */
+ u_int16_t ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u_int16_t ReplyFreeQueueDepth; /* 0x1E */
+ u_int32_t SenseBufferAddressHigh; /* 0x20 */
+ u_int32_t SystemReplyAddressHigh; /* 0x24 */
+ u_int64_t SystemRequestFrameBaseAddress; /* 0x28 */
+ u_int64_t ReplyDescriptorPostQueueAddress;/* 0x30 */
+ u_int64_t ReplyFreeQueueAddress; /* 0x38 */
+ u_int64_t TimeStamp; /* 0x40 */
+} MPI2_IOC_INIT_REQUEST, MPI2_POINTER PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
+
+/*
+ * MR private defines
+ */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 // get the mapping information of this LD
+
+
+/*******************************************************************
+ * RAID map related structures
+ ********************************************************************/
+
+typedef struct _MR_DEV_HANDLE_INFO {
+ u_int16_t curDevHdl; // the device handle currently used by fw to issue the command.
+ u_int8_t validHandles; // bitmap of valid device handles.
+ u_int8_t reserved;
+ u_int16_t devHandle[2]; // 0x04 dev handles for all the paths.
+} MR_DEV_HANDLE_INFO;
+
+typedef struct _MR_ARRAY_INFO {
+ u_int16_t pd[MAX_RAIDMAP_ROW_SIZE];
+} MR_ARRAY_INFO; // 0x40, Total Size
+
+typedef struct _MR_QUAD_ELEMENT {
+ u_int64_t logStart; // 0x00
+ u_int64_t logEnd; // 0x08
+ u_int64_t offsetInSpan; // 0x10
+ u_int32_t diff; // 0x18
+ u_int32_t reserved1; // 0x1C
+} MR_QUAD_ELEMENT; // 0x20, Total size
+
+typedef struct _MR_SPAN_INFO {
+ u_int32_t noElements; // 0x00
+ u_int32_t reserved1; // 0x04
+ MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; // 0x08
+} MR_SPAN_INFO; // 0x108, Total size
+
+typedef struct _MR_LD_SPAN_ { // SPAN structure
+ u_int64_t startBlk; // 0x00, starting block number in array
+ u_int64_t numBlks; // 0x08, number of blocks
+ u_int16_t arrayRef; // 0x10, array reference
+ u_int8_t spanRowSize; // 0x11, span row size
+ u_int8_t spanRowDataSize; // 0x12, span row data size
+ u_int8_t reserved[4]; // 0x13, reserved
+} MR_LD_SPAN; // 0x18, Total Size
+
+typedef struct _MR_SPAN_BLOCK_INFO {
+ u_int64_t num_rows; // number of rows/span
+ MR_LD_SPAN span; // 0x08
+ MR_SPAN_INFO block_span_info; // 0x20
+} MR_SPAN_BLOCK_INFO;
+
+typedef struct _MR_LD_RAID {
+ struct {
+ u_int32_t fpCapable :1;
+ u_int32_t reserved5 :3;
+ u_int32_t ldPiMode :4;
+ u_int32_t pdPiMode :4; // Every Pd has to be same.
+ u_int32_t encryptionType :8; // FDE or ctlr encryption (MR_LD_ENCRYPTION_TYPE)
+ u_int32_t fpWriteCapable :1;
+ u_int32_t fpReadCapable :1;
+ u_int32_t fpWriteAcrossStripe :1;
+ u_int32_t fpReadAcrossStripe :1;
+ u_int32_t fpNonRWCapable :1; // TRUE if supporting Non RW IO
+ u_int32_t reserved4 :7;
+ } capability; // 0x00
+ u_int32_t reserved6;
+ u_int64_t size; // 0x08, LD size in blocks
+
+ u_int8_t spanDepth; // 0x10, Total Number of Spans
+ u_int8_t level; // 0x11, RAID level
+ u_int8_t stripeShift; // 0x12, shift-count to get stripe size (0=512, 1=1K, 7=64K, etc.)
+ u_int8_t rowSize; // 0x13, number of disks in a row
+
+ u_int8_t rowDataSize; // 0x14, number of data disks in a row
+ u_int8_t writeMode; // 0x15, WRITE_THROUGH or WRITE_BACK
+ u_int8_t PRL; // 0x16, To differentiate between RAID1 and RAID1E
+ u_int8_t SRL; // 0x17
+
+ u_int16_t targetId; // 0x18, ld Target Id.
+ u_int8_t ldState; // 0x1a, state of ld, state corresponds to MR_LD_STATE
+ u_int8_t regTypeReqOnWrite;// 0x1b, Pre calculate region type requests based on MFC etc..
+ u_int8_t modFactor; // 0x1c, same as rowSize,
+ u_int8_t regTypeReqOnRead; // 0x1d, region lock type used for read, valid only if regTypeOnReadIsValid=1
+ u_int16_t seqNum; // 0x1e, LD sequence number
+
+ struct {
+ u_int32_t ldSyncRequired:1; // This LD requires sync command before completing
+ u_int32_t regTypeReqOnReadLsValid:1; // Qualifier for regTypeOnRead
+ u_int32_t reserved:30;
+ } flags; // 0x20
+
+ u_int8_t LUN[8]; // 0x24, 8 byte LUN field used for SCSI
+ u_int8_t fpIoTimeoutForLd; // 0x2C, timeout value for FP IOs
+ u_int8_t reserved2[3]; // 0x2D
+ u_int32_t logicalBlockLength; // 0x30 Logical block size for the LD
+ struct {
+ u_int32_t LdPiExp:4; // 0x34, P_I_EXPONENT for ReadCap 16
+ u_int32_t LdLogicalBlockExp:4; // 0x34, LOGICAL BLOCKS PER PHYS BLOCK
+ u_int32_t reserved1:24; // 0x34
+ } exponent;
+ u_int8_t reserved3[0x80-0x38]; // 0x38
+} MR_LD_RAID; // 0x80, Total Size
+
+typedef struct _MR_LD_SPAN_MAP {
+ MR_LD_RAID ldRaid; // 0x00
+ u_int8_t dataArmMap[MAX_RAIDMAP_ROW_SIZE]; // 0x80, needed for GET_ARM() - R0/1/5 only.
+ MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; // 0xA0
+} MR_LD_SPAN_MAP; // 0x9E0
+
+typedef struct _MR_FW_RAID_MAP {
+ u_int32_t totalSize; // total size of this structure, including this field.
+ union {
+ struct { // Simple method of version checking variables
+ u_int32_t maxLd;
+ u_int32_t maxSpanDepth;
+ u_int32_t maxRowSize;
+ u_int32_t maxPdCount;
+ u_int32_t maxArrays;
+ } validationInfo;
+ u_int32_t version[5];
+ u_int32_t reserved1[5];
+ } raid_desc;
+ u_int32_t ldCount; // count of lds.
+ u_int32_t Reserved1;
+ u_int8_t ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS]; // 0x20
+ // This doesn't correspond to
+ // FW Ld Tgt Id to LD, but will purge. For example: if tgt Id is 4
+ // and FW LD is 2, and there is only one LD, FW will populate the
+ // array like this. [0xFF, 0xFF, 0xFF, 0xFF, 0x0,.....]. This is to
+ // help reduce the entire structure size if there are few LDs or
+ // driver is looking info for 1 LD only.
+ u_int8_t fpPdIoTimeoutSec; // timeout value used by driver in FP IOs
+ u_int8_t reserved2[7];
+ MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; // 0x00a8
+ MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; // 0x20a8
+ MR_LD_SPAN_MAP ldSpanMap[1]; // 0x28a8-[0-MAX_RAIDMAP_LOGICAL_DRIVES+MAX_RAIDMAP_VIEWS+1];
+} MR_FW_RAID_MAP; // 0x3288, Total Size
+
+typedef struct _LD_LOAD_BALANCE_INFO
+{
+ u_int8_t loadBalanceFlag;
+ u_int8_t reserved1;
+ u_int16_t raid1DevHandle[2];
+ atomic_t scsi_pending_cmds[2];
+ u_int64_t last_accessed_block[2];
+} LD_LOAD_BALANCE_INFO, *PLD_LOAD_BALANCE_INFO;
+
+/* SPAN_SET is info calculated from span info from Raid map per ld */
+typedef struct _LD_SPAN_SET {
+ u_int64_t log_start_lba;
+ u_int64_t log_end_lba;
+ u_int64_t span_row_start;
+ u_int64_t span_row_end;
+ u_int64_t data_strip_start;
+ u_int64_t data_strip_end;
+ u_int64_t data_row_start;
+ u_int64_t data_row_end;
+ u_int8_t strip_offset[MAX_SPAN_DEPTH];
+ u_int32_t span_row_data_width;
+ u_int32_t diff;
+ u_int32_t reserved[2];
+}LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+ LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+}LD_SPAN_INFO, *PLD_SPAN_INFO;
+
+#pragma pack(1)
+typedef struct _MR_FW_RAID_MAP_ALL {
+ MR_FW_RAID_MAP raidMap;
+ MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} MR_FW_RAID_MAP_ALL;
+#pragma pack()
+
+struct IO_REQUEST_INFO {
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks;
+ u_int16_t ldTgtId;
+ u_int8_t isRead;
+ u_int16_t devHandle;
+ u_int64_t pdBlock;
+ u_int8_t fpOkForIo;
+ u_int8_t IoforUnevenSpan;
+ u_int8_t start_span;
+ u_int8_t reserved;
+ u_int64_t start_row;
+};
+
+typedef struct _MR_LD_TARGET_SYNC {
+ u_int8_t targetId;
+ u_int8_t reserved;
+ u_int16_t seqNum;
+} MR_LD_TARGET_SYNC;
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+union desc_value {
+ u_int64_t word;
+ struct {
+ u_int32_t low;
+ u_int32_t high;
+ } u;
+};
+
+/*******************************************************************
+ * Temporary command
+ ********************************************************************/
+struct mrsas_tmp_dcmd {
+ bus_dma_tag_t tmp_dcmd_tag; // tag for tmp DMCD cmd
+ bus_dmamap_t tmp_dcmd_dmamap; // dmamap for tmp DCMD cmd
+ void *tmp_dcmd_mem; // virtual addr of tmp DCMD cmd
+ bus_addr_t tmp_dcmd_phys_addr; //physical addr of tmp DCMD
+};
+
+/*******************************************************************
+ * Register set, included legacy controllers 1068 and 1078,
+ * structure extended for 1078 registers
+ ********************************************************************/
+#pragma pack(1)
+typedef struct _mrsas_register_set {
+ u_int32_t doorbell; /*0000h*/
+ u_int32_t fusion_seq_offset; /*0004h*/
+ u_int32_t fusion_host_diag; /*0008h*/
+ u_int32_t reserved_01; /*000Ch*/
+
+ u_int32_t inbound_msg_0; /*0010h*/
+ u_int32_t inbound_msg_1; /*0014h*/
+ u_int32_t outbound_msg_0; /*0018h*/
+ u_int32_t outbound_msg_1; /*001Ch*/
+
+ u_int32_t inbound_doorbell; /*0020h*/
+ u_int32_t inbound_intr_status; /*0024h*/
+ u_int32_t inbound_intr_mask; /*0028h*/
+
+ u_int32_t outbound_doorbell; /*002Ch*/
+ u_int32_t outbound_intr_status; /*0030h*/
+ u_int32_t outbound_intr_mask; /*0034h*/
+
+ u_int32_t reserved_1[2]; /*0038h*/
+
+ u_int32_t inbound_queue_port; /*0040h*/
+ u_int32_t outbound_queue_port; /*0044h*/
+
+ u_int32_t reserved_2[9]; /*0048h*/
+ u_int32_t reply_post_host_index; /*006Ch*/
+ u_int32_t reserved_2_2[12]; /*0070h*/
+
+ u_int32_t outbound_doorbell_clear; /*00A0h*/
+
+ u_int32_t reserved_3[3]; /*00A4h*/
+
+ u_int32_t outbound_scratch_pad ; /*00B0h*/
+ u_int32_t outbound_scratch_pad_2; /*00B4h*/
+
+ u_int32_t reserved_4[2]; /*00B8h*/
+
+ u_int32_t inbound_low_queue_port ; /*00C0h*/
+
+ u_int32_t inbound_high_queue_port ; /*00C4h*/
+
+ u_int32_t reserved_5; /*00C8h*/
+ u_int32_t res_6[11]; /*CCh*/
+ u_int32_t host_diag;
+ u_int32_t seq_offset;
+ u_int32_t index_registers[807]; /*00CCh*/
+
+} mrsas_reg_set;
+#pragma pack()
+
+/*******************************************************************
+ * Firmware Interface Defines
+ *******************************************************************
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker
+ * for protocol between the software and firmware. Commands are
+ * issued using "message frames".
+ ******************************************************************/
+/*
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_MASK 0xF0000000
+#define MFI_STATE_UNDEFINED 0x00000000
+#define MFI_STATE_BB_INIT 0x10000000
+#define MFI_STATE_FW_INIT 0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_STATE_FW_INIT_2 0x70000000
+#define MFI_STATE_DEVICE_SCAN 0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING 0x90000000
+#define MFI_STATE_FLUSH_CACHE 0xA0000000
+#define MFI_STATE_READY 0xB0000000
+#define MFI_STATE_OPERATIONAL 0xC0000000
+#define MFI_STATE_FAULT 0xF0000000
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
+#define MEGAMFI_FRAME_SIZE 64
+#define MRSAS_MFI_FRAME_SIZE 1024
+#define MRSAS_MFI_SENSE_SIZE 128
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT : Abort all pending cmds
+ * READY : Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ * HOTPLUG : Resume from Hotplug
+ * MFI_STOP_ADP : Send signal to FW to stop processing
+ */
+
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC) // I20
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) // I20
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
+#define MFI_INIT_ABORT 0x00000001
+#define MFI_INIT_READY 0x00000002
+#define MFI_INIT_MFIMODE 0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
+#define MFI_INIT_HOTPLUG 0x00000010
+#define MFI_STOP_ADP 0x00000020
+#define MFI_RESET_FLAGS MFI_INIT_READY| \
+ MFI_INIT_MFIMODE| \
+ MFI_INIT_ABORT
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
+#define MFI_FRAME_SGL32 0x0000
+#define MFI_FRAME_SGL64 0x0002
+#define MFI_FRAME_SENSE32 0x0000
+#define MFI_FRAME_SENSE64 0x0004
+#define MFI_FRAME_DIR_NONE 0x0000
+#define MFI_FRAME_DIR_WRITE 0x0008
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE 0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT 0x00
+#define MFI_CMD_LD_READ 0x01
+#define MFI_CMD_LD_WRITE 0x02
+#define MFI_CMD_LD_SCSI_IO 0x03
+#define MFI_CMD_PD_SCSI_IO 0x04
+#define MFI_CMD_DCMD 0x05
+#define MFI_CMD_ABORT 0x06
+#define MFI_CMD_SMP 0x07
+#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
+
+/* DCMD opcodes carried in the mrsas_dcmd_frame `opcode` field. */
+#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
+#define MR_FLUSH_CTRL_CACHE 0x01
+#define MR_FLUSH_DISK_CACHE 0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN 0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN 0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
+#define MR_DCMD_CTRL_EVENT_GET 0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
+
+#define MR_DCMD_CLUSTER 0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+#define MR_DCMD_CTRL_MISC_CPX 0x0100e200
+#define MR_DCMD_CTRL_MISC_CPX_INIT_DATA_GET 0x0100e201
+#define MR_DCMD_CTRL_MISC_CPX_QUEUE_DATA 0x0100e202
+#define MR_DCMD_CTRL_MISC_CPX_UNREGISTER 0x0100e203
+#define MAX_MR_ROW_SIZE 32
+#define MR_CPX_DIR_WRITE 1
+#define MR_CPX_DIR_READ 0
+#define MR_CPX_VERSION 1
+
+#define MR_DCMD_CTRL_IO_METRICS_GET 0x01170200 // get IO metrics
+
+/* Asynchronous event codes reported via MR_DCMD_CTRL_EVENT_WAIT. */
+#define MR_EVT_CFG_CLEARED 0x0004
+
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+#define MR_EVT_CTRL_PERF_COLLECTION 0x017e
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD = 0x01,
+ MFI_STAT_INVALID_DCMD = 0x02,
+ MFI_STAT_INVALID_PARAMETER = 0x03,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+ MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+ MFI_STAT_APP_IN_USE = 0x07,
+ MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+ MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+ MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+ MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+ MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+ MFI_STAT_FLASH_BUSY = 0x0f,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+ MFI_STAT_FLASH_NOT_OPEN = 0x13,
+ MFI_STAT_FLASH_NOT_STARTED = 0x14,
+ MFI_STAT_FLUSH_FAILED = 0x15,
+ /* NOTE(review): "FOUNT" is a long-standing typo for "FOUND"; the
+ * identifier is kept as-is since external code may reference it. */
+ MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+ MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+ MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+ MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+ MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+ MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+ MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+ MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+ MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR = 0x21,
+ MFI_STAT_NO_HW_PRESENT = 0x22,
+ MFI_STAT_NOT_FOUND = 0x23,
+ MFI_STAT_NOT_IN_ENCL = 0x24,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+ MFI_STAT_PD_TYPE_WRONG = 0x26,
+ MFI_STAT_PR_DISABLED = 0x27,
+ MFI_STAT_ROW_INDEX_INVALID = 0x28,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+ MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+ MFI_STAT_SCSI_IO_FAILED = 0x2e,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET = 0x31,
+ MFI_STAT_WRONG_STATE = 0x32,
+ MFI_STAT_LD_OFFLINE = 0x33,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+ MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+ MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+ MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+ MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+ MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE 12
+
+/* Event severity classes; negative values are sub-informational. */
+enum MR_EVT_CLASS {
+
+ MR_EVT_CLASS_DEBUG = -2,
+ MR_EVT_CLASS_PROGRESS = -1,
+ MR_EVT_CLASS_INFO = 0,
+ MR_EVT_CLASS_WARNING = 1,
+ MR_EVT_CLASS_CRITICAL = 2,
+ MR_EVT_CLASS_FATAL = 3,
+ MR_EVT_CLASS_DEAD = 4,
+
+};
+
+/* Event locale bitmask: which controller subsystem raised the event. */
+enum MR_EVT_LOCALE {
+
+ MR_EVT_LOCALE_LD = 0x0001,
+ MR_EVT_LOCALE_PD = 0x0002,
+ MR_EVT_LOCALE_ENCL = 0x0004,
+ MR_EVT_LOCALE_BBU = 0x0008,
+ MR_EVT_LOCALE_SAS = 0x0010,
+ MR_EVT_LOCALE_CTRL = 0x0020,
+ MR_EVT_LOCALE_CONFIG = 0x0040,
+ MR_EVT_LOCALE_CLUSTER = 0x0080,
+ MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+/* Selects which member of mrsas_evt_detail.args is valid (arg_type). */
+enum MR_EVT_ARGS {
+
+ MR_EVT_ARGS_NONE,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+};
+
+
+/*
+ * Thunderbolt (and later) Defines
+ */
+#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MRSAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MRSAS_LOAD_BALANCE_FLAG 0x1
+#define MRSAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MRSAS_TBOLT_MAX_RESET_TRIES 3
+#define MRSAS_MAX_MFI_CMDS 32
+
+/*
+ * Invader Defines
+ */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/*
+ * T10 PI defines
+ */
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MRSAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MRSAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MRSAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MRSAS_SCSI_ADDL_CDB_LEN 0x18
+#define MRSAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MRSAS_RD_WR_PROTECT_CHECK_NONE 0x60
+#define MRSAS_SCSIBLOCKSIZE 512
+
+/*
+ * Raid context flags
+ */
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+typedef enum MR_RAID_FLAGS_IO_SUB_TYPE {
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+} MR_RAID_FLAGS_IO_SUB_TYPE;
+
+/*
+ * Request descriptor types
+ */
+#define MRSAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MRSAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+#define MRSAS_FP_CMD_LEN 16
+#define MRSAS_FUSION_IN_RESET 0
+
+/* span/arm are packed into one byte; use these shift/mask pairs. */
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
+
+/*
+ * Define region lock types
+ */
+typedef enum _REGION_TYPE {
+ REGION_TYPE_UNUSED = 0, // lock is currently not active
+ REGION_TYPE_SHARED_READ = 1, // shared lock (for reads)
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3, // exclusive lock (for writes)
+} REGION_TYPE;
+
+/*
+ * MR private defines
+ */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+
+/*
+ * SCSI-CAM Related Defines
+ */
+#define MRSAS_SCSI_MAX_LUNS 0 //zero for now
+#define MRSAS_SCSI_INITIATOR_ID 255
+#define MRSAS_SCSI_MAX_CMDS 8
+#define MRSAS_SCSI_MAX_CDB_LEN 16
+#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
+#define MRSAS_MAX_SGL 70
+#define MRSAS_MAX_IO_SIZE (256 * 1024)
+#define MRSAS_INTERNAL_CMDS 32
+
+/* Request types */
+#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
+#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
+#define MRSAS_REQ_TYPE_PASSTHRU 0x2
+#define MRSAS_REQ_TYPE_GETSET_PARAM 0x3
+#define MRSAS_REQ_TYPE_SCSI_IO 0x4
+
+/* Request states */
+#define MRSAS_REQ_STATE_FREE 0
+#define MRSAS_REQ_STATE_BUSY 1
+#define MRSAS_REQ_STATE_TRAN 2
+#define MRSAS_REQ_STATE_COMPLETE 3
+
+/* Data transfer direction flags (bitmask). */
+enum mrsas_req_flags {
+ MRSAS_DIR_UNKNOWN = 0x1,
+ MRSAS_DIR_IN = 0x2,
+ MRSAS_DIR_OUT = 0x4,
+ MRSAS_DIR_NONE = 0x8,
+};
+
+/*
+ * Adapter Reset States
+ */
+enum {
+ MRSAS_HBA_OPERATIONAL = 0,
+ MRSAS_ADPRESET_SM_INFAULT = 1,
+ MRSAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MRSAS_ADPRESET_SM_OPERATIONAL = 3,
+ MRSAS_HW_CRITICAL_ERROR = 4,
+ MRSAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+};
+
+/*
+ * MPT Command Structure
+ *
+ * Driver-side tracking structure for one MPT (fast-path) command:
+ * virtual/physical addresses of the IO request, chain frame and sense
+ * buffer, plus DMA mapping state and the CAM ccb being serviced.
+ */
+struct mrsas_mpt_cmd {
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ bus_addr_t io_request_phys_addr;
+ MPI2_SGE_IO_UNION *chain_frame;
+ bus_addr_t chain_frame_phys_addr;
+ u_int32_t sge_count;
+ u_int8_t *sense;
+ bus_addr_t sense_phys_addr;
+ u_int8_t retry_for_fw_reset;
+ MRSAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+ u_int32_t sync_cmd_idx; //For getting MFI cmd from list when complete
+ u_int32_t index;
+ u_int8_t flags;
+ u_int8_t load_balance;
+ bus_size_t length; // request length
+ u_int32_t error_code; // error during request dmamap load
+ bus_dmamap_t data_dmamap;
+ void *data;
+ union ccb *ccb_ptr; // pointer to ccb
+ struct callout cm_callout;
+ struct mrsas_softc *sc;
+ TAILQ_ENTRY(mrsas_mpt_cmd) next;
+};
+
+/*
+ * MFI Command Structure
+ *
+ * Driver-side tracking structure for one legacy MFI command: the frame
+ * and sense buffers (virtual + DMA addresses) and completion context.
+ */
+struct mrsas_mfi_cmd {
+ union mrsas_frame *frame;
+ bus_dmamap_t frame_dmamap; // mfi frame dmamap
+ void *frame_mem; // mfi frame virtual addr
+ bus_addr_t frame_phys_addr; // mfi frame physical addr
+ u_int8_t *sense;
+ bus_dmamap_t sense_dmamap; // mfi sense dmamap
+ void *sense_mem; // mfi sense virtual addr
+ bus_addr_t sense_phys_addr;
+ u_int32_t index;
+ u_int8_t sync_cmd;
+ u_int8_t cmd_status;
+ u_int8_t abort_aen;
+ u_int8_t retry_for_fw_reset;
+ struct mrsas_softc *sc;
+ union ccb *ccb_ptr;
+ union {
+ struct {
+ u_int16_t smid;
+ u_int16_t resvd;
+ } context;
+ u_int32_t frame_count;
+ } cmd_id;
+ TAILQ_ENTRY(mrsas_mfi_cmd) next;
+};
+
+
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
+};
+
+/*
+ * NOTE: a second, byte-identical copy of the MR_EVT_* event-code macros
+ * (MR_EVT_CFG_CLEARED through MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) used
+ * to live here; it duplicated the definitions earlier in this header
+ * and has been removed.
+ */
+
+/* Physical drive states as reported by firmware. */
+enum MR_PD_STATE {
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+ };
+
+ /*
+ * defines the physical drive address structure
+ */
+#pragma pack(1)
+struct MR_PD_ADDRESS {
+ u_int16_t deviceId;
+ u_int16_t enclDeviceId;
+
+ union {
+ struct {
+ u_int8_t enclIndex;
+ u_int8_t slotNumber;
+ } mrPdAddress;
+ struct {
+ u_int8_t enclPosition;
+ u_int8_t enclConnectorIndex;
+ } mrEnclAddress;
+ } u1;
+ u_int8_t scsiDevType;
+ union {
+ u_int8_t connectedPortBitmap;
+ u_int8_t connectedPortNumbers;
+ } u2;
+ u_int64_t sasAddr[2];
+};
+#pragma pack()
+
+/*
+ * defines the physical drive list structure
+ * NOTE(review): addr[1] is a variable-length trailer — firmware returns
+ * `count` entries; do not take sizeof(struct MR_PD_LIST) as the full size.
+ */
+#pragma pack(1)
+struct MR_PD_LIST {
+ u_int32_t size;
+ u_int32_t count;
+ struct MR_PD_ADDRESS addr[1];
+};
+#pragma pack()
+
+/* Driver-local compressed view of one physical drive. */
+#pragma pack(1)
+struct mrsas_pd_list {
+ u_int16_t tid;
+ u_int8_t driveType;
+ u_int8_t driveState;
+};
+#pragma pack()
+
+ /*
+ * defines the logical drive reference structure
+ */
+typedef union _MR_LD_REF { // LD reference structure
+ struct {
+ u_int8_t targetId; // LD target id (0 to MAX_TARGET_ID)
+ u_int8_t reserved; // reserved to make in line with MR_PD_REF
+ u_int16_t seqNum; // Sequence Number
+ } ld_context;
+ u_int32_t ref; // shorthand reference to full 32-bits
+} MR_LD_REF; // 4 bytes
+
+
+/*
+ * defines the logical drive list structure
+ */
+#pragma pack(1)
+struct MR_LD_LIST {
+ u_int32_t ldCount; // number of LDs
+ u_int32_t reserved; // pad to 8-byte boundary
+ struct {
+ MR_LD_REF ref; // LD reference
+ u_int8_t state; // current LD state (MR_LD_STATE)
+ u_int8_t reserved[3]; // pad to 8-byte boundary
+ u_int64_t size; // LD size
+ } ldList[MAX_LOGICAL_DRIVES];
+};
+#pragma pack()
+
+/*
+ * SAS controller properties
+ *
+ * Firmware-defined, byte-packed property block returned/accepted by
+ * DCMD; field order and widths are fixed by the MFI ABI — do not reorder.
+ */
+#pragma pack(1)
+struct mrsas_ctrl_prop {
+ u_int16_t seq_num;
+ u_int16_t pred_fail_poll_interval;
+ u_int16_t intr_throttle_count;
+ u_int16_t intr_throttle_timeouts;
+ u_int8_t rebuild_rate;
+ u_int8_t patrol_read_rate;
+ u_int8_t bgi_rate;
+ u_int8_t cc_rate;
+ u_int8_t recon_rate;
+ u_int8_t cache_flush_interval;
+ u_int8_t spinup_drv_count;
+ u_int8_t spinup_delay;
+ u_int8_t cluster_enable;
+ u_int8_t coercion_mode;
+ u_int8_t alarm_enable;
+ u_int8_t disable_auto_rebuild;
+ u_int8_t disable_battery_warn;
+ u_int8_t ecc_bucket_size;
+ u_int16_t ecc_bucket_leak_rate;
+ u_int8_t restore_hotspare_on_insertion;
+ u_int8_t expose_encl_devices;
+ u_int8_t maintainPdFailHistory;
+ u_int8_t disallowHostRequestReordering;
+ u_int8_t abortCCOnError; // set TRUE to abort CC on detecting an inconsistency
+ u_int8_t loadBalanceMode; // load balance mode (MR_LOAD_BALANCE_MODE)
+ u_int8_t disableAutoDetectBackplane; // 0 - use auto detect logic of backplanes
+ // like SGPIO, i2c SEP using h/w mechansim
+ // like GPIO pins.
+ // 1 - disable auto detect SGPIO,
+ // 2 - disable i2c SEP auto detect
+ // 3 - disable both auto detect
+ u_int8_t snapVDSpace; // % of source LD to be reserved for a VDs snapshot in
+ // snapshot repository, for metadata and user data.
+ // 1=5%, 2=10%, 3=15% and so on.
+ /*
+ * Add properties that can be controlled by a bit in the following structure.
+ */
+ struct {
+ u_int32_t copyBackDisabled : 1; // set TRUE to disable copyBack
+ // (0=copback enabled)
+ u_int32_t SMARTerEnabled : 1;
+ u_int32_t prCorrectUnconfiguredAreas : 1;
+ u_int32_t useFdeOnly : 1;
+ u_int32_t disableNCQ : 1;
+ u_int32_t SSDSMARTerEnabled : 1;
+ u_int32_t SSDPatrolReadEnabled : 1;
+ u_int32_t enableSpinDownUnconfigured : 1;
+ u_int32_t autoEnhancedImport : 1;
+ u_int32_t enableSecretKeyControl : 1;
+ u_int32_t disableOnlineCtrlReset : 1;
+ u_int32_t allowBootWithPinnedCache : 1;
+ u_int32_t disableSpinDownHS : 1;
+ u_int32_t enableJBOD : 1;
+ u_int32_t reserved :18;
+ } OnOffProperties;
+ u_int8_t autoSnapVDSpace; // % of source LD to be reserved for auto
+ // snapshot in snapshot repository, for
+ // metadata and user data.
+ // 1=5%, 2=10%, 3=15% and so on.
+ u_int8_t viewSpace; // snapshot writeable VIEWs capacity as a %
+ // of source LD capacity. 0=READ only.
+ // 1=5%, 2=10%, 3=15% and so on
+ u_int16_t spinDownTime; // # of idle minutes before device is spun
+ // down (0=use FW defaults).
+ u_int8_t reserved[24];
+
+};
+#pragma pack()
+
+
+/*
+ * SAS controller information
+ *
+ * Firmware-defined controller info page (MR_DCMD_CTRL_GET_INFO).
+ * Layout is fixed by the MFI ABI: each sub-struct is individually
+ * __packed and the whole struct is __packed, with an explicit pad to
+ * 0x800 bytes at the end — do not reorder or resize fields.
+ */
+//#pragma pack(1)
+struct mrsas_ctrl_info {
+ /*
+ * PCI device information
+ */
+ struct {
+ u_int16_t vendor_id;
+ u_int16_t device_id;
+ u_int16_t sub_vendor_id;
+ u_int16_t sub_device_id;
+ u_int8_t reserved[24];
+ } __packed pci;
+ /*
+ * Host interface information
+ */
+ struct {
+ u_int8_t PCIX:1;
+ u_int8_t PCIE:1;
+ u_int8_t iSCSI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed host_interface;
+ /*
+ * Device (backend) interface information
+ */
+ struct {
+ u_int8_t SPI:1;
+ u_int8_t SAS_3G:1;
+ u_int8_t SATA_1_5G:1;
+ u_int8_t SATA_3G:1;
+ u_int8_t reserved_0:4;
+ u_int8_t reserved_1[6];
+ u_int8_t port_count;
+ u_int64_t port_addr[8];
+ } __packed device_interface;
+
+ /*
+ * List of components residing in flash. All str are null terminated
+ */
+ u_int32_t image_check_word;
+ u_int32_t image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char built_time[16];
+ } __packed image_component[8];
+ /*
+ * List of flash components that have been flashed on the card, but
+ * are not in use, pending reset of the adapter. This list will be
+ * empty if a flash operation has not occurred. All stings are null
+ * terminated
+ */
+ u_int32_t pending_image_component_count;
+
+ struct {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+ } __packed pending_image_component[8];
+
+ u_int8_t max_arms;
+ u_int8_t max_spans;
+ u_int8_t max_arrays;
+ u_int8_t max_lds;
+ char product_name[80];
+ char serial_no[32];
+
+ /*
+ * Other physical/controller/operation information. Indicates the
+ * presence of the hardware
+ */
+ struct {
+ u_int32_t bbu:1;
+ u_int32_t alarm:1;
+ u_int32_t nvram:1;
+ u_int32_t uart:1;
+ u_int32_t reserved:28;
+ } __packed hw_present;
+
+ u_int32_t current_fw_time;
+
+ /*
+ * Maximum data transfer sizes
+ */
+ u_int16_t max_concurrent_cmds;
+ u_int16_t max_sge_count;
+ u_int32_t max_request_size;
+
+ /*
+ * Logical and physical device counts
+ */
+ u_int16_t ld_present_count;
+ u_int16_t ld_degraded_count;
+ u_int16_t ld_offline_count;
+
+ u_int16_t pd_present_count;
+ u_int16_t pd_disk_present_count;
+ u_int16_t pd_disk_pred_failure_count;
+ u_int16_t pd_disk_failed_count;
+
+ /*
+ * Memory size information
+ */
+ u_int16_t nvram_size;
+ u_int16_t memory_size;
+ u_int16_t flash_size;
+
+ /*
+ * Error counters
+ */
+ u_int16_t mem_correctable_error_count;
+ u_int16_t mem_uncorrectable_error_count;
+
+ /*
+ * Cluster information
+ */
+ u_int8_t cluster_permitted;
+ u_int8_t cluster_active;
+
+ /*
+ * Additional max data transfer sizes
+ */
+ u_int16_t max_strips_per_io;
+
+ /*
+ * Controller capabilities structures
+ */
+ struct {
+ u_int32_t raid_level_0:1;
+ u_int32_t raid_level_1:1;
+ u_int32_t raid_level_5:1;
+ u_int32_t raid_level_1E:1;
+ u_int32_t raid_level_6:1;
+ u_int32_t reserved:27;
+ } __packed raid_levels;
+
+ struct {
+ u_int32_t rbld_rate:1;
+ u_int32_t cc_rate:1;
+ u_int32_t bgi_rate:1;
+ u_int32_t recon_rate:1;
+ u_int32_t patrol_rate:1;
+ u_int32_t alarm_control:1;
+ u_int32_t cluster_supported:1;
+ u_int32_t bbu:1;
+ u_int32_t spanning_allowed:1;
+ u_int32_t dedicated_hotspares:1;
+ u_int32_t revertible_hotspares:1;
+ u_int32_t foreign_config_import:1;
+ u_int32_t self_diagnostic:1;
+ u_int32_t mixed_redundancy_arr:1;
+ u_int32_t global_hot_spares:1;
+ u_int32_t reserved:17;
+ } __packed adapter_operations;
+
+ struct {
+ u_int32_t read_policy:1;
+ u_int32_t write_policy:1;
+ u_int32_t io_policy:1;
+ u_int32_t access_policy:1;
+ u_int32_t disk_cache_policy:1;
+ u_int32_t reserved:27;
+ } __packed ld_operations;
+
+ struct {
+ u_int8_t min;
+ u_int8_t max;
+ u_int8_t reserved[2];
+ } __packed stripe_sz_ops;
+
+ struct {
+ u_int32_t force_online:1;
+ u_int32_t force_offline:1;
+ u_int32_t force_rebuild:1;
+ u_int32_t reserved:29;
+ } __packed pd_operations;
+
+ struct {
+ u_int32_t ctrl_supports_sas:1;
+ u_int32_t ctrl_supports_sata:1;
+ u_int32_t allow_mix_in_encl:1;
+ u_int32_t allow_mix_in_ld:1;
+ u_int32_t allow_sata_in_cluster:1;
+ u_int32_t reserved:27;
+ } __packed pd_mix_support;
+
+ /*
+ * Define ECC single-bit-error bucket information
+ */
+ u_int8_t ecc_bucket_count;
+ u_int8_t reserved_2[11];
+
+ /*
+ * Include the controller properties (changeable items)
+ */
+ struct mrsas_ctrl_prop properties;
+
+ /*
+ * Define FW pkg version (set in envt v'bles on OEM basis)
+ */
+ char package_version[0x60];
+
+ /*
+ * If adapterOperations.supportMoreThan8Phys is set, and deviceInterface.portCount is greater than 8,
+ * SAS Addrs for first 8 ports shall be populated in deviceInterface.portAddr, and the rest shall be
+ * populated in deviceInterfacePortAddr2.
+ */
+ u_int64_t deviceInterfacePortAddr2[8]; //0x6a0
+ u_int8_t reserved3[128]; //0x6e0
+
+ struct { //0x760
+ u_int16_t minPdRaidLevel_0 : 4;
+ u_int16_t maxPdRaidLevel_0 : 12;
+
+ u_int16_t minPdRaidLevel_1 : 4;
+ u_int16_t maxPdRaidLevel_1 : 12;
+
+ u_int16_t minPdRaidLevel_5 : 4;
+ u_int16_t maxPdRaidLevel_5 : 12;
+
+ u_int16_t minPdRaidLevel_1E : 4;
+ u_int16_t maxPdRaidLevel_1E : 12;
+
+ u_int16_t minPdRaidLevel_6 : 4;
+ u_int16_t maxPdRaidLevel_6 : 12;
+
+ u_int16_t minPdRaidLevel_10 : 4;
+ u_int16_t maxPdRaidLevel_10 : 12;
+
+ u_int16_t minPdRaidLevel_50 : 4;
+ u_int16_t maxPdRaidLevel_50 : 12;
+
+ u_int16_t minPdRaidLevel_60 : 4;
+ u_int16_t maxPdRaidLevel_60 : 12;
+
+ u_int16_t minPdRaidLevel_1E_RLQ0 : 4;
+ u_int16_t maxPdRaidLevel_1E_RLQ0 : 12;
+
+ u_int16_t minPdRaidLevel_1E0_RLQ0 : 4;
+ u_int16_t maxPdRaidLevel_1E0_RLQ0 : 12;
+
+ u_int16_t reserved[6];
+ } pdsForRaidLevels;
+
+ u_int16_t maxPds; //0x780
+ u_int16_t maxDedHSPs; //0x782
+ u_int16_t maxGlobalHSPs; //0x784
+ u_int16_t ddfSize; //0x786
+ u_int8_t maxLdsPerArray; //0x788
+ u_int8_t partitionsInDDF; //0x789
+ u_int8_t lockKeyBinding; //0x78a
+ u_int8_t maxPITsPerLd; //0x78b
+ u_int8_t maxViewsPerLd; //0x78c
+ u_int8_t maxTargetId; //0x78d
+ u_int16_t maxBvlVdSize; //0x78e
+
+ u_int16_t maxConfigurableSSCSize; //0x790
+ u_int16_t currentSSCsize; //0x792
+
+ char expanderFwVersion[12]; //0x794
+
+ u_int16_t PFKTrialTimeRemaining; //0x7A0
+
+ u_int16_t cacheMemorySize; //0x7A2
+
+ struct { //0x7A4
+ u_int32_t supportPIcontroller :1;
+ u_int32_t supportLdPIType1 :1;
+ u_int32_t supportLdPIType2 :1;
+ u_int32_t supportLdPIType3 :1;
+ u_int32_t supportLdBBMInfo :1;
+ u_int32_t supportShieldState :1;
+ u_int32_t blockSSDWriteCacheChange :1;
+ u_int32_t supportSuspendResumeBGops :1;
+ u_int32_t supportEmergencySpares :1;
+ u_int32_t supportSetLinkSpeed :1;
+ u_int32_t supportBootTimePFKChange :1;
+ u_int32_t supportJBOD :1;
+ u_int32_t disableOnlinePFKChange :1;
+ u_int32_t supportPerfTuning :1;
+ u_int32_t supportSSDPatrolRead :1;
+ u_int32_t realTimeScheduler :1;
+
+ u_int32_t supportResetNow :1;
+ u_int32_t supportEmulatedDrives :1;
+ u_int32_t headlessMode :1;
+ u_int32_t dedicatedHotSparesLimited :1;
+
+
+ u_int32_t supportUnevenSpans :1;
+ u_int32_t reserved :11;
+ } adapterOperations2;
+
+ u_int8_t driverVersion[32]; //0x7A8
+ u_int8_t maxDAPdCountSpinup60; //0x7C8
+ u_int8_t temperatureROC; //0x7C9
+ u_int8_t temperatureCtrl; //0x7CA
+ u_int8_t reserved4; //0x7CB
+ u_int16_t maxConfigurablePds; //0x7CC
+
+
+ u_int8_t reserved5[2]; //0x7CD reserved for future use
+
+ /*
+ * HA cluster information
+ */
+ struct {
+ u_int32_t peerIsPresent :1;
+ u_int32_t peerIsIncompatible :1;
+
+ u_int32_t hwIncompatible :1;
+ u_int32_t fwVersionMismatch :1;
+ u_int32_t ctrlPropIncompatible :1;
+ u_int32_t premiumFeatureMismatch :1;
+ u_int32_t reserved :26;
+ } cluster;
+
+ char clusterId[16]; //0x7D4
+
+ u_int8_t pad[0x800-0x7E4]; //0x7E4
+} __packed;
+
+/*
+ * Ld and PD Max Support Defines
+ */
+#define MRSAS_MAX_PD 256
+#define MRSAS_MAX_LD 64
+
+/*
+ * When SCSI mid-layer calls driver's reset routine, driver waits for
+ * MRSAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While
+ * it is waiting for the commands to complete, it prints a diagnostic message
+ * every MRSAS_RESET_NOTICE_INTERVAL seconds
+ */
+#define MRSAS_RESET_WAIT_TIME 180
+#define MRSAS_INTERNAL_CMD_WAIT_TIME 180
+#define MRSAS_IOC_INIT_WAIT_TIME 60
+#define MRSAS_RESET_NOTICE_INTERVAL 5
+#define MRSAS_IOCTL_CMD 0
+#define MRSAS_DEFAULT_CMD_TIMEOUT 90
+#define MRSAS_THROTTLE_QUEUE_DEPTH 16
+
+/*
+ * FW reports the maximum of number of commands that it can accept (maximum
+ * commands that can be outstanding) at any time. The driver must report a
+ * lower number to the mid layer because it can issue a few internal commands
+ * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs
+ * is shown below
+ */
+#define MRSAS_INT_CMDS 32
+#define MRSAS_SKINNY_INT_CMDS 5
+#define MRSAS_MAX_MSIX_QUEUES 16
+
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of bus_addr_t
+ */
+#define IS_DMA64 (sizeof(bus_addr_t) == 8)
+
+/* Interrupt status / doorbell bits, per controller generation. */
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 // MFI state change interrupt
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 //MFI state change interrupt
+
+#define MFI_OB_INTR_STATUS_MASK 0x00000002
+#define MFI_POLL_TIMEOUT_SECS 60
+
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK 0x00000001
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+/* 32-bit scatter/gather element (firmware ABI; byte-packed). */
+#pragma pack(1)
+struct mrsas_sge32 {
+ u_int32_t phys_addr;
+ u_int32_t length;
+};
+#pragma pack()
+
+/* 64-bit scatter/gather element (firmware ABI; byte-packed). */
+#pragma pack(1)
+struct mrsas_sge64 {
+ u_int64_t phys_addr;
+ u_int32_t length;
+};
+#pragma pack()
+
+/*
+ * SGL overlay inside an MFI frame; MFI_FRAME_SGL64 selects sge64.
+ * Fix: the opening pragma here was `#pragma pack()` (i.e. reset to
+ * default packing) instead of `#pragma pack(1)`, so the union was not
+ * byte-packed like every other frame structure in this header.
+ */
+#pragma pack(1)
+union mrsas_sgl {
+ struct mrsas_sge32 sge32[1];
+ struct mrsas_sge64 sge64[1];
+};
+#pragma pack()
+
+/* Generic MFI frame header; byte offsets noted per the MFI ABI. */
+#pragma pack(1)
+struct mrsas_header {
+ u_int8_t cmd; /*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t lun; /*05h */
+ u_int8_t cdb_len; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t data_xferlen; /*14h */
+};
+#pragma pack()
+
+/* MFI INIT frame: hands the new/old reply-queue info to firmware. */
+#pragma pack(1)
+struct mrsas_init_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+
+ u_int8_t reserved_1; /*03h */
+ u_int32_t reserved_2; /*04h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t reserved_3; /*12h */
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int32_t queue_info_new_phys_addr_lo; /*18h */
+ u_int32_t queue_info_new_phys_addr_hi; /*1Ch */
+ u_int32_t queue_info_old_phys_addr_lo; /*20h */
+ u_int32_t queue_info_old_phys_addr_hi; /*24h */
+ u_int32_t driver_ver_lo; /*28h */
+ u_int32_t driver_ver_hi; /*2Ch */
+ u_int32_t reserved_4[4]; /*30h */
+};
+#pragma pack()
+
+/* MFI LD read/write frame (LBA-addressed I/O). */
+#pragma pack(1)
+struct mrsas_io_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t access_byte; /*05h */
+ u_int8_t reserved_0; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t lba_count; /*14h */
+
+ u_int32_t sense_buf_phys_addr_lo; /*18h */
+ u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+
+ u_int32_t start_lba_lo; /*20h */
+ u_int32_t start_lba_hi; /*24h */
+
+ union mrsas_sgl sgl; /*28h */
+};
+#pragma pack()
+
+/* MFI SCSI pass-through frame (CDB-addressed I/O). */
+#pragma pack(1)
+struct mrsas_pthru_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t sense_len; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t scsi_status; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t lun; /*05h */
+ u_int8_t cdb_len; /*06h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int32_t sense_buf_phys_addr_lo; /*18h */
+ u_int32_t sense_buf_phys_addr_hi; /*1Ch */
+
+ u_int8_t cdb[16]; /*20h */
+ union mrsas_sgl sgl; /*30h */
+};
+#pragma pack()
+
+/* MFI DCMD frame: opcode selects the management command; mbox carries
+ * MFI_MBOX_SIZE bytes of per-opcode arguments. */
+#pragma pack(1)
+struct mrsas_dcmd_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t reserved_1[4]; /*03h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+ u_int32_t opcode; /*18h */
+
+ union { /*1Ch */
+ u_int8_t b[12];
+ u_int16_t s[6];
+ u_int32_t w[3];
+ } mbox;
+
+ union mrsas_sgl sgl; /*28h */
+};
+#pragma pack()
+
+/* MFI ABORT frame: aborts the MFI command at abort_mfi_phys_addr. */
+#pragma pack(1)
+struct mrsas_abort_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_0; /*01h */
+ u_int8_t cmd_status; /*02h */
+
+ u_int8_t reserved_1; /*03h */
+ u_int32_t reserved_2; /*04h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t reserved_3; /*12h */
+ u_int32_t reserved_4; /*14h */
+
+ u_int32_t abort_context; /*18h */
+ u_int32_t pad_1; /*1Ch */
+
+ u_int32_t abort_mfi_phys_addr_lo; /*20h */
+ u_int32_t abort_mfi_phys_addr_hi; /*24h */
+
+ u_int32_t reserved_5[6]; /*28h */
+};
+#pragma pack()
+
+/* MFI SMP frame: SAS management protocol pass-through. */
+#pragma pack(1)
+struct mrsas_smp_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_1; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t connection_status; /*03h */
+
+ u_int8_t reserved_2[3]; /*04h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+ u_int64_t sas_addr; /*18h */
+
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: req */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: req */
+ } sgl;
+};
+#pragma pack()
+
+
+#pragma pack(1)
+struct mrsas_stp_frame {
+ u_int8_t cmd; /*00h */
+ u_int8_t reserved_1; /*01h */
+ u_int8_t cmd_status; /*02h */
+ u_int8_t reserved_2; /*03h */
+
+ u_int8_t target_id; /*04h */
+ u_int8_t reserved_3[2]; /*05h */
+ u_int8_t sge_count; /*07h */
+
+ u_int32_t context; /*08h */
+ u_int32_t pad_0; /*0Ch */
+
+ u_int16_t flags; /*10h */
+ u_int16_t timeout; /*12h */
+
+ u_int32_t data_xfer_len; /*14h */
+
+ u_int16_t fis[10]; /*18h */
+ u_int32_t stp_flags;
+
+ union {
+ struct mrsas_sge32 sge32[2]; /* [0]: resp [1]: data */
+ struct mrsas_sge64 sge64[2]; /* [0]: resp [1]: data */
+ } sgl;
+};
+#pragma pack()
+
+union mrsas_frame {
+ struct mrsas_header hdr;
+ struct mrsas_init_frame init;
+ struct mrsas_io_frame io;
+ struct mrsas_pthru_frame pthru;
+ struct mrsas_dcmd_frame dcmd;
+ struct mrsas_abort_frame abort;
+ struct mrsas_smp_frame smp;
+ struct mrsas_stp_frame stp;
+ u_int8_t raw_bytes[64];
+};
+
+/* Event class+locale filter word: members view or whole-word view. */
+#pragma pack(1)
+union mrsas_evt_class_locale {
+
+ struct {
+ u_int16_t locale;
+ u_int8_t reserved;
+ int8_t class;
+ } __packed members;
+
+ u_int32_t word;
+
+} __packed;
+
+#pragma pack()
+
+
+/* Event-log sequence numbers returned by MR_DCMD_CTRL_EVENT_GET_INFO. */
+#pragma pack(1)
+struct mrsas_evt_log_info {
+ u_int32_t newest_seq_num;
+ u_int32_t oldest_seq_num;
+ u_int32_t clear_seq_num;
+ u_int32_t shutdown_seq_num;
+ u_int32_t boot_seq_num;
+
+} __packed;
+
+#pragma pack()
+
+/* Operation progress: percent-style progress plus elapsed time. */
+struct mrsas_progress {
+
+ u_int16_t progress;
+ u_int16_t elapsed_seconds;
+
+} __packed;
+
+/* Logical-drive identifier used inside event payloads. */
+struct mrsas_evtarg_ld {
+
+ u_int16_t target_id;
+ u_int8_t ld_index;
+ u_int8_t reserved;
+
+} __packed;
+
+/* Physical-drive identifier used inside event payloads. */
+struct mrsas_evtarg_pd {
+ u_int16_t device_id;
+ u_int8_t encl_index;
+ u_int8_t slot_number;
+
+} __packed;
+
+/*
+ * One firmware event record. `arg_type` (a MR_EVT_ARGS value) selects
+ * which member of the `args` union is valid; `cl` carries the event's
+ * class and locale for filtering.
+ */
+struct mrsas_evt_detail {
+
+ u_int32_t seq_num;
+ u_int32_t time_stamp;
+ u_int32_t code;
+ union mrsas_evt_class_locale cl;
+ u_int8_t arg_type;
+ u_int8_t reserved1[15];
+
+ union {
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int8_t cdb_length;
+ u_int8_t sense_length;
+ u_int8_t reserved[2];
+ u_int8_t cdb[16];
+ u_int8_t sense[64];
+ } __packed cdbSense;
+
+ struct mrsas_evtarg_ld ld;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int64_t count;
+ } __packed ld_count;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_ld ld;
+ } __packed ld_lba;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int32_t prevOwner;
+ u_int32_t newOwner;
+ } __packed ld_owner;
+
+ struct {
+ u_int64_t ld_lba;
+ u_int64_t pd_lba;
+ struct mrsas_evtarg_ld ld;
+ struct mrsas_evtarg_pd pd;
+ } __packed ld_lba_pd_lba;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ struct mrsas_progress prog;
+ } __packed ld_prog;
+
+ struct {
+ struct mrsas_evtarg_ld ld;
+ u_int32_t prev_state;
+ u_int32_t new_state;
+ } __packed ld_state;
+
+ struct {
+ u_int64_t strip;
+ struct mrsas_evtarg_ld ld;
+ } __packed ld_strip;
+
+ struct mrsas_evtarg_pd pd;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int32_t err;
+ } __packed pd_err;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_pd pd;
+ } __packed pd_lba;
+
+ struct {
+ u_int64_t lba;
+ struct mrsas_evtarg_pd pd;
+ struct mrsas_evtarg_ld ld;
+ } __packed pd_lba_ld;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ struct mrsas_progress prog;
+ } __packed pd_prog;
+
+ struct {
+ struct mrsas_evtarg_pd pd;
+ u_int32_t prevState;
+ u_int32_t newState;
+ } __packed pd_state;
+
+ struct {
+ u_int16_t vendorId;
+ u_int16_t deviceId;
+ u_int16_t subVendorId;
+ u_int16_t subDeviceId;
+ } __packed pci;
+
+ u_int32_t rate;
+ char str[96];
+
+ struct {
+ u_int32_t rtc;
+ u_int32_t elapsedSeconds;
+ } __packed time;
+
+ struct {
+ u_int32_t ecar;
+ u_int32_t elog;
+ char str[64];
+ } __packed ecc;
+
+ u_int8_t b[96];
+ u_int16_t s[48];
+ u_int32_t w[24];
+ u_int64_t d[12];
+ } args;
+
+ char description[128];
+
+} __packed;
+
+
+/*******************************************************************
+ * per-instance data
+ ********************************************************************/
+struct mrsas_softc {
+ device_t mrsas_dev; // bus device
+ struct cdev *mrsas_cdev; // controller device
+ uint16_t device_id; // pci device
+ struct resource *reg_res; // register interface window
+ int reg_res_id; // register resource id
+ bus_space_tag_t bus_tag; // bus space tag
+ bus_space_handle_t bus_handle; // bus space handle
+ bus_dma_tag_t mrsas_parent_tag; // bus dma parent tag
+ bus_dma_tag_t verbuf_tag; // verbuf tag
+ bus_dmamap_t verbuf_dmamap; // verbuf dmamap
+ void *verbuf_mem; // verbuf mem
+ bus_addr_t verbuf_phys_addr; // verbuf physical addr
+ bus_dma_tag_t sense_tag; // bus dma verbuf tag
+ bus_dmamap_t sense_dmamap; // bus dma verbuf dmamap
+ void *sense_mem; // pointer to sense buf
+ bus_addr_t sense_phys_addr; // bus dma verbuf mem
+ bus_dma_tag_t io_request_tag; // bus dma io request tag
+ bus_dmamap_t io_request_dmamap; // bus dma io request dmamap
+ void *io_request_mem; // bus dma io request mem
+ bus_addr_t io_request_phys_addr; // io request physical address
+ bus_dma_tag_t chain_frame_tag; // bus dma chain frame tag
+ bus_dmamap_t chain_frame_dmamap; // bus dma chain frame dmamap
+ void *chain_frame_mem; // bus dma chain frame mem
+ bus_addr_t chain_frame_phys_addr; // chain frame phys address
+ bus_dma_tag_t reply_desc_tag; // bus dma io request tag
+ bus_dmamap_t reply_desc_dmamap; // bus dma io request dmamap
+ void *reply_desc_mem; // bus dma io request mem
+ bus_addr_t reply_desc_phys_addr; // bus dma io request mem
+ bus_dma_tag_t ioc_init_tag; // bus dma io request tag
+ bus_dmamap_t ioc_init_dmamap; // bus dma io request dmamap
+ void *ioc_init_mem; // bus dma io request mem
+ bus_addr_t ioc_init_phys_mem; // io request physical address
+ bus_dma_tag_t data_tag; // bus dma data from OS tag
+ struct cam_sim *sim_0; // SIM pointer
+ struct cam_sim *sim_1; // SIM pointer
+ struct cam_path *path_0; // ldio path pointer to CAM
+ struct cam_path *path_1; // syspd path pointer to CAM
+ struct mtx sim_lock; // sim lock
+ struct mtx pci_lock; // serialize pci access
+ struct mtx io_lock; // IO lock
+ struct mtx ioctl_lock; // IOCTL lock
+ struct mtx mpt_cmd_pool_lock; // lock for cmd pool linked list
+ struct mtx mfi_cmd_pool_lock; // lock for cmd pool linked list
+ struct mtx raidmap_lock; // lock for raid map access/update
+ struct mtx aen_lock; // aen lock
+ uint32_t max_fw_cmds; // Max commands from FW
+ uint32_t max_num_sge; // Max number of SGEs
+ struct resource *mrsas_irq; // interrupt interface window
+ void *intr_handle; // handle
+ int irq_id; // intr resource id
+ struct mrsas_mpt_cmd **mpt_cmd_list;
+ struct mrsas_mfi_cmd **mfi_cmd_list;
+ TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
+ TAILQ_HEAD(, mrsas_mfi_cmd) mrsas_mfi_cmd_list_head;
+ bus_addr_t req_frames_desc_phys;
+ u_int8_t *req_frames_desc;
+ u_int8_t *req_desc;
+ bus_addr_t io_request_frames_phys;
+ u_int8_t *io_request_frames;
+ bus_addr_t reply_frames_desc_phys;
+ u_int16_t last_reply_idx;
+ u_int32_t reply_q_depth;
+ u_int32_t request_alloc_sz;
+ u_int32_t reply_alloc_sz;
+ u_int32_t io_frames_alloc_sz;
+ u_int32_t chain_frames_alloc_sz;
+ u_int16_t max_sge_in_main_msg;
+ u_int16_t max_sge_in_chain;
+ u_int8_t chain_offset_io_request;
+ u_int8_t chain_offset_mfi_pthru;
+ u_int32_t map_sz;
+ u_int64_t map_id;
+ struct mrsas_mfi_cmd *map_update_cmd;
+ struct mrsas_mfi_cmd *aen_cmd;
+ u_int8_t fast_path_io;
+ void* chan;
+ void* ocr_chan;
+ u_int8_t adprecovery;
+ u_int8_t remove_in_progress;
+ u_int8_t ocr_thread_active;
+ u_int8_t do_timedout_reset;
+ u_int32_t reset_in_progress;
+ u_int32_t reset_count;
+ bus_dma_tag_t raidmap_tag[2]; // bus dma tag for RAID map
+ bus_dmamap_t raidmap_dmamap[2]; // bus dma dmamap RAID map
+ void *raidmap_mem[2]; // bus dma mem RAID map
+ bus_addr_t raidmap_phys_addr[2]; // RAID map physical address
+ bus_dma_tag_t mficmd_frame_tag; // tag for mfi frame
+ bus_dma_tag_t mficmd_sense_tag; // tag for mfi sense
+ bus_dma_tag_t evt_detail_tag; // event detail tag
+ bus_dmamap_t evt_detail_dmamap; // event detail dmamap
+ struct mrsas_evt_detail *evt_detail_mem; // event detail mem
+ bus_addr_t evt_detail_phys_addr; // event detail physical addr
+ bus_dma_tag_t ctlr_info_tag; // tag for get ctlr info cmd
+ bus_dmamap_t ctlr_info_dmamap; // get ctlr info cmd dmamap
+ void *ctlr_info_mem; // get ctlr info cmd virtual addr
+ bus_addr_t ctlr_info_phys_addr; //get ctlr info cmd physical addr
+ u_int32_t max_sectors_per_req;
+ u_int8_t disableOnlineCtrlReset;
+ atomic_t fw_outstanding;
+ u_int32_t mrsas_debug;
+ u_int32_t mrsas_io_timeout;
+ u_int32_t mrsas_fw_fault_check_delay;
+ u_int32_t io_cmds_highwater;
+ u_int8_t UnevenSpanSupport;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+ struct proc *ocr_thread;
+ u_int32_t last_seq_num;
+ bus_dma_tag_t el_info_tag; // tag for get event log info cmd
+ bus_dmamap_t el_info_dmamap; // get event log info cmd dmamap
+ void *el_info_mem; // get event log info cmd virtual addr
+ bus_addr_t el_info_phys_addr; //get event log info cmd physical addr
+ struct mrsas_pd_list pd_list[MRSAS_MAX_PD];
+ struct mrsas_pd_list local_pd_list[MRSAS_MAX_PD];
+ u_int8_t ld_ids[MRSAS_MAX_LD];
+ struct taskqueue *ev_tq; //taskqueue for events
+ struct task ev_task;
+ u_int32_t CurLdCount;
+ u_int64_t reset_flags;
+ LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+};
+
+/* Compatibility shims for different OS versions */
+#if __FreeBSD_version >= 800001
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mrsas_kproc_exit(arg) kproc_exit(arg)
+#else
+#define mrsas_kproc_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
+ kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
+#define mrsas_kproc_exit(arg) kthread_exit(arg)
+#endif
+
+static __inline void
+clear_bit(int b, volatile void *p)
+{
+ atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile void *p)
+{
+ atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile void *p)
+{
+ return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
+}
+
+#endif /* MRSAS_H */
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
new file mode 100644
index 0000000000000..81e8fcb13a3e5
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -0,0 +1,1179 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+*/
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "dev/mrsas/mrsas.h"
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+
+#include <cam/scsi/scsi_all.h>
+#include <cam/scsi/scsi_message.h>
+#include <sys/taskqueue.h>
+
+
+/*
+ * Function prototypes
+ */
+int mrsas_cam_attach(struct mrsas_softc *sc);
+//int mrsas_ldio_inq(union ccb *ccb);
+int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb);
+int mrsas_bus_scan(struct mrsas_softc *sc);
+int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
+int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb);
+int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim);
+int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request);
+void mrsas_xpt_freeze(struct mrsas_softc *sc);
+void mrsas_xpt_release(struct mrsas_softc *sc);
+void mrsas_cam_detach(struct mrsas_softc *sc);
+void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
+void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
+void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
+ u_int32_t req_desc_hi);
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
+ struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
+static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
+static void mrsas_poll(struct cam_sim *sim);
+static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
+static void mrsas_scsiio_timeout(void *data);
+static void mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error);
+static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb);
+struct mrsas_mpt_cmd * mrsas_get_mpt_cmd(struct mrsas_softc *sc);
+MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
+ u_int16_t index);
+
+extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+extern u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map,
+ struct mrsas_softc *sc);
+extern void mrsas_isr(void *arg);
+extern void mrsas_aen_handler(struct mrsas_softc *sc);
+extern u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info,RAID_CONTEXT *pRAID_Context,
+ MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+extern u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
+extern u_int8_t megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+
+
+/**
+ * mrsas_cam_attach: Main entry to CAM subsystem
+ * input: Adapter instance soft state
+ *
+ * This function is called from mrsas_attach() during initialization
+ * to perform SIM allocations and XPT bus registration. If the kernel
+ * version is 7.4 or earlier, it would also initiate a bus scan.
+ */
+int mrsas_cam_attach(struct mrsas_softc *sc)
+{
+ struct cam_devq *devq;
+ int mrsas_cam_depth;
+
+ mrsas_cam_depth = sc->max_fw_cmds - MRSAS_INTERNAL_CMDS;
+
+ if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
+ device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
+ return(ENOMEM);
+ }
+
+
+ /*
+ * Create SIM for bus 0 and register, also create path
+ */
+ sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_0 == NULL){
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return(ENXIO);
+ }
+ /* Initialize taskqueue for Event Handling */
+ TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
+ sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
+ taskqueue_thread_enqueue, &sc->ev_tq);
+
+ /* Run the task queue with lowest priority */
+ taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
+ device_get_nameunit(sc->mrsas_dev));
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_0, sc->mrsas_dev,0) != CAM_SUCCESS)
+ {
+ cam_sim_free(sc->sim_0, TRUE); // passing true frees the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, TRUE); // passing true will free the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+
+ /*
+ * Create SIM for bus 1 and register, also create path
+ */
+ sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_poll, "mrsas", sc,
+ device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
+ mrsas_cam_depth, devq);
+ if (sc->sim_1 == NULL){
+ cam_simq_free(devq);
+ device_printf(sc->mrsas_dev, "Cannot register SIM\n");
+ return(ENXIO);
+ }
+
+ mtx_lock(&sc->sim_lock);
+ if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS){
+ cam_sim_free(sc->sim_1, TRUE); // passing true frees the devq
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ mtx_unlock(&sc->sim_lock);
+ return(ENXIO);
+ }
+ mtx_unlock(&sc->sim_lock);
+
+#if (__FreeBSD_version <= 704000)
+ if (mrsas_bus_scan(sc)){
+ device_printf(sc->mrsas_dev, "Error in bus scan.\n");
+ return(1);
+ }
+#endif
+ return(0);
+}
+
+/**
+ * mrsas_cam_detach: De-allocates and teardown CAM
+ * input: Adapter instance soft state
+ *
+ * De-registers and frees the paths and SIMs.
+ */
+void mrsas_cam_detach(struct mrsas_softc *sc)
+{
+ if (sc->ev_tq != NULL)
+ taskqueue_free(sc->ev_tq);
+ mtx_lock(&sc->sim_lock);
+ if (sc->path_0)
+ xpt_free_path(sc->path_0);
+ if (sc->sim_0) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_0));
+ cam_sim_free(sc->sim_0, FALSE);
+ }
+ if (sc->path_1)
+ xpt_free_path(sc->path_1);
+ if (sc->sim_1) {
+ xpt_bus_deregister(cam_sim_path(sc->sim_1));
+ cam_sim_free(sc->sim_1, TRUE);
+ }
+ mtx_unlock(&sc->sim_lock);
+}
+
+/**
+ * mrsas_action: SIM callback entry point
+ * input: pointer to SIM
+ * pointer to CAM Control Block
+ *
+ * This function processes CAM subsystem requests. The type of request is
+ * stored in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary
+ * because ccb->cpi.maxio is not supported for FreeBSD version 7.4 or
+ * earlier.
+ */
+static void mrsas_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ {
+ device_id = ccb_h->target_id;
+
+ /*
+ * bus 0 is LD, bus 1 is for system-PD
+ */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
+ ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
+ xpt_done(ccb);
+ }
+ else {
+ if (mrsas_startio(sc, sim, ccb)){
+ ccb->ccb_h.status |= CAM_REQ_INVALID;
+ xpt_done(ccb);
+ }
+ }
+ break;
+ }
+ case XPT_ABORT:
+ {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_RESET_BUS:
+ {
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_GET_TRAN_SETTINGS:
+ {
+ ccb->cts.protocol = PROTO_SCSI;
+ ccb->cts.protocol_version = SCSI_REV_2;
+ ccb->cts.transport = XPORT_SPI;
+ ccb->cts.transport_version = 2;
+ ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
+ ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
+ ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
+ ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_SET_TRAN_SETTINGS:
+ {
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ {
+ cam_calc_geometry(&ccb->ccg, 1);
+ xpt_done(ccb);
+ break;
+ }
+ case XPT_PATH_INQ:
+ {
+ ccb->cpi.version_num = 1;
+ ccb->cpi.hba_inquiry = 0;
+ ccb->cpi.target_sprt = 0;
+ ccb->cpi.hba_misc = 0;
+ ccb->cpi.hba_eng_cnt = 0;
+ ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
+ ccb->cpi.unit_number = cam_sim_unit(sim);
+ ccb->cpi.bus_id = cam_sim_bus(sim);
+ ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
+ ccb->cpi.base_transfer_speed = 150000;
+ strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
+ strncpy(ccb->cpi.hba_vid, "LSI", HBA_IDLEN);
+ strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
+ ccb->cpi.transport = XPORT_SPI;
+ ccb->cpi.transport_version = 2;
+ ccb->cpi.protocol = PROTO_SCSI;
+ ccb->cpi.protocol_version = SCSI_REV_2;
+ if (ccb->cpi.bus_id == 0)
+ ccb->cpi.max_target = MRSAS_MAX_LD-1;
+ else
+ ccb->cpi.max_target = MRSAS_MAX_PD-1;
+#if (__FreeBSD_version > 704000)
+ ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
+#endif
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ break;
+ }
+ default:
+ {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ break;
+ }
+ }
+}
+
+/**
+ * mrsas_scsiio_timeout Callback function for IO timed out
+ * input: mpt command context
+ *
+ * This function will execute after timeout value
+ * provided by ccb header from CAM layer, if timer expires.
+ * Driver will run timer for all DCDB and LDIO coming from CAM layer.
+ * This function is callback function for IO timeout and it runs in
+ * no-sleep context. Set do_timedout_reset in Adapter context so that
+ * it will execute OCR/Kill adapter from ocr_thread context.
+ */
+static void
+mrsas_scsiio_timeout(void *data)
+{
+ struct mrsas_mpt_cmd *cmd;
+ struct mrsas_softc *sc;
+
+ cmd = (struct mrsas_mpt_cmd *)data;
+ sc = cmd->sc;
+
+ if (cmd->ccb_ptr == NULL) {
+ printf("command timeout with NULL ccb\n");
+ return;
+ }
+
+ /* Below callout is dummy entry so that it will be
+ * cancelled from mrsas_cmd_done(). Now Controller will
+ * go to OCR/Kill Adapter based on OCR enable/disable
+ * property of Controller from ocr_thread context.
+ */
+ callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ sc->do_timedout_reset = 1;
+ if(sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
+}
+
+/**
+ * mrsas_startio: SCSI IO entry point
+ * input: Adapter instance soft state
+ * pointer to CAM Control Block
+ *
+ * This function is the SCSI IO entry point and it initiates IO processing.
+ * It copies the IO and depending if the IO is read/write or inquiry, it would
+ * call mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns
+ * 0 if the command is sent to firmware successfully, otherwise it returns 1.
+ */
+static int32_t mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
+ union ccb *ccb)
+{
+ struct mrsas_mpt_cmd *cmd;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE){
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done(ccb);
+ return(0);
+ }
+
+ ccb_h->status |= CAM_SIM_QUEUED;
+ cmd = mrsas_get_mpt_cmd(sc);
+
+ if (!cmd) {
+ ccb_h->status |= CAM_REQUEUE_REQ;
+ xpt_done(ccb);
+ return(0);
+ }
+
+ if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
+ if(ccb_h->flags & CAM_DIR_IN)
+ cmd->flags |= MRSAS_DIR_IN;
+ if(ccb_h->flags & CAM_DIR_OUT)
+ cmd->flags |= MRSAS_DIR_OUT;
+ }
+ else
+ cmd->flags = MRSAS_DIR_NONE; /* no data */
+
+/* For FreeBSD 10.0 and higher */
+#if (__FreeBSD_version >= 1000000)
+/*
+ * * XXX We don't yet support physical addresses here.
+ */
+ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
+ case CAM_DATA_PADDR:
+ case CAM_DATA_SG_PADDR:
+ printf("%s: physical addresses not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ case CAM_DATA_SG:
+ printf("%s: scatter gather is not supported\n",
+ __func__);
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
+ case CAM_DATA_VADDR:
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ break;
+ default:
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ goto done;
+ }
+#else
+ if (!(ccb_h->flags & CAM_DATA_PHYS)) { //Virtual data address
+ if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
+ if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
+ cmd->length = csio->dxfer_len;
+ if (cmd->length)
+ cmd->data = csio->data_ptr;
+ }
+ else {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ goto done;
+ }
+ }
+ else { //Data addresses are physical.
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_INVALID;
+ ccb_h->status &= ~CAM_SIM_QUEUED;
+ goto done;
+ }
+#endif
+ /* save ccb ptr */
+ cmd->ccb_ptr = ccb;
+
+ req_desc = mrsas_get_request_desc(sc, (cmd->index)-1);
+ if (!req_desc) {
+ device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
+ return (FAIL);
+ }
+ memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
+ cmd->request_desc = req_desc;
+
+ if (ccb_h->flags & CAM_CDB_POINTER)
+ bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ else
+ bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
+ mtx_lock(&sc->raidmap_lock);
+
+ if (mrsas_ldio_inq(sim, ccb)) {
+ if (mrsas_build_ldio(sc, cmd, ccb)){
+ device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return(1);
+ }
+ }
+ else {
+ if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
+ device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return(1);
+ }
+ }
+ mtx_unlock(&sc->raidmap_lock);
+
+ if (cmd->flags == MRSAS_DIR_IN) //from device
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+ else if (cmd->flags == MRSAS_DIR_OUT) //to device
+ cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+
+ cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+ cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/4;
+ cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+ cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cmd->index;
+
+ /*
+ * Start timer for IO timeout. Default timeout value is 90 second.
+ */
+ callout_reset(&cmd->cm_callout, (sc->mrsas_io_timeout * hz) / 1000,
+ mrsas_scsiio_timeout, cmd);
+ atomic_inc(&sc->fw_outstanding);
+
+ if(atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
+ sc->io_cmds_highwater++;
+
+ mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
+ return(0);
+
+done:
+ xpt_done(ccb);
+ return(0);
+}
+
+/**
+ * mrsas_ldio_inq: Determines if IO is read/write or inquiry
+ * input: pointer to CAM Control Block
+ *
+ * This function determines if the IO is a read/write. It returns 1 if the
+ * IO is a read/write on bus 0, and 0 otherwise (e.g. inquiry or system-PD).
+ */
+int mrsas_ldio_inq(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ccb_scsiio *csio = &(ccb->csio);
+
+ if (cam_sim_bus(sim) == 1)
+ return(0);
+
+ switch (csio->cdb_io.cdb_bytes[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * mrsas_get_mpt_cmd: Get a cmd from free command pool
+ * input: Adapter instance soft state
+ *
+ * This function removes an MPT command from the command free list and
+ * initializes it.
+ */
+struct mrsas_mpt_cmd* mrsas_get_mpt_cmd(struct mrsas_softc *sc)
+{
+ struct mrsas_mpt_cmd *cmd = NULL;
+
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)){
+ cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
+ TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
+ }
+ memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+ cmd->data = NULL;
+ cmd->length = 0;
+ cmd->flags = 0;
+ cmd->error_code = 0;
+ cmd->load_balance = 0;
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+
+ return cmd;
+}
+
+/**
+ * mrsas_release_mpt_cmd: Return a cmd to free command pool
+ * input: Command packet for return to free command pool
+ *
+ * This function returns an MPT command to the free command list.
+ */
+void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
+{
+ struct mrsas_softc *sc = cmd->sc;
+
+ mtx_lock(&sc->mpt_cmd_pool_lock);
+ cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
+ TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
+
+ return;
+}
+
+/**
+ * mrsas_get_request_desc: Get request descriptor from array
+ * input: Adapter instance soft state
+ * SMID index
+ *
+ * This function returns a pointer to the request descriptor.
+ */
+MRSAS_REQUEST_DESCRIPTOR_UNION *
+mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
+{
+ u_int8_t *p;
+
+ if (index >= sc->max_fw_cmds) {
+ device_printf(sc->mrsas_dev, "Invalid SMID (0x%x)request for desc\n", index);
+ return NULL;
+ }
+ p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
+
+ return (MRSAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/**
+ * mrsas_build_ldio: Builds an LDIO command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function builds the LDIO command packet. It returns 0 if the
+ * command is built successfully, otherwise it returns a 1.
+ */
+int mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ u_int32_t device_id;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ device_id = ccb_h->target_id;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ /* just the cdb len, other flags zero, and ORed-in later for FP */
+ io_request->IoFlags = csio->cdb_len;
+
+ if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
+ device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
+
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > MRSAS_MAX_SGL) {
+ device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (FAIL);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ }
+ else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return(FAIL);
+ }
+ return(0);
+}
+
+/**
+ * mrsas_setup_io: Set up data including Fast Path I/O
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function sets up the IO request, including Fast Path I/O. It
+ * returns SUCCESS if set up successfully, otherwise a failure code.
+ */
+int mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, u_int32_t device_id,
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ struct ccb_scsiio *csio = &(ccb->csio);
+ struct IO_REQUEST_INFO io_info;
+ MR_FW_RAID_MAP_ALL *map_ptr;
+ u_int8_t fp_possible;
+ u_int32_t start_lba_hi, start_lba_lo, ld_block_size;
+ u_int32_t datalength = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * READ_6 (0x08) or WRITE_6 (0x0A) cdb
+ */
+ if (csio->cdb_len == 6) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[1] << 16) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 8) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[3];
+ start_lba_lo &= 0x1FFFFF;
+ }
+ /*
+ * READ_10 (0x28) or WRITE_10 (0x2A) cdb
+ */
+ else if (csio->cdb_len == 10) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+ * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
+ */
+ else if (csio->cdb_len == 12) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
+ /*
+ * READ_16 (0x88) or WRITE_16 (0x8A) cdb
+ */
+ else if (csio->cdb_len == 16) {
+ datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
+ ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
+ start_lba_lo = ((u_int32_t) csio->cdb_io.cdb_bytes[6] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[7] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[8] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[9]);
+ start_lba_hi = ((u_int32_t) csio->cdb_io.cdb_bytes[2] << 24) |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[3] << 16) |
+ (u_int32_t) csio->cdb_io.cdb_bytes[4] << 8 |
+ ((u_int32_t) csio->cdb_io.cdb_bytes[5]);
+ }
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+
+ switch (ccb_h->flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ io_info.isRead = 1;
+ break;
+ case CAM_DIR_OUT:
+ io_info.isRead = 0;
+ break;
+ case CAM_DIR_NONE:
+ default:
+ mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
+ break;
+ }
+
+ map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+ ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc);
+
+ if ((MR_TargetIdToLdGet(device_id, map_ptr) >= MAX_LOGICAL_DRIVES) ||
+ (!sc->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ }
+ else
+ {
+ if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext, map_ptr))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+ if (fp_possible) {
+ mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
+ start_lba_lo, ld_block_size);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((sc->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) {
+ io_info.devHandle = mrsas_get_updated_dev_handle(&sc->load_balance_info[device_id],
+ &io_info);
+ cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
+ }
+ else
+ cmd->load_balance = 0;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ }
+ else {
+ /* Not FP IO */
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ }
+ return(0);
+}
+
+/**
+ * mrsas_build_dcdb: Builds an DCDB command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function builds the DCDB inquiry command. It returns 0 if the
+ * command is built successfully, otherwise it returns a 1.
+ */
+int mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+ MR_FW_RAID_MAP_ALL *map_ptr;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+
+ io_request = cmd->io_request;
+ device_id = ccb_h->target_id;
+ map_ptr = sc->raidmap_mem[(sc->map_id & 1)];
+
+ /* Check if this is for system PD */
+ if (cam_sim_bus(sim) == 1 &&
+ sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
+ io_request->Function = 0;
+ io_request->DevHandle = map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle =
+ map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ }
+ else {
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->LUN[1] = ccb_h->target_lun & 0xF;
+ io_request->DataLength = cmd->length;
+
+ if (mrsas_map_request(sc, cmd) == SUCCESS) {
+ if (cmd->sge_count > sc->max_num_sge) {
+ device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
+ "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (1);
+ }
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ }
+ else {
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return(1);
+ }
+ return(0);
+}
+
+/**
+ * mrsas_map_request: Map and load data
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * For data from OS, map and load the data buffer into bus space. The
+ * SG list is built in the callback. If the bus dmamap load is not
+ * successful, cmd->error_code will contain the error code and a 1 is
+ * returned.
+ */
+int mrsas_map_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ u_int32_t retcode = 0;
+ struct cam_sim *sim;
+ int flag = BUS_DMA_NOWAIT;
+
+ sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
+
+ if (cmd->data != NULL) {
+ mtx_lock(&sc->io_lock);
+ /* Map data buffer into bus space */
+ retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
+ cmd->length, mrsas_data_load_cb, cmd, flag);
+ mtx_unlock(&sc->io_lock);
+ if (retcode)
+ device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
+ if (retcode == EINPROGRESS) {
+ device_printf(sc->mrsas_dev, "request load in progress\n");
+ mrsas_freeze_simq(cmd, sim);
+ }
+ }
+ if (cmd->error_code)
+ return(1);
+ return(retcode);
+}
+
+/**
+ * mrsas_unmap_request: Unmap and unload data
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * This function unmaps and unloads data from OS.
+ */
+void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ if (cmd->data != NULL) {
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
+ mtx_lock(&sc->io_lock);
+ bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
+ mtx_unlock(&sc->io_lock);
+ }
+}
+
+/**
+ * mrsas_data_load_cb: Callback entry point
+ * input: Pointer to command packet as argument
+ * Pointer to segment
+ * Number of segments
+ * Error
+ *
+ * This is the callback function of the bus dma map load. It builds
+ * the SG list.
+ */
+static void
+mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
+ struct mrsas_softc *sc = cmd->sc;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ pMpi25IeeeSgeChain64_t sgl_ptr;
+ int i=0, sg_processed=0;
+
+ if (error)
+ {
+ cmd->error_code = error;
+ device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
+ if (error == EFBIG) {
+ cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
+ return;
+ }
+ }
+
+ if (cmd->flags & MRSAS_DIR_IN)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREREAD);
+ if (cmd->flags & MRSAS_DIR_OUT)
+ bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+ BUS_DMASYNC_PREWRITE);
+ if (nseg > sc->max_num_sge) {
+ device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
+ return;
+ }
+
+ io_request = cmd->io_request;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
+
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += sc->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ if (nseg != 0) {
+ for (i=0; i < nseg; i++) {
+ sgl_ptr->Address = segs[i].ds_addr;
+ sgl_ptr->Length = segs[i].ds_len;
+ sgl_ptr->Flags = 0;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if (i == nseg - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+ sg_processed = i + 1;
+ /*
+ * Prepare chain element
+ */
+ if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
+ (nseg > sc->max_sge_in_main_msg)) {
+ pMpi25IeeeSgeChain64_t sg_chain;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset = sc->chain_offset_io_request;
+ sg_chain = sgl_ptr;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
+ sg_chain->Address = cmd->chain_frame_phys_addr;
+ sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
+ }
+ }
+ }
+ cmd->sge_count = nseg;
+}
+
+/**
+ * mrsas_freeze_simq: Freeze SIM queue
+ * input: Pointer to command packet
+ * Pointer to SIM
+ *
+ * This function freezes the sim queue.
+ */
+static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
+{
+ union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
+
+ xpt_freeze_simq(sim, 1);
+ ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+}
+
+void mrsas_xpt_freeze(struct mrsas_softc *sc) {
+ xpt_freeze_simq(sc->sim_0, 1);
+ xpt_freeze_simq(sc->sim_1, 1);
+}
+
+void mrsas_xpt_release(struct mrsas_softc *sc) {
+ xpt_release_simq(sc->sim_0, 1);
+ xpt_release_simq(sc->sim_1, 1);
+}
+
+/**
+ * mrsas_cmd_done: Perform remaining command completion
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ *
+ * This function calls unmap request and releases the MPT command.
+ */
+void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
+{
+ callout_stop(&cmd->cm_callout);
+ mrsas_unmap_request(sc, cmd);
+ mtx_lock(&sc->sim_lock);
+ xpt_done(cmd->ccb_ptr);
+ cmd->ccb_ptr = NULL;
+ mtx_unlock(&sc->sim_lock);
+ mrsas_release_mpt_cmd(cmd);
+}
+
+/**
+ * mrsas_poll: Polling entry point
+ * input: Pointer to SIM
+ *
+ * This is currently a stub function.
+ */
+static void mrsas_poll(struct cam_sim *sim)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
+ mrsas_isr((void *) sc);
+}
+
+/*
+ * mrsas_bus_scan: Perform bus scan
+ * input: Adapter instance soft state
+ *
+ * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should
+ * not be called in FreeBSD 8.x and later versions, where the bus scan is
+ * automatic.
+ */
+int mrsas_bus_scan(struct mrsas_softc *sc)
+{
+ union ccb *ccb_0;
+ union ccb *ccb_1;
+
+ mtx_lock(&sc->sim_lock);
+ if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+
+ if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
+ xpt_free_ccb(ccb_0);
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+
+ if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+
+ if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb_0);
+ xpt_free_ccb(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+
+ xpt_rescan(ccb_0);
+ xpt_rescan(ccb_1);
+ mtx_unlock(&sc->sim_lock);
+
+ return(0);
+}
+
+/*
+ * mrsas_bus_scan_sim: Perform bus scan per SIM
+ * input: Adapter instance soft state
+ * This function will be called from Event handler
+ * on LD creation/deletion, JBOD on/off.
+ */
+int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
+{
+ union ccb *ccb;
+
+ mtx_lock(&sc->sim_lock);
+ if ((ccb = xpt_alloc_ccb()) == NULL) {
+ mtx_unlock(&sc->sim_lock);
+ return(ENOMEM);
+ }
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
+ CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP){
+ xpt_free_ccb(ccb);
+ mtx_unlock(&sc->sim_lock);
+ return(EIO);
+ }
+ xpt_rescan(ccb);
+ mtx_unlock(&sc->sim_lock);
+
+ return(0);
+}
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
new file mode 100644
index 0000000000000..69a996f782410
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
+#include <cam/cam_xpt_periph.h>
+
+
+/*
+ * Function prototypes
+ */
+u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
+u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count);
+u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
+ struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context,
+ MR_FW_RAID_MAP_ALL *map);
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info);
+u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
+u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error);
+u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
+void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+ PLD_LOAD_BALANCE_INFO lbInfo);
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
+ u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size);
+static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map);
+static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
+ MR_FW_RAID_MAP_ALL *map);
+static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
+ MR_FW_RAID_MAP_ALL *map);
+static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
+ MR_FW_RAID_MAP_ALL *map);
+static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
+ MR_FW_RAID_MAP_ALL *map);
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
+
+/*
+ * Spanset related function prototypes
+ * Added for PRL11 configuration (Uneven span support)
+ */
+void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
+static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
+static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t strip, MR_FW_RAID_MAP_ALL *map);
+static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error);
+static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
+ u_int64_t stripe, MR_FW_RAID_MAP_ALL *map);
+
+
+/*
+ * Spanset related defines
+ * Added for PRL11 configuration(Uneven span support)
+ */
+#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize /* use parameter map_, not a captured 'map' */
+#define SPAN_INVALID 0xff
+#define SPAN_DEBUG 0
+
+/*
+ * Related Defines
+ */
+
+typedef u_int64_t REGION_KEY;
+typedef u_int32_t REGION_LEN;
+
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+
+/*
+ * Related Macros
+ */
+
+#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
+
+#define swap32(x) \
+ ((unsigned int)( \
+ (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
+ (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
+ (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
+ (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
+
+
+/*
+ * In-line macros for mod and divide of 64-bit dividend and 32-bit divisor.
+ * Assumes a check for a divisor of zero is not possible.
+ *
+ * @param dividend : Dividend
+ * @param divisor : Divisor
+ * @return remainder
+ */
+
+#define mega_mod64(dividend, divisor) ({ \
+int remainder; \
+remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
+remainder;})
+
+#define mega_div64_32(dividend, divisor) ({ \
+int quotient; \
+quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
+quotient;})
+
+
+/*
+ * Various RAID map access functions. These functions access the various
+ * parts of the RAID map and returns the appropriate parameters.
+ */
+
+MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return (&map->raidMap.ldSpanMap[ld].ldRaid);
+}
+
+u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+}
+
+static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.arMapInfo[ar].pd[arm];
+}
+
+static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+
+ /*
+ * Check if logical drive was removed.
+ */
+ if (ld >= MAX_LOGICAL_DRIVES)
+ return ldBlockSize;
+
+ raid = MR_LdRaidGet(ld, map);
+ ldBlockSize = raid->logicalBlockLength;
+ if (!ldBlockSize)
+ ldBlockSize = MRSAS_SCSIBLOCKSIZE;
+
+ return ldBlockSize;
+}
+
+/**
+ * MR_ValidateMapInfo: Validate RAID map
+ * input: Adapter instance soft state
+ *
+ * This function checks and validates the loaded RAID map. It returns 0 if
+ * successful, and 1 otherwise.
+ */
+u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
+{
+ if (!sc) {
+ return 1;
+ }
+ uint32_t total_map_sz;
+ MR_FW_RAID_MAP_ALL *map = sc->raidmap_mem[(sc->map_id & 1)];
+ MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;
+
+ total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
+ (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
+
+ if (pFwRaidMap->totalSize != total_map_sz) {
+ device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", total_map_sz);
+ device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
+ device_printf(sc->mrsas_dev, "pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
+ return 1;
+ }
+
+ if (sc->UnevenSpanSupport) {
+ mr_update_span_set(map, ldSpanInfo);
+ }
+
+ mrsas_update_load_balance_params(map, sc->load_balance_info);
+
+ return 0;
+}
+
+/*
+ * ******************************************************************************
+ *
+ * Function to print info about span set created in driver from FW raid map
+ *
+ * Inputs :
+ * map - LD map
+ * ldSpanInfo - ldSpanInfo per HBA instance
+ *
+ *
+ * */
+#if SPAN_DEBUG
+static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+
+ u_int8_t span;
+ u_int32_t element;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ continue;
+ }
+ raid = MR_LdRaidGet(ld, map);
+ printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
+ for (span=0; span<raid->spanDepth; span++)
+ printf("Span=%x, number of quads=%x\n", span,
+ map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements);
+ for (element=0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0) break;
+
+ printf(" Span Set %x: width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+ printf(" logical LBA start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ printf(" span row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ printf(" data row start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ printf(" data strip start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span=0; span<raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=element+1){
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+ printf(" Span=%x, Quad=%x, diff=%x\n", span,
+ element, quad->diff);
+ printf(" offset_in_span=0x%08lx\n",
+ (long unsigned int)quad->offsetInSpan);
+ printf(" logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)quad->logStart,
+ (long unsigned int)quad->logEnd);
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+* div_error - Divide error code.
+*/
+
+u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
+ u_int64_t *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ u_int32_t span, info;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+
+ for (info=0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (row > span_set->data_row_end) continue;
+
+ for (span=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if ( quad->logStart <= row &&
+ row <= quad->logEnd &&
+ (mega_mod64(row - quad->logStart,
+ quad->diff)) == 0 ) {
+ if (span_blk != NULL) {
+ u_int64_t blk;
+ blk = mega_div64_32
+ ((row - quad->logStart),
+ quad->diff);
+ blk = (blk + quad->offsetInSpan)
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* Strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* row - row associated with strip
+*/
+
+static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+ u_int64_t span_set_Strip, span_set_Row;
+
+ for (info=0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (strip > span_set->data_strip_end) continue;
+
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
+ for (span=0,span_offset=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
+ "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
+ (unsigned long long) span_set->data_row_start +
+ (unsigned long long) span_set_Row + (span_offset - 1));
+ return (span_set->data_row_start + span_set_Row + (span_offset - 1));
+ }
+ return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* Strip - Start strip associated with row
+*/
+
+static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t row, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t span, info;
+ u_int64_t strip;
+
+ for (info=0; info<MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (row > span_set->data_row_end) continue;
+
+ for (span=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if ( quad->logStart <= row &&
+ row <= quad->logEnd &&
+ mega_mod64((row - quad->logStart),
+ quad->diff) == 0 ) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - quad->logStart),
+ quad->diff);
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+ mrsas_dprint(sc, MRSAS_PRL11,"LSI Debug - get_strip_from_row: returns invalid "
+ "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
+ return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* Phys Arm - Phys Arm associated with strip
+*/
+
+static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
+ u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
+ u_int32_t info, strip_offset, span, span_offset;
+
+ for (info=0; info<MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0) break;
+ if (strip > span_set->data_strip_end) continue;
+
+ strip_offset = (u_int32_t)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span=0,span_offset=0; span<raid->spanDepth; span++)
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements >=info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset =
+ span_set->strip_offset[span];
+ else
+ break;
+ }
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: "
+ " for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+ return (strip_offset - span_offset);
+ }
+
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm"
+ " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);
+
+ return -1;
+}
+
+
+/* Return the physical arm for the given stripe within the given span. */
+u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
+ MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ /* Default of 0 applies to any RAID level not handled below. */
+ u_int32_t arm = 0;
+
+ if (raid->level == 0 || raid->level == 5 || raid->level == 6) {
+ /* Striped levels: arm is the stripe modulo the row size. */
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ } else if (raid->level == 1) {
+ /* RAID-1: start with the logical arm, then double it to
+ * account for the mirror copies. */
+ arm = get_arm_from_strip(sc, ld, stripe, map);
+ arm *= 2;
+ }
+
+ return arm;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t logArm, rowMod, armQ, arm;
+ u_int8_t do_invader = 0;
+
+ /* Invader/Fury controllers take different region-lock handling below. */
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ do_invader = 1;
+
+ // Get row and span from io_info for Uneven Span IO.
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
+ if (raid->level == 6) {
+ /*
+ * RAID-6: rotate the logical arm past the Q parity drive for
+ * this row, so that data always logically follows Q; wrap
+ * around the span's row size.
+ */
+ logArm = get_arm_from_strip(sc, ld, stripRow, map);
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map,ld,span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map ,ld ,span);
+ physArm = (u_int8_t)arm;
+ } else
+ // Calculate the arm
+ physArm = get_arm(sc, ld, span, stripRow, map);
+
+
+ /* Resolve the arm to a physical disk via this span's array. */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ /* No usable PD: mark the handle invalid, then either force an
+ * exclusive region lock (RAID >= 5) or, for RAID-1, try the
+ * mirror arm instead. */
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ /* Absolute PD block, plus span/arm routing info for the RAID context. */
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
+}
+
+/**
+* MR_BuildRaidContext: Set up Fast path RAID context
+*
+* This function will initiate command processing. The start/end row
+* and strip information is calculated then the lock is acquired.
+* This function will return 0 if region lock was acquired OR return
+* num strips.
+*/
+u_int8_t
+MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld, stripSize, stripe_mask;
+ u_int64_t endLba, endStrip, endRow, start_row, start_strip;
+ REGION_KEY regStart;
+ REGION_LEN regSize;
+ u_int8_t num_strips, numRows;
+ u_int16_t ref_in_start_stripe, ref_in_end_stripe;
+ u_int64_t ldStartBlock;
+ u_int32_t numBlocks, ldTgtId;
+ u_int8_t isRead, stripIdx;
+ u_int8_t retval = 0;
+ u_int8_t startlba_span = SPAN_INVALID;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ int error_code = 0;
+
+ /* Unpack the IO parameters up front. */
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+
+ io_info->IoforUnevenSpan = 0;
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ /*
+ * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
+ * return FALSE
+ */
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (sc->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ }
+ else {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
+ " but there is _NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize-1;
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u_int8_t)(endStrip - start_strip + 1); // End strip
+ if (io_info->IoforUnevenSpan) {
+ /* Uneven spans: rows and spans come from the span-set tables. */
+ start_row = get_row_from_strip(sc, ld, start_strip, map);
+ endRow = get_row_from_strip(sc, ld, endStrip, map);
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else {
+ startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
+ pdBlock, map, &error_code);
+ if (error_code == 1) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+ }
+ if (startlba_span == SPAN_INVALID) {
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
+ "start strip %llx endSrip %llx\n", __func__,
+ __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
+ " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
+ __func__, __LINE__, (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+ mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
+ (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
+ } else {
+ /* Even spans: rows follow directly from the row data size. */
+ start_row = mega_div64_32(start_strip, raid->rowDataSize); // Start Row
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
+ }
+
+ numRows = (u_int8_t)(endRow - start_row + 1); // get the row count
+
+ /*
+ * Calculate region info. (Assume region at start of first row, and
+ * assume this IO needs the full row - will adjust if not true.)
+ */
+ regStart = start_row << raid->stripeShift;
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ }
+ else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ if (num_strips == 1) {
+ /* single-strip IOs can always lock only the data needed,
+ multi-strip IOs always need to full stripe locked */
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+ }
+ else if (io_info->IoforUnevenSpan == 0){
+ // For Even span region lock optimization.
+ // If the start strip is the last in the start row
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ // initialize count to sectors from startRef to end of strip
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ // add complete rows in the middle of the transfer
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ // if IO ends within first strip of last row
+ if (endStrip == endRow*raid->rowDataSize)
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ } else {
+ //For Uneven span region lock optimization.
+ // If the start strip is the last in the start row
+ if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ // initialize count to sectors from startRef to end of strip
+ regSize = stripSize - ref_in_start_stripe;
+ }
+ // add complete rows in the middle of the transfer
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ // if IO ends within first strip of last row
+ if (endStrip == get_strip_from_row(sc, ld, endRow, map))
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ }
+ /* Fill the RAID context with region-lock and routing parameters. */
+ pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ pRAID_Context->regLockFlags = (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead)? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = regStart;
+ pRAID_Context->regLockLength = regSize;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /*
+ * Get Phy Params only if FP capable, or else leave it to MR firmware
+ * to do the calculation.
+ */
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld,
+ start_strip, ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld, start_strip,
+ ref_in_start_stripe, io_info, pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible */
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ }
+ else if (isRead) {
+ /* Non-FP reads: walk every strip; bail out on the first failure. */
+ for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(sc, ld,
+ start_strip + stripIdx,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(sc, ld,
+ start_strip + stripIdx, ref_in_start_stripe,
+ io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
+#if SPAN_DEBUG
+ // Just for testing what arm we get for strip.
+ get_arm_from_strip(sc, ld, start_strip, map);
+#endif
+ return TRUE;
+}
+
+/*
+******************************************************************************
+*
+* This routine prepares spanset info from a valid RAID map and stores it into
+* the local copy of ldSpanInfo in the per-instance data structure.
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+ u_int8_t span,count;
+ u_int32_t element,span_row_width;
+ u_int64_t span_row;
+ MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u_int16_t ld;
+
+ if (!ldSpanInfo)
+ return;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES)
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ /* One span set per quad element; use the first span that has
+ * a quad at this depth. */
+ for (element=0; element < MAX_QUAD_DEPTH; element++) {
+ for (span=0; span < raid->spanDepth; span++) {
+ if (map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements < element+1)
+ continue;
+ // TO-DO
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+
+ span_set->diff = quad->diff;
+
+ /* Accumulate the per-span strip offsets and the
+ * total row data width across all spans that
+ * participate at this quad depth. */
+ for (count=0,span_row_width=0;
+ count<raid->spanDepth; count++) {
+ if (map->raidMap.ldSpanMap[ld].
+ spanBlock[count].
+ block_span_info.
+ noElements >=element+1) {
+ span_set->strip_offset[count] =
+ span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize;
+#if SPAN_DEBUG
+ printf("LSI Debug span %x rowDataSize %x\n",
+ count, MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize);
+#endif
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ /* Number of rows this quad covers. */
+ span_row = mega_div64_32(((quad->logEnd -
+ quad->logStart) + quad->diff), quad->diff);
+
+ /* The first span set starts at zero; later sets
+ * continue where the previous one ended. */
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end =
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end =
+ (span_row * quad->diff) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].
+ span_set[element - 1]);
+ span_set->log_start_lba =
+ span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba =
+ span_set->log_start_lba +
+ ((span_row << raid->stripeShift) * span_row_width) - 1;
+
+ span_set->span_row_start =
+ span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end =
+ span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start =
+ span_set_prev->data_row_end + 1;
+ span_set->data_row_end =
+ span_set->data_row_start +
+ (span_row * quad->diff) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth) break; // no quads remain
+ }
+ }
+#if SPAN_DEBUG
+ getSpanInfo(map, ldSpanInfo); //to get span set info
+#endif
+}
+
+/**
+ * mrsas_update_load_balance_params: Update load balance params
+ * Inputs: map pointer
+ * Load balance info
+ * io_info pointer
+ *
+ * This function updates the load balance parameters for the LD config
+ * of a two drive optimal RAID-1.
+ */
+void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
+ PLD_LOAD_BALANCE_INFO lbInfo)
+{
+ int ldCount;
+ u_int16_t ld;
+ u_int32_t pd, arRef;
+ MR_LD_RAID *raid;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
+ {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ raid = MR_LdRaidGet(ld, map);
+
+ /* Load balancing applies only to a two-drive optimal RAID-1. */
+ if (raid->level != 1 || raid->rowSize != 2 ||
+ raid->spanDepth != 1 ||
+ raid->ldState != MR_LD_STATE_OPTIMAL) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ lbInfo[ldCount].loadBalanceFlag = 1;
+
+ /* Resolve both mirror arms of span 0 to their device handles. */
+ arRef = MR_LdSpanArrayGet(ld, 0, map);
+ pd = MR_ArPdGet(arRef, 0, map);
+ lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map);
+ pd = MR_ArPdGet(arRef, 1, map);
+ lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map);
+ }
+}
+
+
+/**
+ * mrsas_set_pd_lba: Sets PD LBA
+ * input: io_request pointer
+ * CDB length
+ * io_info pointer
+ * Pointer to CCB
+ * Local RAID map pointer
+ * Start block of IO
+ * Block Size
+ *
+ * Used to set the PD logical block address in CDB for FP IOs. Rewrites
+ * the CDB in place: builds a 32-byte protection-capable CDB when the LD
+ * uses controller-managed T10 PI (DIF); otherwise converts between
+ * 6/10/12/16-byte CDB forms as required by the start block, then loads
+ * the physical start LBA into the (possibly converted) CDB.
+ */
+void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
+ struct IO_REQUEST_INFO *io_info, union ccb *ccb,
+ MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
+ u_int32_t ld_block_size)
+{
+ MR_LD_RAID *raid;
+ u_int32_t ld;
+ u_int64_t start_blk = io_info->pdBlock;
+ u_int8_t *cdb = io_request->CDB.CDB32;
+ u_int32_t num_blocks = io_info->numBlocks;
+ u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ /* Build a 32-byte variable-length CDB carrying PI fields. */
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
+
+ /* NOTE(review): direct equality against CAM_DIR_OUT; other
+ * ccb_h flag bits would defeat this test — confirm whether
+ * (ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_OUT is intended. */
+ if (ccb_h->flags == CAM_DIR_OUT)
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
+ cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[19] = (u_int8_t)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
+ io_request->IoFlags = 32; /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u_int8_t)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDP Flags */
+ if (ccb_h->flags == CAM_DIR_OUT) {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ }
+ else {
+ io_request->EEDPFlags =
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ }
+ io_request->Control |= (0x4 << 26);
+ io_request->EEDPBlockSize = ld_block_size;
+ }
+ else {
+ /* Some drives don't support 16/12 byte CDB's, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ }
+ else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = 10; /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+ /* Convert to 16 byte CDB for large LBA's */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u_int8_t)(num_blocks & 0xff);
+ cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = 16; /* Specify 16-byte cdb */
+ cdb_len = 16;
+ } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
+ /* convert to 10 byte CDB */
+ opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
+ control = cdb[5];
+
+ /*
+ * Clear the full 32-byte CDB area; sizeof(cdb)
+ * would only clear pointer-size bytes and leave
+ * stale data in the converted CDB.
+ */
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = opcode;
+ cdb[9] = control;
+
+ /* Set transfer length */
+ cdb[8] = (u_int8_t)(num_blocks & 0xff);
+ cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
+
+ /* Specify 10-byte cdb, consistent with the
+ * 12/16-byte conversion branch above. */
+ io_request->IoFlags = 10;
+ cdb_len = 10;
+ }
+
+ /* Fall through normal case, just load LBA here */
+ switch (cdb_len)
+ {
+ case 6:
+ {
+ u_int8_t val = cdb[1] & 0xE0;
+ cdb[3] = (u_int8_t)(start_blk & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
+ break;
+ }
+ case 10:
+ cdb[5] = (u_int8_t)(start_blk & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
+ break;
+ case 12:
+ cdb[5] = (u_int8_t)(start_blk & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u_int8_t)(start_blk & 0xff);
+ cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
+ cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
+ cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
+ cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
+ cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
+ cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
+ cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
+ break;
+ }
+ }
+}
+
+/**
+ * mrsas_get_best_arm: Choose the better mirror arm for a RAID-1 IO
+ * Inputs: Load balance info, currently-selected arm, start block, count
+ *
+ * Picks the arm whose last-accessed block is nearest the requested
+ * block, unless that arm is significantly busier than the other.
+ */
+u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
+ u_int64_t block, u_int32_t count)
+{
+ u_int16_t pending0, pending1;
+ u_int64_t dist0, dist1;
+ u_int8_t chosen;
+
+ /* Outstanding command counts on each arm. */
+ pending0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
+ pending1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+
+ /* Prefer the arm whose head sits closest to the requested block. */
+ dist0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+ dist1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+ chosen = (dist0 <= dist1) ? 0 : 1;
+
+ /* Override the choice when the favored arm is far busier. */
+ if ((chosen == arm && pending0 > pending1 + 16) ||
+ (chosen != arm && pending1 > pending0 + 16))
+ chosen ^= 1;
+
+ /* Remember where this transfer leaves the chosen arm's head. */
+ lbInfo->last_accessed_block[chosen] = block + count - 1;
+
+ return chosen;
+}
+
+/**
+ * mrsas_get_updated_dev_handle: Pick the dev handle for a balanced IO
+ * Inputs: Load balance info
+ * io_info pointer
+ *
+ * Selects the best RAID-1 arm for this IO, bumps that arm's pending
+ * command count, and returns the corresponding device handle.
+ */
+u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
+ struct IO_REQUEST_INFO *io_info)
+{
+ u_int8_t best_arm, current_arm;
+ u_int16_t handle;
+
+ /* Which arm does the current device handle belong to? */
+ current_arm = (lbInfo->raid1DevHandle[0] == io_info->devHandle) ? 0 : 1;
+
+ /* Re-evaluate the arm choice and account for the IO on it. */
+ best_arm = mrsas_get_best_arm(lbInfo, current_arm,
+ io_info->ldStartBlock, io_info->numBlocks);
+ handle = lbInfo->raid1DevHandle[best_arm];
+ atomic_inc(&lbInfo->scsi_pending_cmds[best_arm]);
+
+ return handle;
+}
+
+/**
+ * MR_GetPhyParams Calculates arm, span, and block
+ * Inputs: Adapter instance soft state
+ * Logical drive number (LD)
+ * Stripe number (stripRow)
+ * Reference in stripe (stripRef)
+ * Outputs: Span number
+ * Absolute Block number in the physical disk
+ *
+ * This routine calculates the arm, span and block for the specified stripe
+ * and reference in stripe.
+ */
+u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
+ u_int64_t stripRow,
+ u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
+ RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
+{
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t pd, arRef;
+ u_int8_t physArm, span;
+ u_int64_t row;
+ u_int8_t retval = TRUE;
+ int error_code = 0;
+ u_int64_t *pdBlock = &io_info->pdBlock;
+ u_int16_t *pDevHandle = &io_info->devHandle;
+ u_int32_t rowMod, armQ, arm, logArm;
+ u_int8_t do_invader = 0;
+
+ /* Invader/Fury controllers take different region-lock handling below. */
+ if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ do_invader = 1;
+
+ /* Even-span layout: row follows directly from the row data size. */
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ logArm = mega_mod64(stripRow, raid->rowDataSize); // logical arm within row
+ if (raid->rowSize == 0)
+ return FALSE;
+ rowMod = mega_mod64(row, raid->rowSize); // get logical row mod
+ armQ = raid->rowSize-1-rowMod; // index of Q drive
+ arm = armQ+1+logArm; // data always logically follows Q
+ if (arm >= raid->rowSize) // handle wrap condition
+ arm -= raid->rowSize;
+ physArm = (u_int8_t)arm;
+ }
+ else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
+ }
+
+ if (raid->spanDepth == 1) {
+ /* Single span: the PD block is just the row's block offset. */
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ }
+ else {
+ span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
+ if (error_code == 1)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+
+ pd = MR_ArPdGet(arRef, physArm, map); // Get the Pd.
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
+ else {
+ *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
+ /* No usable PD: force an exclusive region lock for RAID >= 5,
+ * or try the RAID-1 mirror arm instead. */
+ if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);//Get dev handle from Pd.
+ }
+ }
+
+ /* Absolute PD block, plus span/arm routing info for the RAID context. */
+ *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+ return retval;
+}
+
+/**
+ * MR_GetSpanBlock Calculates span block
+ * Inputs: LD
+ * row
+ * PD span block
+ * RAID map pointer
+ * Outputs: Span number
+ * Error code
+ *
+ * This routine calculates the span from the span block info.
+ */
+u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
+ MR_FW_RAID_MAP_ALL *map, int *div_error)
+{
+ MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ MR_QUAD_ELEMENT *quad;
+ MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u_int32_t span, j;
+ u_int64_t blk, debugBlk;
+
+ /* Scan each span's quad elements for one that maps this row. */
+ for (span=0; span < raid->spanDepth; span++, pSpanBlock++) {
+ for (j=0; j < pSpanBlock->block_span_info.noElements; j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+ /* A zero diff would divide by zero below; report it. */
+ if (quad->diff == 0) {
+ *div_error = 1;
+ return span;
+ }
+ if (quad->logStart <= row && row <= quad->logEnd &&
+ (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+ if (span_blk != NULL) {
+ /* Block offset of the row within the span. */
+ blk = mega_div64_32((row-quad->logStart), quad->diff);
+ debugBlk = blk;
+ blk = (blk + quad->offsetInSpan) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ /*
+ * NOTE(review): no quad matched — returns raid->spanDepth without
+ * setting *div_error or *span_blk; confirm callers treat an
+ * out-of-range span as a failure.
+ */
+ return span;
+}
+
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
new file mode 100644
index 0000000000000..6343aa5956042
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <dev/mrsas/mrsas.h>
+#include <dev/mrsas/mrsas_ioctl.h>
+
+/*
+ * Function prototypes
+ */
+int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
+int mrsas_passthru(struct mrsas_softc *sc, void *arg);
+void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
+void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+void mrsas_dump_dcmd(struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd);
+void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc);
+void * mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
+static int mrsas_create_frame_pool(struct mrsas_softc *sc);
+static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+ int nsegs, int error);
+
+extern struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
+extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
+extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
+ struct mrsas_mfi_cmd *cmd);
+
+
+/**
+ * mrsas_dump_dcmd: Print debug output for DCMDs
+ * input: Adapter instance soft state
+ * DCMD frame structure
+ *
+ * This function is called from mrsas_passthru() to print out debug information
+ * in the handling and routing of DCMD commands.
+ */
+void mrsas_dump_dcmd( struct mrsas_softc *sc, struct mrsas_dcmd_frame* dcmd )
+{
+    int i;
+
+    /* Fixed DCMD header fields. */
+    device_printf(sc->mrsas_dev, "dcmd->cmd: 0x%02hhx\n", dcmd->cmd);
+    device_printf(sc->mrsas_dev, "dcmd->cmd_status: 0x%02hhx\n", dcmd->cmd_status);
+    device_printf(sc->mrsas_dev, "dcmd->sge_count: 0x%02hhx\n", dcmd->sge_count);
+    device_printf(sc->mrsas_dev, "dcmd->context: 0x%08x\n", dcmd->context);
+    device_printf(sc->mrsas_dev, "dcmd->flags: 0x%04hx\n", dcmd->flags);
+    device_printf(sc->mrsas_dev, "dcmd->timeout: 0x%04hx\n", dcmd->timeout);
+    device_printf(sc->mrsas_dev, "dcmd->data_xfer_len: 0x%08x\n", dcmd->data_xfer_len);
+    device_printf(sc->mrsas_dev, "dcmd->opcode: 0x%08x\n", dcmd->opcode);
+    /* First three mailbox words of the DCMD. */
+    device_printf(sc->mrsas_dev, "dcmd->mbox.w[0]: 0x%08x\n", dcmd->mbox.w[0]);
+    device_printf(sc->mrsas_dev, "dcmd->mbox.w[1]: 0x%08x\n", dcmd->mbox.w[1]);
+    device_printf(sc->mrsas_dev, "dcmd->mbox.w[2]: 0x%08x\n", dcmd->mbox.w[2]);
+    /*
+     * Dump each SGL entry under both the 32-bit and 64-bit views of the
+     * union; which view is live presumably depends on the frame flags
+     * (cf. MFI_FRAME_SGL64) -- both are printed since this is debug-only.
+     */
+    for (i=0; i< MIN(MAX_IOCTL_SGE, dcmd->sge_count); i++) {
+        device_printf(sc->mrsas_dev, "sgl[%02d]\n", i);
+        device_printf(sc->mrsas_dev, " sge32[%02d].phys_addr: 0x%08x\n",
+            i, dcmd->sgl.sge32[i].phys_addr);
+        device_printf(sc->mrsas_dev, " sge32[%02d].length: 0x%08x\n",
+            i, dcmd->sgl.sge32[i].length);
+        device_printf(sc->mrsas_dev, " sge64[%02d].phys_addr: 0x%08llx\n",
+            i, (long long unsigned int) dcmd->sgl.sge64[i].phys_addr);
+        device_printf(sc->mrsas_dev, " sge64[%02d].length: 0x%08x\n",
+            i, dcmd->sgl.sge64[i].length);
+    }
+}
+
+/**
+ * mrsas_dump_ioctl: Print debug output for ioctl
+ * input: Adapter instance soft state
+ * iocpacket structure
+ *
+ * This function is called from mrsas_passthru() to print out debug information
+ * in the handling and routing of ioctl commands.
+ */
+void mrsas_dump_ioctl(struct mrsas_softc *sc, struct mrsas_iocpacket *user_ioc)
+{
+    /* View the raw frame bytes in the iocpacket as a DCMD frame. */
+    union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
+    struct mrsas_dcmd_frame* dcmd = (struct mrsas_dcmd_frame *) &(in_cmd->dcmd);
+    int i;
+
+    device_printf(sc->mrsas_dev,
+        "====== In %s() ======================================\n", __func__);
+    /* ioctl envelope: routing plus SGL/sense layout information. */
+    device_printf(sc->mrsas_dev, "host_no: 0x%04hx\n", user_ioc->host_no);
+    device_printf(sc->mrsas_dev, " __pad1: 0x%04hx\n", user_ioc->__pad1);
+    device_printf(sc->mrsas_dev, "sgl_off: 0x%08x\n", user_ioc->sgl_off);
+    device_printf(sc->mrsas_dev, "sge_count: 0x%08x\n", user_ioc->sge_count);
+    device_printf(sc->mrsas_dev, "sense_off: 0x%08x\n", user_ioc->sense_off);
+    device_printf(sc->mrsas_dev, "sense_len: 0x%08x\n", user_ioc->sense_len);
+
+    /* Decode and dump the embedded frame as a DCMD. */
+    mrsas_dump_dcmd(sc, dcmd);
+
+    /* User-space scatter/gather entries (virtual address + length). */
+    for (i=0; i< MIN(MAX_IOCTL_SGE, user_ioc->sge_count); i++) {
+        device_printf(sc->mrsas_dev, "sge[%02d]\n", i);
+        device_printf(sc->mrsas_dev,
+            " iov_base: %p\n", user_ioc->sgl[i].iov_base);
+        device_printf(sc->mrsas_dev, " iov_len: %p\n",
+            (void*)user_ioc->sgl[i].iov_len);
+    }
+    device_printf(sc->mrsas_dev,
+        "==================================================================\n");
+}
+
+/**
+ * mrsas_passthru: Handle pass-through commands
+ * input: Adapter instance soft state
+ * argument pointer
+ *
+ * This function is called from mrsas_ioctl() to handle pass-through and
+ * ioctl commands to Firmware.
+ */
+int mrsas_passthru( struct mrsas_softc *sc, void *arg )
+{
+    struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
+    union mrsas_frame *in_cmd = (union mrsas_frame *) &(user_ioc->frame.raw);
+    struct mrsas_mfi_cmd *cmd = NULL;
+    bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
+    bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
+    void *ioctl_data_mem[MAX_IOCTL_SGE]; // ioctl data virtual addr
+    bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE]; // ioctl data phys addr
+    bus_dma_tag_t ioctl_sense_tag = 0;
+    bus_dmamap_t ioctl_sense_dmamap = 0;
+    void *ioctl_sense_mem = 0;
+    bus_addr_t ioctl_sense_phys_addr = 0;
+    int i, adapter, ioctl_data_size, ioctl_sense_size, ret=0;
+    struct mrsas_sge32 *kern_sge32;
+    unsigned long *sense_ptr;
+
+    /*
+     * Zero the per-SGE bookkeeping up front so the unified cleanup path
+     * at 'out:' can run safely from ANY failure point.  (The original
+     * read uninitialized array slots at 'out:' and leaked the MFI
+     * command plus earlier DMA buffers by returning ENOMEM directly
+     * from the allocation loops.)
+     */
+    memset(ioctl_data_tag, 0, sizeof(ioctl_data_tag));
+    memset(ioctl_data_dmamap, 0, sizeof(ioctl_data_dmamap));
+    memset(ioctl_data_mem, 0, sizeof(ioctl_data_mem));
+    memset(ioctl_data_phys_addr, 0, sizeof(ioctl_data_phys_addr));
+
+    /* For debug - uncomment the following line for debug output */
+    //mrsas_dump_ioctl(sc, user_ioc);
+
+    /*
+     * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0.  In
+     * this case do nothing and return 0 to it as status.
+     */
+    if (in_cmd->dcmd.opcode == 0) {
+        device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
+        user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
+        return (0);
+    }
+
+    /* Validate host_no against this adapter's unit number. */
+    adapter = user_ioc->host_no;
+    if (adapter != device_get_unit(sc->mrsas_dev)) {
+        device_printf(sc->mrsas_dev, "In %s() IOCTL not for me!\n", __func__);
+        return(ENOENT);
+    }
+
+    /*
+     * Validate SGL length.  The message now reports the real limit
+     * (the original hardcoded "> 8" while MAX_IOCTL_SGE is 16).
+     */
+    if (user_ioc->sge_count > MAX_IOCTL_SGE) {
+        device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > %d).\n",
+            __func__, user_ioc->sge_count, MAX_IOCTL_SGE);
+        return(ENOENT);
+    }
+
+    /* Get a free MFI command to carry the pass-through frame. */
+    cmd = mrsas_get_mfi_cmd(sc);
+    if (!cmd) {
+        device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
+        return(ENOMEM);
+    }
+
+    /*
+     * User's IOCTL packet has 2 frames (maximum).  Copy those two
+     * frames into our cmd's frames.  cmd->frame's context will get
+     * overwritten when we copy from user's frames, so restore it
+     * afterwards.  Force 32-bit SGL/sense addressing since the kernel
+     * mirror buffers are allocated below 4GB (BUS_SPACE_MAXADDR_32BIT).
+     */
+    memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+    cmd->frame->hdr.context = cmd->index;
+    cmd->frame->hdr.pad_0 = 0;
+    cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+        MFI_FRAME_SENSE64);
+
+    /*
+     * The management interface between applications and the fw uses
+     * MFI frames.  E.g, RAID configuration changes, LD property changes
+     * etc are accomplished through different kinds of MFI frames.  The
+     * driver needs to care only about substituting user buffers with
+     * kernel buffers in SGLs.  The location of the SGL is embedded in
+     * the struct iocpacket itself.
+     */
+    kern_sge32 = (struct mrsas_sge32 *)
+        ((unsigned long)cmd->frame + user_ioc->sgl_off);
+
+    /* For each user buffer, create a DMA-able mirror buffer and copy in. */
+    for (i=0; i < user_ioc->sge_count; i++) {
+        if (!user_ioc->sgl[i].iov_len)
+            continue;
+        ioctl_data_size = user_ioc->sgl[i].iov_len;
+        if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+            1, 0,                   // algnmnt, boundary
+            BUS_SPACE_MAXADDR_32BIT,// lowaddr
+            BUS_SPACE_MAXADDR,      // highaddr
+            NULL, NULL,             // filter, filterarg
+            ioctl_data_size,        // maxsize
+            1,                      // msegments
+            ioctl_data_size,        // maxsegsize
+            BUS_DMA_ALLOCNOW,       // flags
+            NULL, NULL,             // lockfunc, lockarg
+            &ioctl_data_tag[i])) {
+            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
+            ret = ENOMEM;
+            goto out;
+        }
+        if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
+            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
+            device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
+            ret = ENOMEM;
+            goto out;
+        }
+        if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
+            ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
+            &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
+            device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
+            ret = ENOMEM;
+            goto out;
+        }
+
+        /* Substitute the kernel mirror's physical address into the SGL. */
+        kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];
+        kern_sge32[i].length = user_ioc->sgl[i].iov_len;
+
+        /* Copy in data from user space */
+        ret = copyin(user_ioc->sgl[i].iov_base, ioctl_data_mem[i],
+            user_ioc->sgl[i].iov_len);
+        if (ret) {
+            device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
+            goto out;
+        }
+    }
+
+    /* Mirror the sense buffer, if the caller provided one. */
+    ioctl_sense_size = user_ioc->sense_len;
+    if (user_ioc->sense_len) {
+        if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+            1, 0,                   // algnmnt, boundary
+            BUS_SPACE_MAXADDR_32BIT,// lowaddr
+            BUS_SPACE_MAXADDR,      // highaddr
+            NULL, NULL,             // filter, filterarg
+            ioctl_sense_size,       // maxsize
+            1,                      // msegments
+            ioctl_sense_size,       // maxsegsize
+            BUS_DMA_ALLOCNOW,       // flags
+            NULL, NULL,             // lockfunc, lockarg
+            &ioctl_sense_tag)) {
+            device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
+            ret = ENOMEM;
+            goto out;
+        }
+        if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
+            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
+            device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
+            ret = ENOMEM;
+            goto out;
+        }
+        if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
+            ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
+            &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
+            device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
+            ret = ENOMEM;
+            goto out;
+        }
+        /*
+         * Plug the sense buffer's bus address into the frame at
+         * sense_off.  (The original assigned the local 'sense_ptr'
+         * variable instead of storing through it, so the firmware was
+         * never told where to DMA the sense data.)
+         */
+        sense_ptr =
+            (unsigned long *)((unsigned long)cmd->frame + user_ioc->sense_off);
+        *sense_ptr = (unsigned long)ioctl_sense_phys_addr;
+    }
+
+    /*
+     * Set the sync_cmd flag so that the ISR knows not to complete this
+     * cmd to the SCSI mid-layer.
+     */
+    cmd->sync_cmd = 1;
+    mrsas_issue_blocked_cmd(sc, cmd);
+    cmd->sync_cmd = 0;
+
+    /* Copy the kernel mirror buffers back out to the user buffers. */
+    for (i = 0; i < user_ioc->sge_count; i++) {
+        if (!user_ioc->sgl[i].iov_len)
+            continue;
+        ret = copyout(ioctl_data_mem[i], user_ioc->sgl[i].iov_base,
+            user_ioc->sgl[i].iov_len);
+        if (ret) {
+            device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
+            goto out;
+        }
+    }
+
+    /* Copy out the sense to the user-supplied sense buffer. */
+    if (user_ioc->sense_len) {
+        /*
+         * The user frame at sense_off holds the USER virtual address of
+         * the sense buffer; copyout() validates that address for us.
+         */
+        sense_ptr = (unsigned long *) ((unsigned long)user_ioc->frame.raw +
+            user_ioc->sense_off);
+        ret = copyout(ioctl_sense_mem, (unsigned long*)*sense_ptr,
+            user_ioc->sense_len);
+        if (ret) {
+            device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
+            goto out;
+        }
+    }
+
+    /* Return the firmware's command status to user space. */
+    memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
+        sizeof(u_int8_t));
+
+out:
+    /* Release the sense buffer (safe: handles are zero if never set). */
+    if (ioctl_sense_phys_addr)
+        bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
+    if (ioctl_sense_mem)
+        bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem, ioctl_sense_dmamap);
+    if (ioctl_sense_tag)
+        bus_dma_tag_destroy(ioctl_sense_tag);
+
+    /* Release the mirror data buffers (arrays were zeroed up front). */
+    for (i = 0; i < user_ioc->sge_count; i++) {
+        if (ioctl_data_phys_addr[i])
+            bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
+        if (ioctl_data_mem[i] != NULL)
+            bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
+                ioctl_data_dmamap[i]);
+        if (ioctl_data_tag[i] != NULL)
+            bus_dma_tag_destroy(ioctl_data_tag[i]);
+    }
+
+    /* Return the MFI command to the free list. */
+    mrsas_release_mfi_cmd(cmd);
+
+    return(ret);
+}
+
+/**
+ * mrsas_alloc_mfi_cmds: Allocates the command packets
+ * input: Adapter instance soft state
+ *
+ * Each IOCTL or passthru command that is issued to the FW are wrapped in a
+ * local data structure called mrsas_mfi_cmd. The frame embedded in this
+ * mrsas_mfi is issued to FW. The array is used only to look up the
+ * mrsas_mfi_cmd given the context. The free commands are maintained in a
+ * linked list.
+ */
+int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
+{
+    int i, j;
+    u_int32_t max_cmd;
+    struct mrsas_mfi_cmd *cmd;
+
+    max_cmd = MRSAS_MAX_MFI_CMDS;
+
+    /*
+     * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
+     * Allocate the dynamic array first and then allocate individual
+     * commands.  M_ZERO replaces the separate memset() passes of the
+     * original (see malloc(9)).
+     */
+    sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd*)*max_cmd,
+        M_MRSAS, M_NOWAIT | M_ZERO);
+    if (!sc->mfi_cmd_list) {
+        device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
+        return(ENOMEM);
+    }
+    for (i = 0; i < max_cmd; i++) {
+        sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
+            M_MRSAS, M_NOWAIT | M_ZERO);
+        if (!sc->mfi_cmd_list[i]) {
+            /* Unwind the partially-built array on failure. */
+            for (j = 0; j < i; j++)
+                free(sc->mfi_cmd_list[j], M_MRSAS);
+            free(sc->mfi_cmd_list, M_MRSAS);
+            sc->mfi_cmd_list = NULL;
+            return(ENOMEM);
+        }
+    }
+
+    /* Initialize each (already zeroed) command and put it on the free list. */
+    for (i = 0; i < max_cmd; i++) {
+        cmd = sc->mfi_cmd_list[i];
+        cmd->index = i;
+        cmd->ccb_ptr = NULL;
+        cmd->sc = sc;
+        TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
+    }
+
+    /* Create a frame pool and assign one DMA-able frame to each command. */
+    if (mrsas_create_frame_pool(sc)) {
+        device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
+        /* Free whatever frames were allocated before the failure. */
+        for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+            cmd = sc->mfi_cmd_list[i];
+            mrsas_free_frame(sc, cmd);
+        }
+        if (sc->mficmd_frame_tag != NULL)
+            bus_dma_tag_destroy(sc->mficmd_frame_tag);
+        return(ENOMEM);
+    }
+
+    return(0);
+}
+
+/**
+ * mrsas_create_frame_pool - Creates DMA pool for cmd frames
+ * input: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility. pad_0 is initialized to 0 to prevent corrupting value
+ * of context and could cause FW crash.
+ */
+static int mrsas_create_frame_pool(struct mrsas_softc *sc)
+{
+    int i;
+    struct mrsas_mfi_cmd *cmd;
+
+    /* One shared tag covers all frames; each frame is a single segment. */
+    if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
+        1, 0, // algnmnt, boundary
+        BUS_SPACE_MAXADDR_32BIT,// lowaddr
+        BUS_SPACE_MAXADDR, // highaddr
+        NULL, NULL, // filter, filterarg
+        MRSAS_MFI_FRAME_SIZE, // maxsize
+        1, // msegments
+        MRSAS_MFI_FRAME_SIZE, // maxsegsize
+        BUS_DMA_ALLOCNOW, // flags
+        NULL, NULL, // lockfunc, lockarg
+        &sc->mficmd_frame_tag)) {
+        device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
+        return (ENOMEM);
+    }
+
+    /* Allocate and wire one DMA-able frame for each command. */
+    for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
+        cmd = sc->mfi_cmd_list[i];
+        cmd->frame = mrsas_alloc_frame(sc, cmd);
+        if (cmd->frame == NULL) {
+            /*
+             * Frames allocated so far are NOT released here; the
+             * caller, mrsas_alloc_mfi_cmds(), frees them when this
+             * returns non-zero -- keep that contract in mind.
+             */
+            device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+            return (ENOMEM);
+        }
+        memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
+        cmd->frame->io.context = cmd->index;
+        /* pad_0 must stay 0; a non-zero value can crash the FW. */
+        cmd->frame->io.pad_0 = 0;
+    }
+
+    return(0);
+}
+
+/**
+ * mrsas_alloc_frame - Allocates MFI Frames
+ * input: Adapter soft state
+ *
+ * Create bus DMA memory tag and dmamap and load memory for MFI frames.
+ * Returns virtual memory pointer to allocated region.
+ */
+void *mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;
+
+    /* Allocate the backing memory and map for one MFI frame. */
+    if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
+        BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
+        device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
+        return (NULL);
+    }
+    /*
+     * NOTE(review): if the load below fails, cmd->frame_mem stays
+     * allocated with cmd->frame left NULL; mrsas_free_frame() still
+     * releases it later because it checks frame_mem and frame_phys_addr
+     * independently.
+     */
+    if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
+        cmd->frame_mem, frame_size, mrsas_alloc_cb,
+        &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
+        device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
+        return (NULL);
+    }
+
+    return(cmd->frame_mem);
+}
+
+/*
+ * mrsas_alloc_cb: Callback function of bus_dmamap_load()
+ * input: callback argument,
+ * machine dependent type that describes DMA segments,
+ * number of segments,
+ * error code.
+ *
+ * This function is for the driver to receive mapping information resultant
+ * of the bus_dmamap_load(). The information is actually not being used,
+ * but the address is saved anyway.
+ */
+static void mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
+    int nsegs, int error)
+{
+    /*
+     * bus_dmamap_load() callback: the caller passed the address of a
+     * bus_addr_t as 'arg'; record the (single) segment's bus address
+     * there.  'nsegs' and 'error' are not used by this driver.
+     */
+    bus_addr_t *paddr = arg;
+
+    *paddr = segs[0].ds_addr;
+}
+
+/**
+ * mrsas_free_frame: Frees memory for MFI frames
+ * input: Adapter soft state
+ *
+ * Deallocates MFI frames memory. Called from mrsas_free_mem() during
+ * detach and error case during creation of frame pool.
+ */
+void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
+{
+    /* Unload first (only if the frame was actually mapped)... */
+    if (cmd->frame_phys_addr)
+        bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
+    /* ...then free the backing memory, if it was ever allocated. */
+    if (cmd->frame_mem != NULL)
+        bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem, cmd->frame_dmamap);
+}
diff --git a/sys/dev/mrsas/mrsas_ioctl.h b/sys/dev/mrsas/mrsas_ioctl.h
new file mode 100644
index 0000000000000..360484277678b
--- /dev/null
+++ b/sys/dev/mrsas/mrsas_ioctl.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, LSI Corp.
+ * All rights reserved.
+ * Author: Marian Choy
+ * Support: freebsdraid@lsi.com
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the <ORGANIZATION> nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation
+ * are those of the authors and should not be interpreted as representing
+ * official policies,either expressed or implied, of the FreeBSD Project.
+ *
+ * Send feedback to: <megaraidfbsd@lsi.com>
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: MegaRaid FreeBSD
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifndef MRSAS_IOCTL_H
+#define MRSAS_IOCTL_H
+
+#ifndef _IOWR
+#include <sys/ioccom.h>
+#endif /* !_IOWR */
+
+/*
+ * We need to use the same values as the mfi driver until MegaCli adds
+ * support for this (mrsas) driver:
+ * M is for MegaRAID. (This is typically the vendor or product initial)
+ * 1 arbitrary. (This may be used to segment kinds of commands.
+ * (1-9 status, 10-20 policy, etc.)
+ * struct mrsas_iocpacket (sizeof() this parameter will be used.)
+ * These three values are encoded into a somewhat unique, 32-bit value.
+ */
+
+#define MRSAS_IOC_FIRMWARE_PASS_THROUGH _IOWR('M', 1, struct mrsas_iocpacket)
+
+#define MRSAS_IOC_SCAN_BUS _IO('M', 10)
+
+#define MAX_IOCTL_SGE 16
+#define MFI_FRAME_DIR_READ 0x0010
+#define MFI_CMD_LD_SCSI_IO 0x03
+
+#define INQUIRY_CMD 0x12
+#define INQUIRY_CMDLEN 6
+#define INQUIRY_REPLY_LEN 96
+#define INQUIRY_VENDOR 8 /* Offset in reply data to vendor name */
+#define SCSI_SENSE_BUFFERSIZE 96
+
+#define MEGAMFI_RAW_FRAME_SIZE 128
+
+
+#pragma pack(1)
+/*
+ * User<->kernel ioctl packet for MRSAS_IOC_FIRMWARE_PASS_THROUGH.
+ * Layout must match what MegaCli expects (byte-packed, mfi-compatible).
+ */
+struct mrsas_iocpacket {
+    u_int16_t host_no;      /* adapter (device unit) this ioctl targets */
+    u_int16_t __pad1;       /* explicit pad; struct is byte-packed */
+    u_int32_t sgl_off;      /* byte offset of the SGL within the frame */
+    u_int32_t sge_count;    /* entries used in sgl[] (<= MAX_IOCTL_SGE) */
+    u_int32_t sense_off;    /* byte offset of the sense pointer in the frame */
+    u_int32_t sense_len;    /* user sense buffer length (0 = none) */
+    union {
+        u_int8_t raw[MEGAMFI_RAW_FRAME_SIZE];   /* raw MFI frame bytes */
+        struct mrsas_header hdr;                /* header view of the frame */
+    } frame;
+    struct iovec sgl[MAX_IOCTL_SGE];    /* user data buffers for the command */
+};
+#pragma pack()
+
+#endif /* MRSAS_IOCTL_H */
diff --git a/sys/dev/null/null.c b/sys/dev/null/null.c
index c1208c119bbe7..f836147a773c9 100644
--- a/sys/dev/null/null.c
+++ b/sys/dev/null/null.c
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2000 Mark R. V. Murray & Jeroen C. van Gelderen
* Copyright (c) 2001-2004 Mark R. V. Murray
+ * Copyright (c) 2014 Eitan Adler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,14 +46,24 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
/* For use with destroy_dev(9). */
+static struct cdev *full_dev;
static struct cdev *null_dev;
static struct cdev *zero_dev;
+static d_write_t full_write;
static d_write_t null_write;
static d_ioctl_t null_ioctl;
static d_ioctl_t zero_ioctl;
static d_read_t zero_read;
+static struct cdevsw full_cdevsw = {
+ .d_version = D_VERSION,
+ .d_read = zero_read,
+ .d_write = full_write,
+ .d_ioctl = zero_ioctl,
+ .d_name = "full",
+};
+
static struct cdevsw null_cdevsw = {
.d_version = D_VERSION,
.d_read = (d_read_t *)nullop,
@@ -70,6 +81,16 @@ static struct cdevsw zero_cdevsw = {
.d_flags = D_MMAP_ANON,
};
+
+
+/* ARGSUSED */
+/* Write handler for /dev/full: every write fails with ENOSPC. */
+static int
+full_write(struct cdev *dev __unused, struct uio *uio __unused, int flags __unused)
+{
+
+	return (ENOSPC);
+}
+
/* ARGSUSED */
static int
null_write(struct cdev *dev __unused, struct uio *uio, int flags __unused)
@@ -155,7 +176,9 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
switch(type) {
case MOD_LOAD:
if (bootverbose)
- printf("null: <null device, zero device>\n");
+ printf("null: <full device, null device, zero device>\n");
+ full_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &full_cdevsw, 0,
+ NULL, UID_ROOT, GID_WHEEL, 0666, "full");
null_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &null_cdevsw, 0,
NULL, UID_ROOT, GID_WHEEL, 0666, "null");
zero_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &zero_cdevsw, 0,
@@ -163,6 +186,7 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
break;
case MOD_UNLOAD:
+ destroy_dev(full_dev);
destroy_dev(null_dev);
destroy_dev(zero_dev);
break;
diff --git a/sys/dev/ofw/ofw_bus.h b/sys/dev/ofw/ofw_bus.h
index 7f0e50e7f347b..f7c72958d55fc 100644
--- a/sys/dev/ofw/ofw_bus.h
+++ b/sys/dev/ofw/ofw_bus.h
@@ -76,12 +76,4 @@ ofw_bus_map_intr(device_t dev, phandle_t iparent, int icells, pcell_t *intr)
return (OFW_BUS_MAP_INTR(dev, dev, iparent, icells, intr));
}
-static __inline int
-ofw_bus_map_gpios(device_t bus, phandle_t dev, phandle_t gparent, int gcells,
- pcell_t *gpios, uint32_t *pin, uint32_t *flags)
-{
- return (OFW_BUS_MAP_GPIOS(bus, dev, gparent, gcells, gpios, pin,
- flags));
-}
-
#endif /* !_DEV_OFW_OFW_BUS_H_ */
diff --git a/sys/dev/ofw/ofw_bus_if.m b/sys/dev/ofw/ofw_bus_if.m
index 86c9876181551..36d10e6168002 100644
--- a/sys/dev/ofw/ofw_bus_if.m
+++ b/sys/dev/ofw/ofw_bus_if.m
@@ -58,7 +58,6 @@ CODE {
static ofw_bus_get_node_t ofw_bus_default_get_node;
static ofw_bus_get_type_t ofw_bus_default_get_type;
static ofw_bus_map_intr_t ofw_bus_default_map_intr;
- static ofw_bus_map_gpios_t ofw_bus_default_map_gpios;
static const struct ofw_bus_devinfo *
ofw_bus_default_get_devinfo(device_t bus, device_t dev)
@@ -114,24 +113,6 @@ CODE {
/* If that fails, then assume a one-domain system */
return (interrupt[0]);
}
-
- int
- ofw_bus_default_map_gpios(device_t bus, phandle_t dev,
- phandle_t gparent, int gcells, pcell_t *gpios, uint32_t *pin,
- uint32_t *flags)
- {
- /* Propagate up the bus hierarchy until someone handles it. */
- if (device_get_parent(bus) != NULL)
- return OFW_BUS_MAP_GPIOS(device_get_parent(bus), dev,
- gparent, gcells, gpios, pin, flags);
-
- /* If that fails, then assume the FreeBSD defaults. */
- *pin = gpios[0];
- if (gcells == 2 || gcells == 3)
- *flags = gpios[gcells - 1];
-
- return (0);
- }
};
# Get the ofw_bus_devinfo struct for the device dev on the bus. Used for bus
@@ -188,14 +169,3 @@ METHOD int map_intr {
int icells;
pcell_t *interrupt;
} DEFAULT ofw_bus_default_map_intr;
-
-# Map the GPIO controller specific gpio-specifier to GPIO pin and flags.
-METHOD int map_gpios {
- device_t bus;
- phandle_t dev;
- phandle_t gparent;
- int gcells;
- pcell_t *gpios;
- uint32_t *pin;
- uint32_t *flags;
-} DEFAULT ofw_bus_default_map_gpios;
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index 9df891bedb07a..7d36b6261c708 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -3968,105 +3968,107 @@ static const struct
{
int class;
int subclass;
+ int report; /* 0 = bootverbose, 1 = always */
const char *desc;
} pci_nomatch_tab[] = {
- {PCIC_OLD, -1, "old"},
- {PCIC_OLD, PCIS_OLD_NONVGA, "non-VGA display device"},
- {PCIC_OLD, PCIS_OLD_VGA, "VGA-compatible display device"},
- {PCIC_STORAGE, -1, "mass storage"},
- {PCIC_STORAGE, PCIS_STORAGE_SCSI, "SCSI"},
- {PCIC_STORAGE, PCIS_STORAGE_IDE, "ATA"},
- {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, "floppy disk"},
- {PCIC_STORAGE, PCIS_STORAGE_IPI, "IPI"},
- {PCIC_STORAGE, PCIS_STORAGE_RAID, "RAID"},
- {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, "ATA (ADMA)"},
- {PCIC_STORAGE, PCIS_STORAGE_SATA, "SATA"},
- {PCIC_STORAGE, PCIS_STORAGE_SAS, "SAS"},
- {PCIC_STORAGE, PCIS_STORAGE_NVM, "NVM"},
- {PCIC_NETWORK, -1, "network"},
- {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, "ethernet"},
- {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, "token ring"},
- {PCIC_NETWORK, PCIS_NETWORK_FDDI, "fddi"},
- {PCIC_NETWORK, PCIS_NETWORK_ATM, "ATM"},
- {PCIC_NETWORK, PCIS_NETWORK_ISDN, "ISDN"},
- {PCIC_DISPLAY, -1, "display"},
- {PCIC_DISPLAY, PCIS_DISPLAY_VGA, "VGA"},
- {PCIC_DISPLAY, PCIS_DISPLAY_XGA, "XGA"},
- {PCIC_DISPLAY, PCIS_DISPLAY_3D, "3D"},
- {PCIC_MULTIMEDIA, -1, "multimedia"},
- {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, "video"},
- {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, "audio"},
- {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, "telephony"},
- {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, "HDA"},
- {PCIC_MEMORY, -1, "memory"},
- {PCIC_MEMORY, PCIS_MEMORY_RAM, "RAM"},
- {PCIC_MEMORY, PCIS_MEMORY_FLASH, "flash"},
- {PCIC_BRIDGE, -1, "bridge"},
- {PCIC_BRIDGE, PCIS_BRIDGE_HOST, "HOST-PCI"},
- {PCIC_BRIDGE, PCIS_BRIDGE_ISA, "PCI-ISA"},
- {PCIC_BRIDGE, PCIS_BRIDGE_EISA, "PCI-EISA"},
- {PCIC_BRIDGE, PCIS_BRIDGE_MCA, "PCI-MCA"},
- {PCIC_BRIDGE, PCIS_BRIDGE_PCI, "PCI-PCI"},
- {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, "PCI-PCMCIA"},
- {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, "PCI-NuBus"},
- {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, "PCI-CardBus"},
- {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, "PCI-RACEway"},
- {PCIC_SIMPLECOMM, -1, "simple comms"},
- {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, "UART"}, /* could detect 16550 */
- {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, "parallel port"},
- {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, "multiport serial"},
- {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, "generic modem"},
- {PCIC_BASEPERIPH, -1, "base peripheral"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, "interrupt controller"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, "DMA controller"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, "timer"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, "realtime clock"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, "PCI hot-plug controller"},
- {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, "SD host controller"},
- {PCIC_INPUTDEV, -1, "input device"},
- {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, "keyboard"},
- {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,"digitizer"},
- {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, "mouse"},
- {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, "scanner"},
- {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, "gameport"},
- {PCIC_DOCKING, -1, "docking station"},
- {PCIC_PROCESSOR, -1, "processor"},
- {PCIC_SERIALBUS, -1, "serial bus"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, "FireWire"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, "AccessBus"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, "SSA"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, "USB"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, "Fibre Channel"},
- {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, "SMBus"},
- {PCIC_WIRELESS, -1, "wireless controller"},
- {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, "iRDA"},
- {PCIC_WIRELESS, PCIS_WIRELESS_IR, "IR"},
- {PCIC_WIRELESS, PCIS_WIRELESS_RF, "RF"},
- {PCIC_INTELLIIO, -1, "intelligent I/O controller"},
- {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, "I2O"},
- {PCIC_SATCOM, -1, "satellite communication"},
- {PCIC_SATCOM, PCIS_SATCOM_TV, "sat TV"},
- {PCIC_SATCOM, PCIS_SATCOM_AUDIO, "sat audio"},
- {PCIC_SATCOM, PCIS_SATCOM_VOICE, "sat voice"},
- {PCIC_SATCOM, PCIS_SATCOM_DATA, "sat data"},
- {PCIC_CRYPTO, -1, "encrypt/decrypt"},
- {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, "network/computer crypto"},
- {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, "entertainment crypto"},
- {PCIC_DASP, -1, "dasp"},
- {PCIC_DASP, PCIS_DASP_DPIO, "DPIO module"},
- {0, 0, NULL}
+ {PCIC_OLD, -1, 1, "old"},
+ {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
+ {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
+ {PCIC_STORAGE, -1, 1, "mass storage"},
+ {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
+ {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
+ {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
+ {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
+ {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
+ {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
+ {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
+ {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
+ {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
+ {PCIC_NETWORK, -1, 1, "network"},
+ {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
+ {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
+ {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
+ {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
+ {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
+ {PCIC_DISPLAY, -1, 1, "display"},
+ {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
+ {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
+ {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
+ {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
+ {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
+ {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
+ {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
+ {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
+ {PCIC_MEMORY, -1, 1, "memory"},
+ {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
+ {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
+ {PCIC_BRIDGE, -1, 1, "bridge"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
+ {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
+ {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
+ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
+ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
+ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
+ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
+ {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
+ {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
+ {PCIC_INPUTDEV, -1, 1, "input device"},
+ {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
+ {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
+ {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
+ {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
+ {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
+ {PCIC_DOCKING, -1, 1, "docking station"},
+ {PCIC_PROCESSOR, -1, 1, "processor"},
+ {PCIC_SERIALBUS, -1, 1, "serial bus"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
+ {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
+ {PCIC_WIRELESS, -1, 1, "wireless controller"},
+ {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
+ {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
+ {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
+ {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
+ {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
+ {PCIC_SATCOM, -1, 1, "satellite communication"},
+ {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
+ {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
+ {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
+ {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
+ {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
+ {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
+ {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
+ {PCIC_DASP, -1, 0, "dasp"},
+ {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
+ {0, 0, 0, NULL}
};
void
pci_probe_nomatch(device_t dev, device_t child)
{
- int i;
+ int i, report;
const char *cp, *scp;
char *device;
/*
* Look for a listing for this device in a loaded device database.
*/
+ report = 1;
if ((device = pci_describe_device(child)) != NULL) {
device_printf(dev, "<%s>", device);
free(device, M_DEVBUF);
@@ -4081,19 +4083,25 @@ pci_probe_nomatch(device_t dev, device_t child)
if (pci_nomatch_tab[i].class == pci_get_class(child)) {
if (pci_nomatch_tab[i].subclass == -1) {
cp = pci_nomatch_tab[i].desc;
+ report = pci_nomatch_tab[i].report;
} else if (pci_nomatch_tab[i].subclass ==
pci_get_subclass(child)) {
scp = pci_nomatch_tab[i].desc;
+ report = pci_nomatch_tab[i].report;
}
}
}
- device_printf(dev, "<%s%s%s>",
- cp ? cp : "",
- ((cp != NULL) && (scp != NULL)) ? ", " : "",
- scp ? scp : "");
+ if (report || bootverbose) {
+ device_printf(dev, "<%s%s%s>",
+ cp ? cp : "",
+ ((cp != NULL) && (scp != NULL)) ? ", " : "",
+ scp ? scp : "");
+ }
+ }
+ if (report || bootverbose) {
+ printf(" at device %d.%d (no driver attached)\n",
+ pci_get_slot(child), pci_get_function(child));
}
- printf(" at device %d.%d (no driver attached)\n",
- pci_get_slot(child), pci_get_function(child));
pci_cfg_save(child, device_get_ivars(child), 1);
}
diff --git a/sys/dev/pci/pci_if.m b/sys/dev/pci/pci_if.m
index 0f19358d4a2ab..82864eb71bab9 100644
--- a/sys/dev/pci/pci_if.m
+++ b/sys/dev/pci/pci_if.m
@@ -161,7 +161,7 @@ METHOD int msix_count {
} DEFAULT null_msi_count;
METHOD uint16_t get_rid {
- device_t dev;
- device_t child;
+ device_t dev;
+ device_t child;
};
diff --git a/sys/dev/pci/pcib_if.m b/sys/dev/pci/pcib_if.m
index 835e2c39cdac1..0d7cf16087180 100644
--- a/sys/dev/pci/pcib_if.m
+++ b/sys/dev/pci/pcib_if.m
@@ -169,8 +169,8 @@ METHOD int power_for_sleep {
# Return the PCI Routing Identifier (RID) for the device.
#
METHOD uint16_t get_rid {
- device_t pcib;
- device_t dev;
+ device_t pcib;
+ device_t dev;
} DEFAULT pcib_get_rid;
#
diff --git a/sys/dev/proto/proto.h b/sys/dev/proto/proto.h
new file mode 100644
index 0000000000000..db61da5486544
--- /dev/null
+++ b/sys/dev/proto/proto.h
@@ -0,0 +1,63 @@
+/*-
+ * Copyright (c) 2014 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_PROTO_H_
+#define _DEV_PROTO_H_
+
+#define PROTO_RES_MAX 16
+
+#define PROTO_RES_UNUSED 0
+#define PROTO_RES_PCICFG 10
+
+struct proto_res {
+ int r_type;
+ int r_rid;
+ struct resource *r_res;
+ u_long r_size;
+ union {
+ void *cookie;
+ struct cdev *cdev;
+ } r_u;
+ uintptr_t r_opened;
+};
+
+struct proto_softc {
+ device_t sc_dev;
+ struct proto_res sc_res[PROTO_RES_MAX];
+ int sc_rescnt;
+};
+
+extern devclass_t proto_devclass;
+extern char proto_driver_name[];
+
+int proto_add_resource(struct proto_softc *, int, int, struct resource *);
+
+int proto_attach(device_t dev);
+int proto_detach(device_t dev);
+
+#endif /* _DEV_PROTO_H_ */
diff --git a/sys/dev/proto/proto_bus_pci.c b/sys/dev/proto/proto_bus_pci.c
new file mode 100644
index 0000000000000..012469a2d712c
--- /dev/null
+++ b/sys/dev/proto/proto_bus_pci.c
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 2014 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <sys/sbuf.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <dev/proto/proto.h>
+
+static int proto_pci_probe(device_t dev);
+static int proto_pci_attach(device_t dev);
+
+static device_method_t proto_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, proto_pci_probe),
+ DEVMETHOD(device_attach, proto_pci_attach),
+ DEVMETHOD(device_detach, proto_detach),
+ DEVMETHOD_END
+};
+
+static driver_t proto_pci_driver = {
+ proto_driver_name,
+ proto_pci_methods,
+ sizeof(struct proto_softc),
+};
+
+static int
+proto_pci_probe(device_t dev)
+{
+ struct sbuf *sb;
+
+ /* For now we only attach to function 0 devices. */
+ if (pci_get_function(dev) != 0)
+ return (ENXIO);
+
+ sb = sbuf_new_auto();
+ sbuf_printf(sb, "pci%d:%d:%d:%d", pci_get_domain(dev),
+ pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+ sbuf_finish(sb);
+ device_set_desc_copy(dev, sbuf_data(sb));
+ sbuf_delete(sb);
+ return (BUS_PROBE_HOOVER);
+}
+
+static int
+proto_pci_attach(device_t dev)
+{
+ struct proto_softc *sc;
+ struct resource *res;
+ int bar, rid, type;
+
+ sc = device_get_softc(dev);
+
+ proto_add_resource(sc, PROTO_RES_PCICFG, 0, NULL);
+
+ for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
+ rid = PCIR_BAR(bar);
+ type = SYS_RES_MEMORY;
+ res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE);
+ if (res == NULL) {
+ type = SYS_RES_IOPORT;
+ res = bus_alloc_resource_any(dev, type, &rid,
+ RF_ACTIVE);
+ }
+ if (res != NULL)
+ proto_add_resource(sc, type, rid, res);
+ }
+
+ rid = 0;
+ type = SYS_RES_IRQ;
+ res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE | RF_SHAREABLE);
+ if (res != NULL)
+ proto_add_resource(sc, type, rid, res);
+ return (proto_attach(dev));
+}
+
+DRIVER_MODULE(proto, pci, proto_pci_driver, proto_devclass, NULL, NULL);
diff --git a/sys/dev/proto/proto_core.c b/sys/dev/proto/proto_core.c
new file mode 100644
index 0000000000000..5ea0a6c126678
--- /dev/null
+++ b/sys/dev/proto/proto_core.c
@@ -0,0 +1,384 @@
+/*-
+ * Copyright (c) 2014 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/cons.h>
+#include <sys/fcntl.h>
+#include <sys/interrupt.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/reboot.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/uio.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+
+#include <dev/pci/pcivar.h>
+
+#include <dev/proto/proto.h>
+#include <dev/proto/proto_dev.h>
+
+CTASSERT(SYS_RES_IRQ != PROTO_RES_UNUSED &&
+ SYS_RES_MEMORY != PROTO_RES_UNUSED &&
+ SYS_RES_IOPORT != PROTO_RES_UNUSED);
+CTASSERT(SYS_RES_IRQ != PROTO_RES_PCICFG &&
+ SYS_RES_MEMORY != PROTO_RES_PCICFG &&
+ SYS_RES_IOPORT != PROTO_RES_PCICFG);
+
+devclass_t proto_devclass;
+char proto_driver_name[] = "proto";
+
+static d_open_t proto_open;
+static d_close_t proto_close;
+static d_read_t proto_read;
+static d_write_t proto_write;
+static d_ioctl_t proto_ioctl;
+static d_mmap_t proto_mmap;
+
+struct cdevsw proto_devsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_name = proto_driver_name,
+ .d_open = proto_open,
+ .d_close = proto_close,
+ .d_read = proto_read,
+ .d_write = proto_write,
+ .d_ioctl = proto_ioctl,
+ .d_mmap = proto_mmap,
+};
+
+static MALLOC_DEFINE(M_PROTO, "PROTO", "PROTO driver");
+
+int
+proto_add_resource(struct proto_softc *sc, int type, int rid,
+ struct resource *res)
+{
+ struct proto_res *r;
+
+ if (type == PROTO_RES_UNUSED)
+ return (EINVAL);
+ if (sc->sc_rescnt == PROTO_RES_MAX)
+ return (ENOSPC);
+
+ r = sc->sc_res + sc->sc_rescnt++;
+ r->r_type = type;
+ r->r_rid = rid;
+ r->r_res = res;
+ return (0);
+}
+
+#ifdef notyet
+static int
+proto_intr(void *arg)
+{
+ struct proto_softc *sc = arg;
+
+ /* XXX TODO */
+ return (FILTER_HANDLED);
+}
+#endif
+
+int
+proto_attach(device_t dev)
+{
+ struct proto_softc *sc;
+ struct proto_res *r;
+ u_int res;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+
+ for (res = 0; res < sc->sc_rescnt; res++) {
+ r = sc->sc_res + res;
+ switch (r->r_type) {
+ case SYS_RES_IRQ:
+ /* XXX TODO */
+ break;
+ case SYS_RES_MEMORY:
+ case SYS_RES_IOPORT:
+ r->r_size = rman_get_size(r->r_res);
+ r->r_u.cdev = make_dev(&proto_devsw, res, 0, 0, 0666,
+ "proto/%s/%02x.%s", device_get_desc(dev), r->r_rid,
+ (r->r_type == SYS_RES_IOPORT) ? "io" : "mem");
+ r->r_u.cdev->si_drv1 = sc;
+ r->r_u.cdev->si_drv2 = r;
+ break;
+ case PROTO_RES_PCICFG:
+ r->r_size = 4096;
+ r->r_u.cdev = make_dev(&proto_devsw, res, 0, 0, 0666,
+ "proto/%s/pcicfg", device_get_desc(dev));
+ r->r_u.cdev->si_drv1 = sc;
+ r->r_u.cdev->si_drv2 = r;
+ break;
+ }
+ }
+ return (0);
+}
+
+int
+proto_detach(device_t dev)
+{
+ struct proto_softc *sc;
+ struct proto_res *r;
+ u_int res;
+
+ sc = device_get_softc(dev);
+
+	/* Don't detach if we have open device files. */
+ for (res = 0; res < sc->sc_rescnt; res++) {
+ r = sc->sc_res + res;
+ if (r->r_opened)
+ return (EBUSY);
+ }
+
+ for (res = 0; res < sc->sc_rescnt; res++) {
+ r = sc->sc_res + res;
+ switch (r->r_type) {
+ case SYS_RES_IRQ:
+ /* XXX TODO */
+ break;
+ case SYS_RES_MEMORY:
+ case SYS_RES_IOPORT:
+ case PROTO_RES_PCICFG:
+ destroy_dev(r->r_u.cdev);
+ break;
+ }
+ if (r->r_res != NULL) {
+ bus_release_resource(dev, r->r_type, r->r_rid,
+ r->r_res);
+ r->r_res = NULL;
+ }
+ r->r_type = PROTO_RES_UNUSED;
+ }
+ sc->sc_rescnt = 0;
+ return (0);
+}
+
+/*
+ * Device functions
+ */
+
+static int
+proto_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
+{
+ struct proto_res *r;
+
+ r = cdev->si_drv2;
+ if (!atomic_cmpset_acq_ptr(&r->r_opened, 0UL, (uintptr_t)td->td_proc))
+ return (EBUSY);
+ return (0);
+}
+
+static int
+proto_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
+{
+ struct proto_res *r;
+
+ r = cdev->si_drv2;
+ if (!atomic_cmpset_acq_ptr(&r->r_opened, (uintptr_t)td->td_proc, 0UL))
+ return (ENXIO);
+ return (0);
+}
+
+static int
+proto_read(struct cdev *cdev, struct uio *uio, int ioflag)
+{
+ union {
+ uint8_t x1[8];
+ uint16_t x2[4];
+ uint32_t x4[2];
+ uint64_t x8[1];
+ } buf;
+ struct proto_softc *sc;
+ struct proto_res *r;
+ device_t dev;
+ off_t ofs;
+ u_long width;
+ int error;
+
+ sc = cdev->si_drv1;
+ dev = sc->sc_dev;
+ r = cdev->si_drv2;
+
+ width = uio->uio_resid;
+ if (width < 1 || width > 8 || bitcount16(width) > 1)
+ return (EIO);
+ ofs = uio->uio_offset;
+ if (ofs + width > r->r_size)
+ return (EIO);
+
+ switch (width) {
+ case 1:
+ buf.x1[0] = (r->r_type == PROTO_RES_PCICFG) ?
+ pci_read_config(dev, ofs, 1) : bus_read_1(r->r_res, ofs);
+ break;
+ case 2:
+ buf.x2[0] = (r->r_type == PROTO_RES_PCICFG) ?
+ pci_read_config(dev, ofs, 2) : bus_read_2(r->r_res, ofs);
+ break;
+ case 4:
+ buf.x4[0] = (r->r_type == PROTO_RES_PCICFG) ?
+ pci_read_config(dev, ofs, 4) : bus_read_4(r->r_res, ofs);
+ break;
+#ifndef __i386__
+ case 8:
+ if (r->r_type == PROTO_RES_PCICFG)
+ return (EINVAL);
+ buf.x8[0] = bus_read_8(r->r_res, ofs);
+ break;
+#endif
+ default:
+ return (EIO);
+ }
+
+ error = uiomove(&buf, width, uio);
+ return (error);
+}
+
+static int
+proto_write(struct cdev *cdev, struct uio *uio, int ioflag)
+{
+ union {
+ uint8_t x1[8];
+ uint16_t x2[4];
+ uint32_t x4[2];
+ uint64_t x8[1];
+ } buf;
+ struct proto_softc *sc;
+ struct proto_res *r;
+ device_t dev;
+ off_t ofs;
+ u_long width;
+ int error;
+
+ sc = cdev->si_drv1;
+ dev = sc->sc_dev;
+ r = cdev->si_drv2;
+
+ width = uio->uio_resid;
+ if (width < 1 || width > 8 || bitcount16(width) > 1)
+ return (EIO);
+ ofs = uio->uio_offset;
+ if (ofs + width > r->r_size)
+ return (EIO);
+
+ error = uiomove(&buf, width, uio);
+ if (error)
+ return (error);
+
+ switch (width) {
+ case 1:
+ if (r->r_type == PROTO_RES_PCICFG)
+ pci_write_config(dev, ofs, buf.x1[0], 1);
+ else
+ bus_write_1(r->r_res, ofs, buf.x1[0]);
+ break;
+ case 2:
+ if (r->r_type == PROTO_RES_PCICFG)
+ pci_write_config(dev, ofs, buf.x2[0], 2);
+ else
+ bus_write_2(r->r_res, ofs, buf.x2[0]);
+ break;
+ case 4:
+ if (r->r_type == PROTO_RES_PCICFG)
+ pci_write_config(dev, ofs, buf.x4[0], 4);
+ else
+ bus_write_4(r->r_res, ofs, buf.x4[0]);
+ break;
+#ifndef __i386__
+ case 8:
+ if (r->r_type == PROTO_RES_PCICFG)
+ return (EINVAL);
+ bus_write_8(r->r_res, ofs, buf.x8[0]);
+ break;
+#endif
+ default:
+ return (EIO);
+ }
+
+ return (0);
+}
+
+static int
+proto_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct proto_ioc_region *region;
+ struct proto_res *r;
+ int error;
+
+ r = cdev->si_drv2;
+
+ error = 0;
+ switch (cmd) {
+ case PROTO_IOC_REGION:
+ region = (struct proto_ioc_region *)data;
+ region->size = r->r_size;
+ if (r->r_type == PROTO_RES_PCICFG)
+ region->address = 0;
+ else
+ region->address = rman_get_start(r->r_res);
+ break;
+ default:
+ error = ENOIOCTL;
+ break;
+ }
+ return (error);
+}
+
+static int
+proto_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
+ int prot, vm_memattr_t *memattr)
+{
+ struct proto_res *r;
+
+ r = cdev->si_drv2;
+
+ if (r->r_type != SYS_RES_MEMORY)
+ return (ENXIO);
+ if (offset & PAGE_MASK)
+ return (EINVAL);
+ if (prot & PROT_EXEC)
+ return (EACCES);
+ if (offset >= r->r_size)
+ return (EINVAL);
+ *paddr = rman_get_start(r->r_res) + offset;
+#ifndef __sparc64__
+ *memattr = VM_MEMATTR_UNCACHEABLE;
+#endif
+ return (0);
+}
diff --git a/sys/dev/proto/proto_dev.h b/sys/dev/proto/proto_dev.h
new file mode 100644
index 0000000000000..0ea37495ce3be
--- /dev/null
+++ b/sys/dev/proto/proto_dev.h
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2014 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_PROTO_DEV_H_
+#define _DEV_PROTO_DEV_H_
+
+#include <sys/ioccom.h>
+
+#define PROTO_IOC_CLASS 'h'
+
+struct proto_ioc_region {
+ unsigned long address;
+ unsigned long size;
+};
+
+#define PROTO_IOC_REGION _IOWR(PROTO_IOC_CLASS, 1, struct proto_ioc_region)
+
+#endif /* _DEV_PROTO_DEV_H_ */
diff --git a/sys/dev/sdhci/sdhci_fdt.c b/sys/dev/sdhci/sdhci_fdt.c
index 65eac6df8b6aa..6a3d702a4c4fb 100644
--- a/sys/dev/sdhci/sdhci_fdt.c
+++ b/sys/dev/sdhci/sdhci_fdt.c
@@ -66,6 +66,7 @@ struct sdhci_fdt_softc {
device_t dev; /* Controller device */
u_int quirks; /* Chip specific quirks */
u_int caps; /* If we override SDHCI_CAPABILITIES */
+ uint32_t max_clk; /* Max possible freq */
struct resource *irq_res; /* IRQ resource */
void *intrhand; /* Interrupt handle */
@@ -156,6 +157,7 @@ sdhci_fdt_probe(device_t dev)
sc->quirks = 0;
sc->num_slots = 1;
+ sc->max_clk = 0;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
@@ -170,11 +172,14 @@ sdhci_fdt_probe(device_t dev)
node = ofw_bus_get_node(dev);
- /* Allow dts to patch quirks and slots. */
- if ((OF_getprop(node, "quirks", &cid, sizeof(cid))) > 0)
- sc->quirks = fdt32_to_cpu(cid);
- if ((OF_getprop(node, "num-slots", &cid, sizeof(cid))) > 0)
- sc->num_slots = fdt32_to_cpu(cid);
+ /* Allow dts to patch quirks, slots, and max-frequency. */
+ if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0)
+ sc->quirks = cid;
+ if ((OF_getencprop(node, "num-slots", &cid, sizeof(cid))) > 0)
+ sc->num_slots = cid;
+ if ((OF_getencprop(node, "max-frequency", &cid, sizeof(cid))) > 0)
+ sc->max_clk = cid;
+
return (0);
}
@@ -214,6 +219,7 @@ sdhci_fdt_attach(device_t dev)
slot->quirks = sc->quirks;
slot->caps = sc->caps;
+ slot->max_clk = sc->max_clk;
if (sdhci_init_slot(dev, slot, i) != 0)
continue;
diff --git a/sys/dev/usb/controller/dwc_otg.c b/sys/dev/usb/controller/dwc_otg.c
index 8c708019ad70e..d8bf7ea39be70 100644
--- a/sys/dev/usb/controller/dwc_otg.c
+++ b/sys/dev/usb/controller/dwc_otg.c
@@ -93,15 +93,13 @@
DWC_OTG_BUS2SC(USB_DMATAG_TO_XROOT((pc)->tag_parent)->bus)
#define DWC_OTG_MSK_GINT_ENABLED \
- (GINTSTS_ENUMDONE | \
- GINTSTS_USBRST | \
- GINTSTS_USBSUSP | \
- GINTSTS_IEPINT | \
- GINTSTS_RXFLVL | \
- GINTSTS_SESSREQINT | \
+ (GINTMSK_ENUMDONEMSK | \
+ GINTMSK_USBRSTMSK | \
+ GINTMSK_USBSUSPMSK | \
+ GINTMSK_IEPINTMSK | \
+ GINTMSK_SESSREQINTMSK | \
GINTMSK_OTGINTMSK | \
- GINTMSK_HCHINTMSK | \
- GINTSTS_PRTINT)
+ GINTMSK_PRTINTMSK)
static int dwc_otg_use_hsic;
@@ -207,6 +205,12 @@ dwc_otg_init_fifo(struct dwc_otg_softc *sc, uint8_t mode)
/* reset active endpoints */
sc->sc_active_rx_ep = 0;
+ /* reset TX size */
+ sc->sc_tx_cur_size = 0;
+
+ /* reset TT info */
+ memset(sc->sc_tt_info, 0, sizeof(sc->sc_tt_info));
+
fifo_size /= 2;
DWC_OTG_WRITE_4(sc, DOTG_GNPTXFSIZ,
@@ -215,23 +219,20 @@ dwc_otg_init_fifo(struct dwc_otg_softc *sc, uint8_t mode)
tx_start += fifo_size;
+ for (x = 0; x != sc->sc_host_ch_max; x++) {
+ /* disable all host interrupts */
+ DWC_OTG_WRITE_4(sc, DOTG_HCINTMSK(x), 0);
+ }
+
DWC_OTG_WRITE_4(sc, DOTG_HPTXFSIZ,
((fifo_size / 4) << 16) |
(tx_start / 4));
- for (x = 0; x != sc->sc_host_ch_max; x++) {
- /* enable interrupts */
- DWC_OTG_WRITE_4(sc, DOTG_HCINTMSK(x),
- HCINT_STALL | HCINT_BBLERR |
- HCINT_XACTERR |
- HCINT_NAK | HCINT_ACK | HCINT_NYET |
- HCINT_CHHLTD | HCINT_FRMOVRUN |
- HCINT_DATATGLERR);
- }
+ /* store maximum TX FIFO size */
+ sc->sc_tx_max_size = fifo_size;
- /* enable host channel interrupts */
- DWC_OTG_WRITE_4(sc, DOTG_HAINTMSK,
- (1U << sc->sc_host_ch_max) - 1U);
+ /* disable all host channel interrupts */
+ DWC_OTG_WRITE_4(sc, DOTG_HAINTMSK, 0);
}
if (mode == DWC_MODE_DEVICE) {
@@ -309,11 +310,47 @@ dwc_otg_init_fifo(struct dwc_otg_softc *sc, uint8_t mode)
} else {
/* reset active endpoints */
sc->sc_active_rx_ep = 0;
+
+ /* reset TX size */
+ sc->sc_tx_cur_size = 0;
+
+ /* reset TT info */
+ memset(sc->sc_tt_info, 0, sizeof(sc->sc_tt_info));
}
return (0);
}
static void
+dwc_otg_update_host_frame_interval(struct dwc_otg_softc *sc)
+{
+ uint32_t temp;
+
+ /* setup HOST frame interval register, based on existing value */
+ temp = DWC_OTG_READ_4(sc, DOTG_HFIR) & HFIR_FRINT_MASK;
+ if (temp >= 10000)
+ temp /= 1000;
+ else
+ temp /= 125;
+
+ /* figure out nearest X-tal value */
+ if (temp >= 54)
+ temp = 60; /* MHz */
+ else if (temp >= 39)
+ temp = 48; /* MHz */
+ else
+ temp = 30; /* MHz */
+
+ if (sc->sc_flags.status_high_speed)
+ temp *= 125;
+ else
+ temp *= 1000;
+
+ DPRINTF("HFIR=0x%08x\n", temp);
+
+ DWC_OTG_WRITE_4(sc, DOTG_HFIR, temp);
+}
+
+static void
dwc_otg_clocks_on(struct dwc_otg_softc *sc)
{
if (sc->sc_flags.clocks_off &&
@@ -376,9 +413,11 @@ dwc_otg_pull_down(struct dwc_otg_softc *sc)
static void
dwc_otg_enable_sof_irq(struct dwc_otg_softc *sc)
{
- if (sc->sc_irq_mask & GINTSTS_SOF)
+ /* In device mode we don't use the SOF interrupt */
+ if (sc->sc_flags.status_device_mode != 0 ||
+ (sc->sc_irq_mask & GINTMSK_SOFMSK) != 0)
return;
- sc->sc_irq_mask |= GINTSTS_SOF;
+ sc->sc_irq_mask |= GINTMSK_SOFMSK;
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
}
@@ -395,8 +434,8 @@ dwc_otg_resume_irq(struct dwc_otg_softc *sc)
* Disable resume interrupt and enable suspend
* interrupt:
*/
- sc->sc_irq_mask &= ~GINTSTS_WKUPINT;
- sc->sc_irq_mask |= GINTSTS_USBSUSP;
+ sc->sc_irq_mask &= ~GINTMSK_WKUPINTMSK;
+ sc->sc_irq_mask |= GINTMSK_USBSUSPMSK;
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
}
@@ -418,8 +457,8 @@ dwc_otg_suspend_irq(struct dwc_otg_softc *sc)
* Disable suspend interrupt and enable resume
* interrupt:
*/
- sc->sc_irq_mask &= ~GINTSTS_USBSUSP;
- sc->sc_irq_mask |= GINTSTS_WKUPINT;
+ sc->sc_irq_mask &= ~GINTMSK_USBSUSPMSK;
+ sc->sc_irq_mask |= GINTMSK_WKUPINTMSK;
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
}
@@ -493,9 +532,11 @@ dwc_otg_common_rx_ack(struct dwc_otg_softc *sc)
{
DPRINTFN(5, "RX status clear\n");
- /* enable RX FIFO level interrupt */
- sc->sc_irq_mask |= GINTSTS_RXFLVL;
- DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+ if (sc->sc_flags.status_device_mode != 0) {
+ /* enable RX FIFO level interrupt */
+ sc->sc_irq_mask |= GINTMSK_RXFLVLMSK;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+ }
/* clear cached status */
sc->sc_last_rx_status = 0;
@@ -506,6 +547,7 @@ dwc_otg_clear_hcint(struct dwc_otg_softc *sc, uint8_t x)
{
uint32_t hcint;
+ /* clear all pending interrupts */
hcint = DWC_OTG_READ_4(sc, DOTG_HCINT(x));
DWC_OTG_WRITE_4(sc, DOTG_HCINT(x), hcint);
@@ -513,6 +555,10 @@ dwc_otg_clear_hcint(struct dwc_otg_softc *sc, uint8_t x)
sc->sc_chan_state[x].hcint = 0;
}
+/*
+ * This function waits until a DWC OTG host channel is ready to be
+ * used again:
+ */
static uint8_t
dwc_otg_host_channel_wait(struct dwc_otg_td *td)
{
@@ -545,6 +591,9 @@ dwc_otg_host_channel_wait(struct dwc_otg_td *td)
sc->sc_chan_state[td->channel].allocated = 0;
sc->sc_chan_state[x].allocated = 1;
+ sc->sc_chan_state[x].tx_size =
+ sc->sc_chan_state[td->channel].tx_size;
+
if (sc->sc_chan_state[td->channel].suspended) {
sc->sc_chan_state[td->channel].suspended = 0;
sc->sc_chan_state[x].suspended = 1;
@@ -579,6 +628,7 @@ static uint8_t
dwc_otg_host_channel_alloc(struct dwc_otg_td *td)
{
struct dwc_otg_softc *sc;
+ uint32_t tx_size;
uint8_t x;
uint8_t max_channel;
@@ -591,9 +641,25 @@ dwc_otg_host_channel_alloc(struct dwc_otg_td *td)
if ((td->hcchar & HCCHAR_EPNUM_MASK) == 0) {
max_channel = 1;
x = 0;
+ tx_size = td->max_packet_size;
+ if ((sc->sc_tx_cur_size + tx_size) > sc->sc_tx_max_size) {
+ DPRINTF("Too little FIFO space\n");
+ return (1); /* too little FIFO */
+ }
} else {
max_channel = sc->sc_host_ch_max;
x = 1;
+ if ((td->hcchar & HCCHAR_EPDIR) == HCCHAR_EPDIR_OUT) {
+ tx_size = td->max_packet_size;
+ if (td->hcsplt != 0 && tx_size > HCSPLT_XACTLEN_MAX)
+ tx_size = HCSPLT_XACTLEN_MAX;
+ if ((sc->sc_tx_cur_size + tx_size) > sc->sc_tx_max_size) {
+ DPRINTF("Too little FIFO space\n");
+ return (1); /* too little FIFO */
+ }
+ } else {
+ tx_size = 0;
+ }
}
for (; x != max_channel; x++) {
@@ -604,6 +670,10 @@ dwc_otg_host_channel_alloc(struct dwc_otg_td *td)
continue;
sc->sc_chan_state[x].allocated = 1;
+ sc->sc_chan_state[x].tx_size = tx_size;
+
+ /* keep track of used FIFO */
+ sc->sc_tx_cur_size += tx_size;
/* clear interrupts */
dwc_otg_clear_hcint(sc, x);
@@ -635,8 +705,6 @@ dwc_otg_host_channel_disable(struct dwc_otg_softc *sc, uint8_t x)
HCCHAR_CHENA | HCCHAR_CHDIS);
/* don't re-use channel until next SOF is transmitted */
sc->sc_chan_state[x].wait_sof = 2;
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
}
}
@@ -663,6 +731,9 @@ dwc_otg_host_channel_free(struct dwc_otg_td *td)
sc->sc_chan_state[x].allocated = 0;
sc->sc_chan_state[x].suspended = 0;
+ /* keep track of used FIFO */
+ sc->sc_tx_cur_size -= sc->sc_chan_state[x].tx_size;
+
/* ack any pending messages */
if (sc->sc_last_rx_status != 0 &&
GRXSTSRD_CHNUM_GET(sc->sc_last_rx_status) == x) {
@@ -682,7 +753,7 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
uint32_t hcchar;
if (dwc_otg_host_channel_alloc(td))
- return (1); /* busy */
+ goto busy;
/* get pointer to softc */
sc = DWC_OTG_PC2SC(td->pc);
@@ -701,13 +772,13 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
} else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
}
}
@@ -724,6 +795,8 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
switch (td->state) {
case DWC_CHAN_ST_START:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
goto send_pkt;
case DWC_CHAN_ST_WAIT_ANE:
@@ -731,6 +804,7 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & (HCINT_ACK | HCINT_NYET)) {
@@ -739,14 +813,17 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
td->offset += td->tx_bytes;
td->remainder -= td->tx_bytes;
td->toggle = 1;
- return (0); /* complete */
+ td->tt_scheduled = 0;
+ goto complete;
}
break;
+
case DWC_CHAN_ST_WAIT_S_ANE:
if (hcint & (HCINT_RETRY | HCINT_ERRORS)) {
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & (HCINT_ACK | HCINT_NYET)) {
@@ -755,6 +832,7 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
goto send_cpkt;
}
break;
+
case DWC_CHAN_ST_WAIT_C_ANE:
if (hcint & HCINT_NYET) {
if (!dwc_otg_host_channel_wait(td))
@@ -765,6 +843,7 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & HCINT_ACK) {
@@ -773,35 +852,33 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
td->offset += td->tx_bytes;
td->remainder -= td->tx_bytes;
td->toggle = 1;
- return (0); /* complete */
+ goto complete;
}
break;
- case DWC_CHAN_ST_TX_PKT_SYNC:
- goto send_pkt_sync;
+
+ case DWC_CHAN_ST_WAIT_C_PKT:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
+ goto send_cpkt;
+
default:
break;
}
- return (1); /* busy */
+ goto busy;
send_pkt:
if (sizeof(req) != td->remainder) {
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
}
-send_pkt_sync:
if (td->hcsplt != 0) {
- uint32_t count;
-
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
- /* check for not first microframe */
- if (count != 0) {
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
- /* set state */
- td->state = DWC_CHAN_ST_TX_PKT_SYNC;
- dwc_otg_host_channel_free(td);
- return (1); /* busy */
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0 ||
+ (sc->sc_last_frame_num & 7) < td->tt_start_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_START;
+ goto tt_wait;
}
td->hcsplt &= ~HCSPLT_COMPSPLT;
@@ -832,9 +909,19 @@ send_pkt_sync:
/* store number of bytes transmitted */
td->tx_bytes = sizeof(req);
- return (1); /* busy */
+ goto busy;
send_cpkt:
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0 ||
+ (sc->sc_last_frame_num & 7) < td->tt_complete_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_WAIT_C_PKT;
+ goto tt_wait;
+ }
+ /* wait until next slot before trying again */
+ td->tt_complete_slot++;
+
td->hcsplt |= HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_C_ANE;
@@ -848,8 +935,15 @@ send_cpkt:
/* must enable channel before writing data to FIFO */
DWC_OTG_WRITE_4(sc, DOTG_HCCHAR(td->channel), hcchar);
+ goto busy;
+tt_wait:
+ /* free allocated channel */
+ dwc_otg_host_channel_free(td);
+busy:
return (1); /* busy */
+complete:
+ return (0); /* complete */
}
static uint8_t
@@ -984,6 +1078,25 @@ not_complete:
}
static uint8_t
+dwc_otg_host_rate_check_interrupt(struct dwc_otg_softc *sc, struct dwc_otg_td *td)
+{
+ uint8_t delta;
+
+ delta = sc->sc_tmr_val - td->tmr_val;
+ if (delta >= 128)
+ return (1); /* busy */
+
+ td->tmr_val = sc->sc_tmr_val + td->tmr_res;
+
+ /* set toggle, if any */
+ if (td->set_toggle) {
+ td->set_toggle = 0;
+ td->toggle = 1;
+ }
+ return (0);
+}
+
+static uint8_t
dwc_otg_host_rate_check(struct dwc_otg_td *td)
{
struct dwc_otg_softc *sc;
@@ -992,31 +1105,29 @@ dwc_otg_host_rate_check(struct dwc_otg_td *td)
/* get pointer to softc */
sc = DWC_OTG_PC2SC(td->pc);
+ if (td->channel < DWC_OTG_MAX_CHANNELS &&
+ sc->sc_chan_state[td->channel].suspended)
+ goto busy;
+
ep_type = ((td->hcchar &
HCCHAR_EPTYPE_MASK) >> HCCHAR_EPTYPE_SHIFT);
- if (sc->sc_chan_state[td->channel].suspended)
- goto busy;
-
if (ep_type == UE_ISOCHRONOUS) {
- if (td->tmr_val & 1)
- td->hcchar |= HCCHAR_ODDFRM;
- else
- td->hcchar &= ~HCCHAR_ODDFRM;
- td->tmr_val += td->tmr_res;
- } else if (ep_type == UE_INTERRUPT) {
- uint8_t delta;
- delta = sc->sc_tmr_val - td->tmr_val;
- if (delta >= 128)
+ /* non TT isochronous traffic */
+ if ((td->tmr_val != 0) ||
+ (sc->sc_last_frame_num & (td->tmr_res - 1))) {
goto busy;
- td->tmr_val = sc->sc_tmr_val + td->tmr_res;
+ }
+ td->tmr_val = 1; /* executed */
+ td->toggle = 0;
+
+ } else if (ep_type == UE_INTERRUPT) {
+ if (!td->tt_scheduled)
+ goto busy;
+ td->tt_scheduled = 0;
} else if (td->did_nak != 0) {
goto busy;
- }
-
- if (ep_type == UE_ISOCHRONOUS) {
- td->toggle = 0;
} else if (td->set_toggle) {
td->set_toggle = 0;
td->toggle = 1;
@@ -1036,7 +1147,7 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
uint8_t ep_type;
if (dwc_otg_host_channel_alloc(td))
- return (1); /* busy */
+ goto busy;
/* get pointer to softc */
sc = DWC_OTG_PC2SC(td->pc);
@@ -1060,13 +1171,15 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
} else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
- td->error_any = 1;
- return (0); /* complete */
+ if (ep_type != UE_ISOCHRONOUS) {
+ td->error_any = 1;
+ goto complete;
+ }
}
}
@@ -1103,25 +1216,42 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
break;
}
- td->toggle ^= 1;
-
/* get the packet byte count */
count = GRXSTSRD_BCNT_GET(sc->sc_last_rx_status);
- /* verify the packet byte count */
- if (count != td->max_packet_size) {
- if (count < td->max_packet_size) {
- /* we have a short packet */
- td->short_pkt = 1;
- td->got_short = 1;
+ /* check for isochronous transfer or high-speed bandwidth endpoint */
+ if (ep_type == UE_ISOCHRONOUS || td->max_packet_count > 1) {
+ if ((sc->sc_last_rx_status & GRXSTSRD_DPID_MASK) != GRXSTSRD_DPID_DATA0) {
+ td->tt_xactpos = HCSPLT_XACTPOS_MIDDLE;
} else {
- /* invalid USB packet */
- td->error_any = 1;
+ td->tt_xactpos = HCSPLT_XACTPOS_BEGIN;
+
+ /* verify the packet byte count */
+ if (count < td->max_packet_size) {
+ /* we have a short packet */
+ td->short_pkt = 1;
+ td->got_short = 1;
+ }
+ }
+ td->toggle = 0;
+ } else {
+ /* verify the packet byte count */
+ if (count != td->max_packet_size) {
+ if (count < td->max_packet_size) {
+ /* we have a short packet */
+ td->short_pkt = 1;
+ td->got_short = 1;
+ } else {
+ /* invalid USB packet */
+ td->error_any = 1;
- /* release FIFO */
- dwc_otg_common_rx_ack(sc);
- return (0); /* we are complete */
+ /* release FIFO */
+ dwc_otg_common_rx_ack(sc);
+ goto complete;
+ }
}
+ td->toggle ^= 1;
+ td->tt_scheduled = 0;
}
/* verify the packet byte count */
@@ -1131,7 +1261,7 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
/* release FIFO */
dwc_otg_common_rx_ack(sc);
- return (0); /* we are complete */
+ goto complete;
}
usbd_copy_in(td->pc, td->offset,
@@ -1144,7 +1274,6 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
break;
default:
- DPRINTF("OTHER\n");
break;
}
/* release FIFO */
@@ -1153,6 +1282,8 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
check_state:
switch (td->state) {
case DWC_CHAN_ST_START:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
if (td->hcsplt != 0)
goto receive_spkt;
else
@@ -1164,6 +1295,7 @@ check_state:
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
if (td->hcsplt != 0)
goto receive_spkt;
else
@@ -1171,11 +1303,13 @@ check_state:
}
if (!(hcint & HCINT_SOFTWARE_ONLY)) {
if (hcint & HCINT_NYET) {
- if (td->hcsplt != 0) {
- if (!dwc_otg_host_channel_wait(td))
- break;
- goto receive_pkt;
+ if (ep_type == UE_ISOCHRONOUS) {
+ /* we missed the service interval */
+ goto complete;
}
+ if (!dwc_otg_host_channel_wait(td))
+ break;
+ goto receive_pkt;
}
break;
}
@@ -1183,29 +1317,44 @@ check_state:
if (!dwc_otg_host_channel_wait(td))
break;
- /* check if we are complete */
- if ((td->remainder == 0) || (td->got_short != 0)) {
- if (td->short_pkt)
- return (0); /* complete */
+ if (ep_type == UE_ISOCHRONOUS) {
+ /* check if we are complete */
+ if ((td->remainder == 0) ||
+ (td->tt_xactpos == HCSPLT_XACTPOS_BEGIN))
+ goto complete;
- /*
- * Else need to receive a zero length
- * packet.
- */
- }
- if (td->hcsplt != 0)
- goto receive_spkt;
- else
goto receive_pkt;
+ } else {
+ /* check if we are complete */
+ if ((td->remainder == 0) || (td->got_short != 0)) {
+ if (td->short_pkt)
+ goto complete;
+
+ /*
+ * Else need to receive a zero length
+ * packet.
+ */
+ }
+ td->tt_scheduled = 0;
+ if (td->hcsplt != 0)
+ goto receive_spkt;
+ else
+ goto receive_pkt;
+ }
}
break;
case DWC_CHAN_ST_WAIT_S_ANE:
+ /*
+ * NOTE: The DWC OTG hardware provides a fake ACK in
+ * case of interrupt and isochronous transfers:
+ */
if (hcint & (HCINT_RETRY | HCINT_ERRORS)) {
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto receive_spkt;
}
if (hcint & (HCINT_ACK | HCINT_NYET)) {
@@ -1215,100 +1364,91 @@ check_state:
}
break;
- case DWC_CHAN_ST_RX_PKT:
+ case DWC_CHAN_ST_WAIT_C_PKT:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
goto receive_pkt;
- case DWC_CHAN_ST_RX_SPKT:
- goto receive_spkt;
-
- case DWC_CHAN_ST_RX_SPKT_SYNC:
- goto receive_spkt_sync;
-
default:
break;
}
goto busy;
receive_pkt:
- if (td->hcsplt != 0) {
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
-
- /* check for even microframes */
- if (count == td->curr_frame) {
- td->state = DWC_CHAN_ST_RX_PKT;
- dwc_otg_host_channel_free(td);
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
- goto busy;
- } else if (count == 0) {
- /* check for start split timeout */
- goto receive_spkt;
+ if (td->hcsplt != 0) {
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0 ||
+ (sc->sc_last_frame_num & 7) < td->tt_complete_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_WAIT_C_PKT;
+ goto tt_wait;
}
+ /* wait until next slot before trying again */
+ td->tt_complete_slot++;
- td->curr_frame = count;
+ /* set toggle, if any */
+ if (td->set_toggle) {
+ td->set_toggle = 0;
+ td->toggle = 1;
+ }
td->hcsplt |= HCSPLT_COMPSPLT;
- } else if (dwc_otg_host_rate_check(td)) {
- td->state = DWC_CHAN_ST_RX_PKT;
+ count = HCSPLT_XACTLEN_MAX;
+ } else if (td->tt_xactpos == HCSPLT_XACTPOS_BEGIN &&
+ dwc_otg_host_rate_check(td)) {
+ td->state = DWC_CHAN_ST_START;
dwc_otg_host_channel_free(td);
goto busy;
+ } else {
+ count = td->max_packet_size;
}
-
td->state = DWC_CHAN_ST_WAIT_ANE;
/* receive one packet */
DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
- (td->max_packet_size << HCTSIZ_XFERSIZE_SHIFT) |
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
(1 << HCTSIZ_PKTCNT_SHIFT) |
(td->toggle ? (HCTSIZ_PID_DATA1 << HCTSIZ_PID_SHIFT) :
(HCTSIZ_PID_DATA0 << HCTSIZ_PID_SHIFT)));
DWC_OTG_WRITE_4(sc, DOTG_HCSPLT(td->channel), td->hcsplt);
+ /* send ASAP */
+ if ((ep_type == UE_ISOCHRONOUS) && !(sc->sc_last_frame_num & 1))
+ td->hcchar |= HCCHAR_ODDFRM;
+ else
+ td->hcchar &= ~HCCHAR_ODDFRM;
+
hcchar = td->hcchar;
hcchar |= HCCHAR_EPDIR_IN;
/* must enable channel before data can be received */
DWC_OTG_WRITE_4(sc, DOTG_HCCHAR(td->channel), hcchar);
-
goto busy;
receive_spkt:
- if (dwc_otg_host_rate_check(td)) {
- td->state = DWC_CHAN_ST_RX_SPKT;
- dwc_otg_host_channel_free(td);
- goto busy;
- }
-
-receive_spkt_sync:
- if (ep_type == UE_INTERRUPT ||
- ep_type == UE_ISOCHRONOUS) {
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
- td->curr_frame = count;
-
- /* check for non-zero microframe */
- if (count != 0) {
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
- /* set state */
- td->state = DWC_CHAN_ST_RX_SPKT_SYNC;
- dwc_otg_host_channel_free(td);
- goto busy;
- }
- } else {
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
- td->curr_frame = count;
-
- /* check for two last frames */
- if (count >= 6) {
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
- /* set state */
- td->state = DWC_CHAN_ST_RX_SPKT_SYNC;
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0) {
+ if (ep_type == UE_INTERRUPT) {
+ td->state = DWC_CHAN_ST_START;
dwc_otg_host_channel_free(td);
goto busy;
}
+ /* set return state */
+ td->state = DWC_CHAN_ST_START;
+ goto tt_wait;
+ }
+ if ((sc->sc_last_frame_num & 7) < td->tt_start_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_START;
+ goto tt_wait;
}
+ /* send ASAP */
+ if ((ep_type == UE_ISOCHRONOUS) && !(sc->sc_last_frame_num & 1))
+ td->hcchar |= HCCHAR_ODDFRM;
+ else
+ td->hcchar &= ~HCCHAR_ODDFRM;
+
td->hcsplt &= ~HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_S_ANE;
@@ -1324,9 +1464,15 @@ receive_spkt_sync:
/* must enable channel before data can be received */
DWC_OTG_WRITE_4(sc, DOTG_HCCHAR(td->channel), hcchar);
+ goto busy;
+tt_wait:
+ /* free allocated channel */
+ dwc_otg_host_channel_free(td);
busy:
return (1); /* busy */
+complete:
+ return (0); /* complete */
}
static uint8_t
@@ -1452,7 +1598,7 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
uint8_t ep_type;
if (dwc_otg_host_channel_alloc(td))
- return (1); /* busy */
+ goto busy;
/* get pointer to softc */
sc = DWC_OTG_PC2SC(td->pc);
@@ -1474,13 +1620,13 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
} else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
td->error_any = 1;
- return (0); /* complete */
+ goto complete;
}
}
@@ -1497,6 +1643,8 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
switch (td->state) {
case DWC_CHAN_ST_START:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
goto send_pkt;
case DWC_CHAN_ST_WAIT_ANE:
@@ -1504,6 +1652,7 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & (HCINT_ACK | HCINT_NYET)) {
@@ -1513,11 +1662,12 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
td->offset += td->tx_bytes;
td->remainder -= td->tx_bytes;
td->toggle ^= 1;
+ td->tt_scheduled = 0;
/* check remainder */
if (td->remainder == 0) {
if (td->short_pkt)
- return (0); /* complete */
+ goto complete;
/*
* Else we need to transmit a short
@@ -1527,11 +1677,13 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
goto send_pkt;
}
break;
+
case DWC_CHAN_ST_WAIT_S_ANE:
if (hcint & (HCINT_RETRY | HCINT_ERRORS)) {
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & (HCINT_ACK | HCINT_NYET)) {
@@ -1540,6 +1692,7 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
goto send_cpkt;
}
break;
+
case DWC_CHAN_ST_WAIT_C_ANE:
if (hcint & HCINT_NYET) {
if (!dwc_otg_host_channel_wait(td))
@@ -1550,6 +1703,7 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
if (!dwc_otg_host_channel_wait(td))
break;
td->did_nak = 1;
+ td->tt_scheduled = 0;
goto send_pkt;
}
if (hcint & HCINT_ACK) {
@@ -1558,11 +1712,12 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
td->offset += td->tx_bytes;
td->remainder -= td->tx_bytes;
td->toggle ^= 1;
+ td->tt_scheduled = 0;
/* check remainder */
if (td->remainder == 0) {
if (td->short_pkt)
- return (0); /* complete */
+ goto complete;
/* else we need to transmit a short packet */
}
@@ -1570,64 +1725,204 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
}
break;
- case DWC_CHAN_ST_TX_PKT:
- goto send_pkt;
+ case DWC_CHAN_ST_WAIT_C_PKT:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
+ goto send_cpkt;
- case DWC_CHAN_ST_TX_PKT_SYNC:
- goto send_pkt_sync;
+ case DWC_CHAN_ST_TX_WAIT_ISOC:
- case DWC_CHAN_ST_TX_CPKT:
- goto send_cpkt;
+ /* Check if isochronous OUT traffic is complete */
+ if ((hcint & HCINT_ACK) == 0)
+ break;
+
+ td->offset += td->tx_bytes;
+ td->remainder -= td->tx_bytes;
+ /* Update split token according to specification */
+ if (td->hcsplt != 0) {
+ if (td->tt_xactpos == HCSPLT_XACTPOS_BEGIN)
+ td->tt_xactpos = HCSPLT_XACTPOS_MIDDLE;
+ } else if (td->max_packet_count > 1) {
+ td->tt_xactpos++;
+ }
+
+ dwc_otg_host_channel_disable(sc, td->channel);
+
+ if (td->remainder == 0)
+ goto complete;
+
+ td->state = DWC_CHAN_ST_TX_PKT_ISOC;
+
+ /* FALLTHROUGH */
+
+ case DWC_CHAN_ST_TX_PKT_ISOC:
+ if (!dwc_otg_host_channel_wait(td))
+ break;
+
+ if (td->hcsplt != 0) {
+ if ((sc->sc_last_frame_num & 7) < td->tt_start_slot)
+ goto tt_wait;
+ /* packets must be 125us apart */
+ td->tt_start_slot++;
+ }
+ goto send_isoc_pkt;
default:
break;
}
goto busy;
send_pkt:
- if (dwc_otg_host_rate_check(td)) {
- td->state = DWC_CHAN_ST_TX_PKT;
+ if (td->hcsplt != 0) {
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0) {
+ if (ep_type == UE_INTERRUPT) {
+ td->state = DWC_CHAN_ST_START;
+ dwc_otg_host_channel_free(td);
+ goto busy;
+ }
+ /* set return state */
+ td->state = DWC_CHAN_ST_START;
+ goto tt_wait;
+ }
+ if ((sc->sc_last_frame_num & 7) < td->tt_start_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_START;
+ goto tt_wait;
+ }
+
+ /* packets must be 125us apart */
+ td->tt_start_slot++;
+
+ /* set toggle, if any */
+ if (td->set_toggle) {
+ td->set_toggle = 0;
+ td->toggle = 1;
+ }
+ } else if (dwc_otg_host_rate_check(td)) {
+ td->state = DWC_CHAN_ST_START;
dwc_otg_host_channel_free(td);
goto busy;
}
-send_pkt_sync:
- if (td->hcsplt != 0) {
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
- /* check for first or last microframe */
- if (count == 7 || count == 0) {
- /* enable SOF interrupt */
- dwc_otg_enable_sof_irq(sc);
- /* set state */
- td->state = DWC_CHAN_ST_TX_PKT_SYNC;
- dwc_otg_host_channel_free(td);
- goto busy;
+ if (ep_type == UE_ISOCHRONOUS) {
+send_isoc_pkt:
+ /* Isochronous OUT transfers don't have any ACKs */
+ td->state = DWC_CHAN_ST_TX_WAIT_ISOC;
+ td->hcsplt &= ~HCSPLT_COMPSPLT;
+ if (td->hcsplt != 0) {
+ /* get maximum transfer length */
+ count = td->remainder;
+
+ /* Update split token according to specification */
+ if (td->tt_xactpos == HCSPLT_XACTPOS_BEGIN) {
+ if (count <= HCSPLT_XACTLEN_MAX)
+ td->tt_xactpos = HCSPLT_XACTPOS_ALL;
+ else
+ count = HCSPLT_XACTLEN_MAX;
+ } else if (td->tt_xactpos == HCSPLT_XACTPOS_MIDDLE) {
+ if (count <= HCSPLT_XACTLEN_MAX)
+ td->tt_xactpos = HCSPLT_XACTPOS_LAST;
+ else
+ count = HCSPLT_XACTLEN_MAX;
+ }
+
+ /* Update transaction position */
+ td->hcsplt &= ~HCSPLT_XACTPOS_MASK;
+ td->hcsplt |= ((uint32_t)td->tt_xactpos << HCSPLT_XACTPOS_SHIFT);
+ } else {
+ /* send one packet at a time */
+ count = td->max_packet_size;
+ if (td->remainder < count) {
+ /* we have a short packet */
+ td->short_pkt = 1;
+ count = td->remainder;
+ }
}
+ } else if (td->hcsplt != 0) {
td->hcsplt &= ~HCSPLT_COMPSPLT;
+
+ /* Wait for ACK/NAK/ERR from TT */
td->state = DWC_CHAN_ST_WAIT_S_ANE;
+
+ /* send one packet at a time */
+ count = td->max_packet_size;
+ if (td->remainder < count) {
+ /* we have a short packet */
+ td->short_pkt = 1;
+ count = td->remainder;
+ }
} else {
+ /* Wait for ACK/NAK/STALL from device */
td->state = DWC_CHAN_ST_WAIT_ANE;
- }
- /* send one packet at a time */
- count = td->max_packet_size;
- if (td->remainder < count) {
- /* we have a short packet */
- td->short_pkt = 1;
- count = td->remainder;
+ /* send one packet at a time */
+ count = td->max_packet_size;
+ if (td->remainder < count) {
+ /* we have a short packet */
+ td->short_pkt = 1;
+ count = td->remainder;
+ }
}
- /* TODO: HCTSIZ_DOPNG */
+ /* check for High-Speed multi-packets */
+ if ((td->hcsplt == 0) && (td->max_packet_count > 1)) {
+ if (td->npkt == 0) {
+ if (td->remainder >= (3 * td->max_packet_size))
+ td->npkt = 3;
+ else if (td->remainder >= (2 * td->max_packet_size))
+ td->npkt = 2;
+ else
+ td->npkt = 1;
- DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
- (count << HCTSIZ_XFERSIZE_SHIFT) |
- (1 << HCTSIZ_PKTCNT_SHIFT) |
- (td->toggle ? (HCTSIZ_PID_DATA1 << HCTSIZ_PID_SHIFT) :
- (HCTSIZ_PID_DATA0 << HCTSIZ_PID_SHIFT)));
+ if (td->npkt > td->max_packet_count)
+ td->npkt = td->max_packet_count;
+
+ td->tt_xactpos = 1; /* overload */
+ }
+ if (td->tt_xactpos == td->npkt) {
+ if (td->npkt == 1) {
+ DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
+ (1 << HCTSIZ_PKTCNT_SHIFT) |
+ (HCTSIZ_PID_DATA0 << HCTSIZ_PID_SHIFT));
+ } else if (td->npkt == 2) {
+ DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
+ (1 << HCTSIZ_PKTCNT_SHIFT) |
+ (HCTSIZ_PID_DATA1 << HCTSIZ_PID_SHIFT));
+ } else {
+ DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
+ (1 << HCTSIZ_PKTCNT_SHIFT) |
+ (HCTSIZ_PID_DATA2 << HCTSIZ_PID_SHIFT));
+ }
+ td->npkt = 0;
+ } else {
+ DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
+ (1 << HCTSIZ_PKTCNT_SHIFT) |
+ (HCTSIZ_PID_MDATA << HCTSIZ_PID_SHIFT));
+ }
+ } else {
+ /* TODO: HCTSIZ_DOPNG */
+
+ DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
+ (count << HCTSIZ_XFERSIZE_SHIFT) |
+ (1 << HCTSIZ_PKTCNT_SHIFT) |
+ (td->toggle ? (HCTSIZ_PID_DATA1 << HCTSIZ_PID_SHIFT) :
+ (HCTSIZ_PID_DATA0 << HCTSIZ_PID_SHIFT)));
+ }
DWC_OTG_WRITE_4(sc, DOTG_HCSPLT(td->channel), td->hcsplt);
+ /* send ASAP */
+ if ((ep_type == UE_ISOCHRONOUS) && !(sc->sc_last_frame_num & 1))
+ td->hcchar |= HCCHAR_ODDFRM;
+ else
+ td->hcchar &= ~HCCHAR_ODDFRM;
+
hcchar = td->hcchar;
hcchar &= ~HCCHAR_EPDIR_IN;
@@ -1651,18 +1946,20 @@ send_pkt_sync:
/* store number of bytes transmitted */
td->tx_bytes = count;
-
goto busy;
send_cpkt:
- count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
- /* check for first microframe */
- if (count == 0) {
- /* send packet again */
- goto send_pkt;
+ /* Wait for our turn, if TT transfer */
+ if (td->tt_scheduled == 0 ||
+ (sc->sc_last_frame_num & 7) < td->tt_complete_slot) {
+ /* set return state */
+ td->state = DWC_CHAN_ST_WAIT_C_PKT;
+ goto tt_wait;
}
+ /* wait until next slot before trying again */
+ td->tt_complete_slot++;
- td->hcsplt |= HCSPLT_COMPSPLT;
+ td->hcsplt |= HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_C_ANE;
DWC_OTG_WRITE_4(sc, DOTG_HCTSIZ(td->channel),
@@ -1676,9 +1973,15 @@ send_cpkt:
/* must enable channel before writing data to FIFO */
DWC_OTG_WRITE_4(sc, DOTG_HCCHAR(td->channel), hcchar);
+ goto busy;
+tt_wait:
+ /* free allocated channel */
+ dwc_otg_host_channel_free(td);
busy:
return (1); /* busy */
+complete:
+ return (0); /* complete */
}
static uint8_t
@@ -2003,8 +2306,8 @@ dwc_otg_timer(void *_sc)
td->did_nak = 0;
}
- /* poll jobs */
- dwc_otg_interrupt_poll(sc);
+ /* enable SOF interrupt, which will poll jobs */
+ dwc_otg_enable_sof_irq(sc);
if (sc->sc_timer_active) {
/* restart timer */
@@ -2041,6 +2344,238 @@ dwc_otg_timer_stop(struct dwc_otg_softc *sc)
}
static void
+dwc_otg_update_host_transfer_schedule(struct dwc_otg_softc *sc)
+{
+ TAILQ_HEAD(, usb_xfer) head;
+ struct usb_xfer *xfer;
+ struct usb_xfer *xfer_next;
+ struct dwc_otg_td *td;
+ uint8_t needsof;
+ uint16_t temp;
+
+ /* FS/LS TT frames are one behind, so add one here */
+ temp = (DWC_OTG_READ_4(sc, DOTG_HFNUM) + 1) & HFNUM_FRNUM_MASK;
+
+ if (sc->sc_last_frame_num == temp)
+ return;
+
+ sc->sc_last_frame_num = temp;
+
+ needsof = 0;
+
+ TAILQ_INIT(&head);
+
+ if (sc->sc_irq_mask & GINTMSK_SOFMSK) {
+ uint8_t x;
+
+ for (x = 0; x != sc->sc_host_ch_max; x++) {
+ if (sc->sc_chan_state[x].wait_sof != 0) {
+ if (--(sc->sc_chan_state[x].wait_sof) != 0)
+ needsof = 1;
+ }
+ }
+ }
+
+ if ((temp & 7) == 0) {
+
+ /* reset TT info */
+ memset(sc->sc_tt_info, 0, sizeof(sc->sc_tt_info));
+
+ /*
+ * Plan ahead FULL speed transfers going through the
+ * transaction translator, according to the USB
+ * specified priority:
+ */
+ TAILQ_FOREACH_SAFE(xfer, &sc->sc_bus.intr_q.head, wait_entry, xfer_next) {
+ struct dwc_otg_tt_info *pinfo;
+
+ td = xfer->td_transfer_cache;
+ if (td == NULL || td->did_nak != 0 ||
+ (td->hcchar & HCCHAR_EPTYPE_MASK) !=
+ (UE_CONTROL << HCCHAR_EPTYPE_SHIFT))
+ continue;
+
+ needsof = 1;
+
+ if (td->hcsplt == 0)
+ continue;
+
+ /* Reset state if stuck waiting for complete split */
+ if (td->state == DWC_CHAN_ST_WAIT_C_PKT)
+ td->state = DWC_CHAN_ST_START;
+
+ pinfo = sc->sc_tt_info + td->tt_index;
+
+ td->tt_start_slot = pinfo->slot_index;
+ pinfo->bytes_used += td->max_packet_size;
+ while (pinfo->bytes_used >= HCSPLT_XACTLEN_MAX) {
+ pinfo->bytes_used -= HCSPLT_XACTLEN_MAX;
+ pinfo->slot_index ++;
+ }
+
+ td->tt_complete_slot = pinfo->slot_index + 2;
+ if (td->tt_complete_slot < 8) {
+ td->tt_scheduled = 1;
+ TAILQ_REMOVE(&sc->sc_bus.intr_q.head, xfer, wait_entry);
+ TAILQ_INSERT_TAIL(&head, xfer, wait_entry);
+ } else {
+ td->tt_scheduled = 0;
+ }
+ }
+
+ TAILQ_FOREACH_SAFE(xfer, &sc->sc_bus.intr_q.head, wait_entry, xfer_next) {
+ struct dwc_otg_tt_info *pinfo;
+
+ td = xfer->td_transfer_cache;
+ if (td == NULL ||
+ (td->hcchar & HCCHAR_EPTYPE_MASK) !=
+ (UE_ISOCHRONOUS << HCCHAR_EPTYPE_SHIFT))
+ continue;
+
+ /* execute more frames */
+ td->tmr_val = 0;
+
+ needsof = 1;
+
+ if (td->hcsplt == 0)
+ continue;
+
+ /* Reset state if stuck waiting for complete split */
+ if (td->state == DWC_CHAN_ST_WAIT_C_PKT)
+ td->state = DWC_CHAN_ST_START;
+
+ pinfo = sc->sc_tt_info + td->tt_index;
+
+ td->tt_start_slot = pinfo->slot_index;
+ pinfo->bytes_used += td->remainder;
+ while (pinfo->bytes_used >= HCSPLT_XACTLEN_MAX) {
+ pinfo->bytes_used -= HCSPLT_XACTLEN_MAX;
+ pinfo->slot_index ++;
+ }
+
+ td->tt_complete_slot = pinfo->slot_index + 2;
+ if (td->tt_complete_slot < 8) {
+ td->tt_scheduled = 1;
+ TAILQ_REMOVE(&sc->sc_bus.intr_q.head, xfer, wait_entry);
+ TAILQ_INSERT_TAIL(&head, xfer, wait_entry);
+ } else {
+ td->tt_scheduled = 0;
+ }
+ }
+
+ TAILQ_FOREACH_SAFE(xfer, &sc->sc_bus.intr_q.head, wait_entry, xfer_next) {
+ struct dwc_otg_tt_info *pinfo;
+
+ td = xfer->td_transfer_cache;
+ if (td == NULL ||
+ (td->hcchar & HCCHAR_EPTYPE_MASK) !=
+ (UE_INTERRUPT << HCCHAR_EPTYPE_SHIFT)) {
+ continue;
+ }
+
+ if (dwc_otg_host_rate_check_interrupt(sc, td))
+ continue;
+
+ needsof = 1;
+
+ if (td->hcsplt == 0) {
+ td->tt_scheduled = 1;
+ continue;
+ }
+
+ /* Reset state if stuck waiting for complete split */
+ if (td->state == DWC_CHAN_ST_WAIT_C_PKT)
+ td->state = DWC_CHAN_ST_START;
+
+ pinfo = sc->sc_tt_info + td->tt_index;
+
+ td->tt_start_slot = pinfo->slot_index;
+ pinfo->bytes_used += td->remainder;
+ while (pinfo->bytes_used >= HCSPLT_XACTLEN_MAX) {
+ pinfo->bytes_used -= HCSPLT_XACTLEN_MAX;
+ pinfo->slot_index ++;
+ }
+
+ td->tt_complete_slot = pinfo->slot_index + 2;
+ if (td->tt_complete_slot < 8) {
+ td->tt_scheduled = 1;
+ TAILQ_REMOVE(&sc->sc_bus.intr_q.head, xfer, wait_entry);
+ TAILQ_INSERT_TAIL(&head, xfer, wait_entry);
+ } else {
+ td->tt_scheduled = 0;
+ }
+ }
+ }
+
+ if ((temp & 7) < 6) {
+ TAILQ_FOREACH_SAFE(xfer, &sc->sc_bus.intr_q.head, wait_entry, xfer_next) {
+ struct dwc_otg_tt_info *pinfo;
+
+ td = xfer->td_transfer_cache;
+ if (td == NULL || td->did_nak != 0 ||
+ (td->hcchar & HCCHAR_EPTYPE_MASK) !=
+ (UE_BULK << HCCHAR_EPTYPE_SHIFT)) {
+ continue;
+ }
+
+ needsof = 1;
+
+ if (td->hcsplt == 0)
+ continue;
+
+ if ((temp & 7) == 0) {
+ /* Reset state if stuck waiting for complete split */
+ if (td->state == DWC_CHAN_ST_WAIT_C_PKT)
+ td->state = DWC_CHAN_ST_START;
+ } else if (td->tt_scheduled != 0)
+ continue; /* already scheduled */
+
+ pinfo = sc->sc_tt_info + td->tt_index;
+
+ td->tt_start_slot = pinfo->slot_index;
+ pinfo->bytes_used += td->remainder;
+ while (pinfo->bytes_used >= HCSPLT_XACTLEN_MAX) {
+ pinfo->bytes_used -= HCSPLT_XACTLEN_MAX;
+ pinfo->slot_index ++;
+ }
+
+ td->tt_complete_slot = pinfo->slot_index + 2;
+ if (td->tt_complete_slot < 8) {
+ td->tt_scheduled = 1;
+ TAILQ_REMOVE(&sc->sc_bus.intr_q.head, xfer, wait_entry);
+ TAILQ_INSERT_TAIL(&head, xfer, wait_entry);
+ } else {
+ td->tt_scheduled = 0;
+ }
+ }
+ }
+
+ /* TT transfers need to be executed in a specific order */
+ TAILQ_CONCAT(&head, &sc->sc_bus.intr_q.head, wait_entry);
+
+ /* Put TT transfers first in the queue */
+ TAILQ_CONCAT(&sc->sc_bus.intr_q.head, &head, wait_entry);
+
+ if ((temp & 7) == 0) {
+ DPRINTFN(12, "SOF interrupt #%d, needsof=%d\n",
+ (int)temp, (int)needsof);
+
+ /* update SOF IRQ mask */
+ if (sc->sc_irq_mask & GINTMSK_SOFMSK) {
+ if (needsof == 0) {
+ sc->sc_irq_mask &= ~GINTMSK_SOFMSK;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+ }
+ } else {
+ if (needsof != 0) {
+ sc->sc_irq_mask |= GINTMSK_SOFMSK;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+ }
+ }
+ }
+}
+
+static void
dwc_otg_interrupt_poll(struct dwc_otg_softc *sc)
{
struct usb_xfer *xfer;
@@ -2126,6 +2661,7 @@ repeat:
got_rx_status = 1;
}
+ /* scan for completion events first */
TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
if (!dwc_otg_xfer_do_fifo(xfer)) {
/* queue has been modified */
@@ -2133,13 +2669,26 @@ repeat:
}
}
+ if (sc->sc_flags.status_device_mode == 0) {
+ /* update host transfer schedule, so that new transfers can be issued */
+ dwc_otg_update_host_transfer_schedule(sc);
+
+ /* start re-scheduled transfers */
+ TAILQ_FOREACH(xfer, &sc->sc_bus.intr_q.head, wait_entry) {
+ if (!dwc_otg_xfer_do_fifo(xfer)) {
+ /* queue has been modified */
+ goto repeat;
+ }
+ }
+ }
+
if (got_rx_status) {
/* check if data was consumed */
if (sc->sc_last_rx_status == 0)
goto repeat;
/* disable RX FIFO level interrupt */
- sc->sc_irq_mask &= ~GINTSTS_RXFLVL;
+ sc->sc_irq_mask &= ~GINTMSK_RXFLVLMSK;
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
}
}
@@ -2201,6 +2750,12 @@ dwc_otg_interrupt(struct dwc_otg_softc *sc)
sc->sc_flags.change_suspend = 0;
sc->sc_flags.change_connect = 1;
+ /* Disable SOF interrupt */
+ sc->sc_irq_mask &= ~GINTMSK_SOFMSK;
+ /* Enable RX frame interrupt */
+ sc->sc_irq_mask |= GINTMSK_RXFLVLMSK;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+
/* complete root HUB interrupt endpoint */
dwc_otg_root_intr(sc);
}
@@ -2234,10 +2789,12 @@ dwc_otg_interrupt(struct dwc_otg_softc *sc)
else
sc->sc_flags.status_high_speed = 0;
- /* disable resume interrupt and enable suspend interrupt */
-
- sc->sc_irq_mask &= ~GINTSTS_WKUPINT;
- sc->sc_irq_mask |= GINTSTS_USBSUSP;
+ /*
+ * Disable resume and SOF interrupt, and enable
+ * suspend and RX frame interrupt:
+ */
+ sc->sc_irq_mask &= ~(GINTMSK_WKUPINTMSK | GINTMSK_SOFMSK);
+ sc->sc_irq_mask |= (GINTMSK_USBSUSPMSK | GINTMSK_RXFLVLMSK);
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
/* complete root HUB interrupt endpoint */
@@ -2307,6 +2864,13 @@ dwc_otg_interrupt(struct dwc_otg_softc *sc)
/* complete root HUB interrupt endpoint */
dwc_otg_root_intr(sc);
+
+ /* disable RX FIFO level interrupt */
+ sc->sc_irq_mask &= ~GINTMSK_RXFLVLMSK;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+
+ /* update host frame interval */
+ dwc_otg_update_host_frame_interval(sc);
}
/*
@@ -2355,27 +2919,6 @@ dwc_otg_interrupt(struct dwc_otg_softc *sc)
}
}
- /* check for SOF interrupt */
- if (status & GINTSTS_SOF) {
- if (sc->sc_irq_mask & GINTMSK_SOFMSK) {
- uint8_t x;
- uint8_t y;
-
- DPRINTFN(12, "SOF interrupt\n");
-
- for (x = y = 0; x != sc->sc_host_ch_max; x++) {
- if (sc->sc_chan_state[x].wait_sof != 0) {
- if (--(sc->sc_chan_state[x].wait_sof) != 0)
- y = 1;
- }
- }
- if (y == 0) {
- sc->sc_irq_mask &= ~GINTMSK_SOFMSK;
- DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
- }
- }
- }
-
/* poll FIFO(s) */
dwc_otg_interrupt_poll(sc);
@@ -2412,6 +2955,9 @@ dwc_otg_setup_standard_chain_sub(struct dwc_otg_std_temp *temp)
td->channel = DWC_OTG_MAX_CHANNELS;
td->state = 0;
td->errcnt = 0;
+ td->tt_scheduled = 0;
+ td->tt_index = temp->tt_index;
+ td->tt_xactpos = HCSPLT_XACTPOS_BEGIN;
}
static void
@@ -2419,6 +2965,7 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
{
struct dwc_otg_std_temp temp;
struct dwc_otg_td *td;
+ struct usb_device *udev;
uint32_t x;
uint8_t need_sync;
uint8_t is_host;
@@ -2429,6 +2976,16 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
temp.max_frame_size = xfer->max_frame_size;
+ udev = xfer->xroot->udev;
+ if (udev->parent_hs_hub != NULL && udev->speed != USB_SPEED_HIGH) {
+ if (udev->parent_hs_hub->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
+ temp.tt_index = udev->device_index;
+ else
+ temp.tt_index = udev->parent_hs_hub->device_index;
+ } else {
+ temp.tt_index = udev->device_index;
+ }
+
td = xfer->td_start[0];
xfer->td_transfer_first = td;
xfer->td_transfer_cache = td;
@@ -2439,7 +2996,8 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
temp.td = NULL;
temp.td_next = xfer->td_start[0];
temp.offset = 0;
- temp.setup_alt_next = xfer->flags_int.short_frames_ok;
+ temp.setup_alt_next = xfer->flags_int.short_frames_ok ||
+ xfer->flags_int.isochronous_xfr;
temp.did_stall = !xfer->flags_int.control_stall;
is_host = (xfer->xroot->udev->flags.usb_mode == USB_MODE_HOST);
@@ -2637,14 +3195,12 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
case USB_SPEED_FULL:
case USB_SPEED_LOW:
/* check if root HUB port is running High Speed */
- if (sc->sc_flags.status_high_speed != 0) {
+ if (xfer->xroot->udev->parent_hs_hub != NULL) {
hcsplt = HCSPLT_SPLTENA |
(xfer->xroot->udev->hs_port_no <<
HCSPLT_PRTADDR_SHIFT) |
(xfer->xroot->udev->hs_hub_addr <<
HCSPLT_HUBADDR_SHIFT);
- if (xfer_type == UE_ISOCHRONOUS) /* XXX */
- hcsplt |= (3 << HCSPLT_XACTPOS_SHIFT);
} else {
hcsplt = 0;
}
@@ -2657,6 +3213,12 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
ival = 127;
td->tmr_val = sc->sc_tmr_val + ival;
td->tmr_res = ival;
+ } else if (xfer_type == UE_ISOCHRONOUS) {
+ td->tmr_val = 0;
+ td->tmr_res = 1;
+ } else {
+ td->tmr_val = 0;
+ td->tmr_res = 0;
}
break;
case USB_SPEED_HIGH:
@@ -2675,19 +3237,19 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
ival = 127;
td->tmr_val = sc->sc_tmr_val + ival;
td->tmr_res = ival;
+ } else if (xfer_type == UE_ISOCHRONOUS) {
+ td->tmr_val = 0;
+ td->tmr_res = 1 << usbd_xfer_get_fps_shift(xfer);
+ } else {
+ td->tmr_val = 0;
+ td->tmr_res = 0;
}
break;
default:
hcsplt = 0;
- break;
- }
-
- if (xfer_type == UE_ISOCHRONOUS) {
- td->tmr_val = xfer->endpoint->isoc_next & 0xFF;
- td->tmr_res = 1 << usbd_xfer_get_fps_shift(xfer);
- } else if (xfer_type != UE_INTERRUPT) {
td->tmr_val = 0;
td->tmr_res = 0;
+ break;
}
/* store configuration in all TD's */
@@ -2719,6 +3281,8 @@ dwc_otg_timeout(void *arg)
static void
dwc_otg_start_standard_chain(struct usb_xfer *xfer)
{
+ struct dwc_otg_softc *sc = DWC_OTG_BUS2SC(xfer->xroot->bus);
+
DPRINTFN(9, "\n");
/* poll one time - will turn on interrupts */
@@ -2732,6 +3296,9 @@ dwc_otg_start_standard_chain(struct usb_xfer *xfer)
usbd_transfer_timeout_ms(xfer,
&dwc_otg_timeout, xfer->timeout);
}
+
+ /* enable SOF interrupt, if any */
+ dwc_otg_enable_sof_irq(sc);
}
}
@@ -2787,7 +3354,8 @@ dwc_otg_standard_done_sub(struct usb_xfer *xfer)
}
/* Check for short transfer */
if (len > 0) {
- if (xfer->flags_int.short_frames_ok) {
+ if (xfer->flags_int.short_frames_ok ||
+ xfer->flags_int.isochronous_xfr) {
/* follow alt next */
if (td->alt_next) {
td = td->obj_next;
@@ -3357,9 +3925,15 @@ dwc_otg_device_isoc_close(struct usb_xfer *xfer)
static void
dwc_otg_device_isoc_enter(struct usb_xfer *xfer)
{
+}
+
+static void
+dwc_otg_device_isoc_start(struct usb_xfer *xfer)
+{
struct dwc_otg_softc *sc = DWC_OTG_BUS2SC(xfer->xroot->bus);
uint32_t temp;
- uint32_t nframes;
+ uint32_t msframes;
+ uint32_t framenum;
uint8_t shift = usbd_xfer_get_fps_shift(xfer);
DPRINTFN(6, "xfer=%p next=%d nframes=%d\n",
@@ -3369,34 +3943,42 @@ dwc_otg_device_isoc_enter(struct usb_xfer *xfer)
temp = DWC_OTG_READ_4(sc, DOTG_HFNUM);
/* get the current frame index */
- nframes = (temp & HFNUM_FRNUM_MASK);
+ framenum = (temp & HFNUM_FRNUM_MASK);
} else {
temp = DWC_OTG_READ_4(sc, DOTG_DSTS);
/* get the current frame index */
- nframes = DSTS_SOFFN_GET(temp);
+ framenum = DSTS_SOFFN_GET(temp);
}
- if (sc->sc_flags.status_high_speed)
- nframes /= 8;
+ if (xfer->xroot->udev->parent_hs_hub != NULL)
+ framenum /= 8;
+
+ framenum &= DWC_OTG_FRAME_MASK;
- nframes &= DWC_OTG_FRAME_MASK;
+ /*
+ * Compute number of milliseconds worth of data traffic for
+ * this USB transfer:
+ */
+ if (xfer->xroot->udev->speed == USB_SPEED_HIGH)
+ msframes = ((xfer->nframes << shift) + 7) / 8;
+ else
+ msframes = xfer->nframes;
/*
* check if the frame index is within the window where the frames
* will be inserted
*/
- temp = (nframes - xfer->endpoint->isoc_next) & DWC_OTG_FRAME_MASK;
+ temp = (framenum - xfer->endpoint->isoc_next) & DWC_OTG_FRAME_MASK;
- if ((xfer->endpoint->is_synced == 0) ||
- (temp < (((xfer->nframes << shift) + 7) / 8))) {
+ if ((xfer->endpoint->is_synced == 0) || (temp < msframes)) {
/*
* If there is data underflow or the pipe queue is
* empty we schedule the transfer a few frames ahead
* of the current frame position. Else two isochronous
* transfers might overlap.
*/
- xfer->endpoint->isoc_next = (nframes + 3) & DWC_OTG_FRAME_MASK;
+ xfer->endpoint->isoc_next = (framenum + 3) & DWC_OTG_FRAME_MASK;
xfer->endpoint->is_synced = 1;
DPRINTFN(3, "start next=%d\n", xfer->endpoint->isoc_next);
}
@@ -3404,25 +3986,20 @@ dwc_otg_device_isoc_enter(struct usb_xfer *xfer)
* compute how many milliseconds the insertion is ahead of the
* current frame position:
*/
- temp = (xfer->endpoint->isoc_next - nframes) & DWC_OTG_FRAME_MASK;
+ temp = (xfer->endpoint->isoc_next - framenum) & DWC_OTG_FRAME_MASK;
/*
* pre-compute when the isochronous transfer will be finished:
*/
xfer->isoc_time_complete =
- usb_isoc_time_expand(&sc->sc_bus, nframes) + temp +
- (((xfer->nframes << shift) + 7) / 8);
+ usb_isoc_time_expand(&sc->sc_bus, framenum) + temp + msframes;
/* setup TDs */
dwc_otg_setup_standard_chain(xfer);
/* compute frame number for next insertion */
- xfer->endpoint->isoc_next += (xfer->nframes << shift);
-}
+ xfer->endpoint->isoc_next += msframes;
-static void
-dwc_otg_device_isoc_start(struct usb_xfer *xfer)
-{
/* start TD chain */
dwc_otg_start_standard_chain(xfer);
}
@@ -3987,8 +4564,8 @@ dwc_otg_xfer_setup(struct usb_setup_params *parm)
* reasonable dummies:
*/
parm->hc_max_packet_size = 0x500;
- parm->hc_max_packet_count = 1;
- parm->hc_max_frame_size = 0x500;
+ parm->hc_max_packet_count = 3;
+ parm->hc_max_frame_size = 3 * 0x500;
usbd_transfer_setup_sub(parm);
@@ -4045,6 +4622,7 @@ dwc_otg_xfer_setup(struct usb_setup_params *parm)
/* init TD */
td->max_packet_size = xfer->max_packet_size;
+ td->max_packet_count = xfer->max_packet_count;
td->ep_no = ep_no;
td->obj_next = last_obj;
@@ -4081,40 +4659,7 @@ dwc_otg_ep_init(struct usb_device *udev, struct usb_endpoint_descriptor *edesc,
/* not supported */
return;
}
- } else {
- uint16_t mps;
-
- mps = UGETW(edesc->wMaxPacketSize);
-
- /* Apply limitations of our USB host driver */
-
- switch (udev->speed) {
- case USB_SPEED_HIGH:
- if (mps > 512) {
- DPRINTF("wMaxPacketSize=0x%04x"
- "is not supported\n", (int)mps);
- /* not supported */
- return;
- }
- break;
-
- case USB_SPEED_FULL:
- case USB_SPEED_LOW:
- if (mps > 188) {
- DPRINTF("wMaxPacketSize=0x%04x"
- "is not supported\n", (int)mps);
- /* not supported */
- return;
- }
- break;
-
- default:
- DPRINTF("Invalid device speed\n");
- /* not supported */
- return;
- }
}
-
if ((edesc->bmAttributes & UE_XFERTYPE) == UE_ISOCHRONOUS)
ep->methods = &dwc_otg_device_isoc_methods;
else
diff --git a/sys/dev/usb/controller/dwc_otg.h b/sys/dev/usb/controller/dwc_otg.h
index a301698519710..2ae2eaeb60141 100644
--- a/sys/dev/usb/controller/dwc_otg.h
+++ b/sys/dev/usb/controller/dwc_otg.h
@@ -57,23 +57,24 @@ struct dwc_otg_td {
uint32_t hcsplt; /* HOST CFG */
uint16_t max_packet_size; /* packet_size */
uint16_t npkt;
+ uint8_t max_packet_count; /* packet_count */
uint8_t errcnt;
uint8_t tmr_res;
uint8_t tmr_val;
- uint8_t curr_frame;
uint8_t ep_no;
uint8_t channel;
+ uint8_t tt_index; /* TT data */
+ uint8_t tt_start_slot; /* TT data */
+ uint8_t tt_complete_slot; /* TT data */
+ uint8_t tt_xactpos; /* TT data */
uint8_t state;
#define DWC_CHAN_ST_START 0
#define DWC_CHAN_ST_WAIT_ANE 1
#define DWC_CHAN_ST_WAIT_S_ANE 2
#define DWC_CHAN_ST_WAIT_C_ANE 3
-#define DWC_CHAN_ST_RX_PKT 4
-#define DWC_CHAN_ST_RX_SPKT 5
-#define DWC_CHAN_ST_RX_SPKT_SYNC 6
-#define DWC_CHAN_ST_TX_PKT 4
-#define DWC_CHAN_ST_TX_CPKT 5
-#define DWC_CHAN_ST_TX_PKT_SYNC 6
+#define DWC_CHAN_ST_WAIT_C_PKT 4
+#define DWC_CHAN_ST_TX_PKT_ISOC 5
+#define DWC_CHAN_ST_TX_WAIT_ISOC 6
uint8_t error:1;
uint8_t error_any:1;
uint8_t error_stall:1;
@@ -84,6 +85,7 @@ struct dwc_otg_td {
uint8_t set_toggle:1;
uint8_t got_short:1;
uint8_t did_nak:1;
+ uint8_t tt_scheduled:1;
};
struct dwc_otg_std_temp {
@@ -103,6 +105,7 @@ struct dwc_otg_std_temp {
uint8_t setup_alt_next;
uint8_t did_stall;
uint8_t bulk_or_control;
+ uint8_t tt_index;
};
struct dwc_otg_config_desc {
@@ -143,11 +146,18 @@ struct dwc_otg_profile {
struct dwc_otg_chan_state {
uint32_t hcint;
+ uint32_t tx_size;
uint8_t wait_sof;
uint8_t allocated;
uint8_t suspended;
};
+struct dwc_otg_tt_info {
+ uint16_t bytes_used;
+ uint8_t slot_index;
+ uint8_t dummy;
+};
+
struct dwc_otg_softc {
struct usb_bus sc_bus;
union dwc_otg_hub_temp sc_hub_temp;
@@ -163,9 +173,11 @@ struct dwc_otg_softc {
bus_space_handle_t sc_io_hdl;
uint32_t sc_rx_bounce_buffer[1024 / 4];
- uint32_t sc_tx_bounce_buffer[(512 * DWC_OTG_MAX_TXP) / 4];
+ uint32_t sc_tx_bounce_buffer[MAX(512 * DWC_OTG_MAX_TXP, 1024) / 4];
uint32_t sc_fifo_size;
+ uint32_t sc_tx_max_size;
+ uint32_t sc_tx_cur_size;
uint32_t sc_irq_mask;
uint32_t sc_last_rx_status;
uint32_t sc_out_ctl[DWC_OTG_MAX_ENDPOINTS];
@@ -174,7 +186,9 @@ struct dwc_otg_softc {
uint32_t sc_tmr_val;
uint32_t sc_hprt_val;
+ struct dwc_otg_tt_info sc_tt_info[DWC_OTG_MAX_DEVICES];
uint16_t sc_active_rx_ep;
+ uint16_t sc_last_frame_num;
uint8_t sc_timer_active;
uint8_t sc_dev_ep_max;
diff --git a/sys/dev/usb/controller/dwc_otgreg.h b/sys/dev/usb/controller/dwc_otgreg.h
index f59f48c89c023..cd2f45de32a60 100644
--- a/sys/dev/usb/controller/dwc_otgreg.h
+++ b/sys/dev/usb/controller/dwc_otgreg.h
@@ -536,6 +536,11 @@
#define HCSPLT_COMPSPLT (1<<16)
#define HCSPLT_XACTPOS_SHIFT 14
#define HCSPLT_XACTPOS_MASK 0x0000c000
+#define HCSPLT_XACTPOS_MIDDLE 0
+#define HCSPLT_XACTPOS_LAST 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
+#define HCSPLT_XACTLEN_MAX 188 /* bytes */
#define HCSPLT_HUBADDR_SHIFT 7
#define HCSPLT_HUBADDR_MASK 0x00003f80
#define HCSPLT_PRTADDR_SHIFT 0
@@ -545,6 +550,11 @@
(HCINT_BBLERR | HCINT_XACTERR)
#define HCINT_RETRY \
(HCINT_DATATGLERR | HCINT_FRMOVRUN | HCINT_NAK)
+#define HCINT_DEFAULT_MASK \
+ (HCINT_STALL | HCINT_BBLERR | \
+ HCINT_XACTERR | HCINT_NAK | HCINT_ACK | HCINT_NYET | \
+ HCINT_CHHLTD | HCINT_FRMOVRUN | \
+ HCINT_DATATGLERR)
#define HCINT_SOFTWARE_ONLY (1<<20) /* BSD only */
#define HCINT_DATATGLERR (1<<10)
diff --git a/sys/dev/usb/net/if_smsc.c b/sys/dev/usb/net/if_smsc.c
index 6a02925304a8f..55d3560759b1e 100644
--- a/sys/dev/usb/net/if_smsc.c
+++ b/sys/dev/usb/net/if_smsc.c
@@ -86,6 +86,9 @@ __FBSDID("$FreeBSD$");
#include <net/if.h>
#include <net/if_var.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+
#include "opt_platform.h"
#ifdef FDT
@@ -1025,25 +1028,32 @@ smsc_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
*
* Ignore H/W csum for non-IPv4 packets.
*/
- if (be16toh(eh->ether_type) == ETHERTYPE_IP && pktlen > ETHER_MIN_LEN) {
-
- /* Indicate the UDP/TCP csum has been calculated */
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
-
- /* Copy the TCP/UDP checksum from the last 2 bytes
- * of the transfer and put in the csum_data field.
- */
- usbd_copy_out(pc, (off + pktlen),
- &m->m_pkthdr.csum_data, 2);
-
- /* The data is copied in network order, but the
- * csum algorithm in the kernel expects it to be
- * in host network order.
- */
- m->m_pkthdr.csum_data = ntohs(m->m_pkthdr.csum_data);
-
- smsc_dbg_printf(sc, "RX checksum offloaded (0x%04x)\n",
- m->m_pkthdr.csum_data);
+ if ((be16toh(eh->ether_type) == ETHERTYPE_IP) &&
+ (pktlen > ETHER_MIN_LEN)) {
+ struct ip *ip;
+
+ ip = (struct ip *)(eh + 1);
+ if ((ip->ip_v == IPVERSION) &&
+ ((ip->ip_p == IPPROTO_TCP) ||
+ (ip->ip_p == IPPROTO_UDP))) {
+ /* Indicate the UDP/TCP csum has been calculated */
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
+
+ /* Copy the TCP/UDP checksum from the last 2 bytes
+ * of the transfer and put in the csum_data field.
+ */
+ usbd_copy_out(pc, (off + pktlen),
+ &m->m_pkthdr.csum_data, 2);
+
+ /* The data is copied in network order, but the
+ * csum algorithm in the kernel expects it to be
+ * in host network order.
+ */
+ m->m_pkthdr.csum_data = ntohs(m->m_pkthdr.csum_data);
+
+ smsc_dbg_printf(sc, "RX checksum offloaded (0x%04x)\n",
+ m->m_pkthdr.csum_data);
+ }
}
/* Need to adjust the offset as well or we'll be off
diff --git a/sys/dev/vt/hw/efifb/efifb.c b/sys/dev/vt/hw/efifb/efifb.c
index 4a6eeb798fc0c..683cecbabc4d7 100644
--- a/sys/dev/vt/hw/efifb/efifb.c
+++ b/sys/dev/vt/hw/efifb/efifb.c
@@ -51,36 +51,58 @@ __FBSDID("$FreeBSD$");
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>
-static vd_init_t vt_efb_init;
+static vd_init_t vt_efifb_init;
+static vd_probe_t vt_efifb_probe;
-static struct vt_driver vt_efb_driver = {
- .vd_init = vt_efb_init,
+static struct vt_driver vt_efifb_driver = {
+ .vd_name = "efifb",
+ .vd_probe = vt_efifb_probe,
+ .vd_init = vt_efifb_init,
.vd_blank = vt_fb_blank,
.vd_bitbltchr = vt_fb_bitbltchr,
+ .vd_maskbitbltchr = vt_fb_maskbitbltchr,
/* Better than VGA, but still generic driver. */
.vd_priority = VD_PRIORITY_GENERIC + 1,
};
-static struct fb_info info;
-VT_CONSDEV_DECLARE(vt_efb_driver,
- MAX(80, PIXEL_WIDTH(VT_FB_DEFAULT_WIDTH)),
- MAX(25, PIXEL_HEIGHT(VT_FB_DEFAULT_HEIGHT)), &info);
+static struct fb_info local_info;
+VT_DRIVER_DECLARE(vt_efifb, vt_efifb_driver);
static int
-vt_efb_init(struct vt_device *vd)
+vt_efifb_probe(struct vt_device *vd)
{
- int depth, d, disable, i, len;
- struct fb_info *info;
+ int disabled;
struct efi_fb *efifb;
caddr_t kmdp;
- info = vd->vd_softc;
+ disabled = 0;
+ TUNABLE_INT_FETCH("hw.syscons.disable", &disabled);
+ if (disabled != 0)
+ return (CN_DEAD);
- disable = 0;
- TUNABLE_INT_FETCH("hw.syscons.disable", &disable);
- if (disable != 0)
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+ efifb = (struct efi_fb *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_FB);
+ if (efifb == NULL)
return (CN_DEAD);
+ return (CN_INTERNAL);
+}
+
+static int
+vt_efifb_init(struct vt_device *vd)
+{
+ int depth, d, i, len;
+ struct fb_info *info;
+ struct efi_fb *efifb;
+ caddr_t kmdp;
+
+ info = vd->vd_softc;
+ if (info == NULL)
+ info = vd->vd_softc = (void *)&local_info;
+
kmdp = preload_search_by_type("elf kernel");
if (kmdp == NULL)
kmdp = preload_search_by_type("elf64 kernel");
@@ -136,7 +158,8 @@ vt_efb_init(struct vt_device *vd)
fb_probe(info);
vt_fb_init(vd);
+ /* Clear the screen. */
+ vt_fb_blank(vd, TC_BLACK);
return (CN_INTERNAL);
}
-
diff --git a/sys/dev/vt/hw/fb/vt_early_fb.c b/sys/dev/vt/hw/fb/vt_early_fb.c
index 4a81f4f41f741..64b2122f3ee37 100644
--- a/sys/dev/vt/hw/fb/vt_early_fb.c
+++ b/sys/dev/vt/hw/fb/vt_early_fb.c
@@ -52,18 +52,19 @@ __FBSDID("$FreeBSD$");
#include <dev/vt/colors/vt_termcolors.h>
static vd_init_t vt_efb_init;
+static vd_probe_t vt_efb_probe;
static struct vt_driver vt_fb_early_driver = {
+ .vd_name = "efb",
+ .vd_probe = vt_efb_probe,
.vd_init = vt_efb_init,
.vd_blank = vt_fb_blank,
.vd_bitbltchr = vt_fb_bitbltchr,
.vd_priority = VD_PRIORITY_GENERIC,
};
-static struct fb_info info;
-VT_CONSDEV_DECLARE(vt_fb_early_driver,
- MAX(80, PIXEL_WIDTH(VT_FB_DEFAULT_WIDTH)),
- MAX(25, PIXEL_HEIGHT(VT_FB_DEFAULT_HEIGHT)), &info);
+static struct fb_info local_info;
+VT_DRIVER_DECLARE(vt_efb, vt_fb_early_driver);
static void
#ifdef FDT
@@ -126,30 +127,62 @@ vt_efb_initialize(struct fb_info *info)
}
}
-static int
-vt_efb_init(struct vt_device *vd)
+static phandle_t
+vt_efb_get_fbnode()
{
- struct ofw_pci_register pciaddrs[8];
- struct fb_info *info;
- int i, len, n_pciaddrs;
phandle_t chosen, node;
ihandle_t stdout;
char type[64];
- info = vd->vd_softc;
-
chosen = OF_finddevice("/chosen");
OF_getprop(chosen, "stdout", &stdout, sizeof(stdout));
node = OF_instance_to_package(stdout);
- if (node == -1) {
- /*
- * The "/chosen/stdout" does not exist try
- * using "screen" directly.
- */
- node = OF_finddevice("screen");
+ if (node != -1) {
+		/* The "/chosen/stdout" node is present. */
+ OF_getprop(node, "device_type", type, sizeof(type));
+ /* Check if it has "display" type. */
+ if (strcmp(type, "display") == 0)
+ return (node);
}
- OF_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, "display") != 0)
+ /* Try device with name "screen". */
+ node = OF_finddevice("screen");
+
+ return (node);
+}
+
+static int
+vt_efb_probe(struct vt_device *vd)
+{
+ phandle_t node;
+
+ node = vt_efb_get_fbnode();
+ if (node == -1)
+ return (CN_DEAD);
+
+ if ((OF_getproplen(node, "height") <= 0) ||
+ (OF_getproplen(node, "width") <= 0) ||
+ (OF_getproplen(node, "depth") <= 0) ||
+ (OF_getproplen(node, "linebytes") <= 0))
+ return (CN_DEAD);
+
+ return (CN_INTERNAL);
+}
+
+static int
+vt_efb_init(struct vt_device *vd)
+{
+ struct ofw_pci_register pciaddrs[8];
+ struct fb_info *info;
+ int i, len, n_pciaddrs;
+ phandle_t node;
+
+ if (vd->vd_softc == NULL)
+ vd->vd_softc = (void *)&local_info;
+
+ info = vd->vd_softc;
+
+ node = vt_efb_get_fbnode();
+ if (node == -1)
return (CN_DEAD);
#define GET(name, var) \
@@ -249,7 +282,6 @@ vt_efb_init(struct vt_device *vd)
#endif
}
-
/* blank full size */
len = info->fb_size / 4;
for (i = 0; i < len; i++) {
@@ -259,13 +291,6 @@ vt_efb_init(struct vt_device *vd)
/* Get pixel storage size. */
info->fb_bpp = info->fb_stride / info->fb_width * 8;
- /*
- * Early FB driver work with static window buffer 80x25, so reduce
- * size to 640x480.
- */
- info->fb_width = VT_FB_DEFAULT_WIDTH;
- info->fb_height = VT_FB_DEFAULT_HEIGHT;
-
#ifdef FDT
vt_efb_initialize(info, node);
#else
@@ -274,6 +299,5 @@ vt_efb_init(struct vt_device *vd)
fb_probe(info);
vt_fb_init(vd);
-
return (CN_INTERNAL);
}
diff --git a/sys/dev/vt/hw/fb/vt_fb.c b/sys/dev/vt/hw/fb/vt_fb.c
index 7163bdc786acc..3ffba40a1adbd 100644
--- a/sys/dev/vt/hw/fb/vt_fb.c
+++ b/sys/dev/vt/hw/fb/vt_fb.c
@@ -50,9 +50,11 @@ void vt_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
void vt_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color);
static struct vt_driver vt_fb_driver = {
+ .vd_name = "fb",
.vd_init = vt_fb_init,
.vd_blank = vt_fb_blank,
.vd_bitbltchr = vt_fb_bitbltchr,
+ .vd_maskbitbltchr = vt_fb_maskbitbltchr,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
@@ -61,6 +63,8 @@ static struct vt_driver vt_fb_driver = {
.vd_fb_mmap = vt_fb_mmap,
};
+VT_DRIVER_DECLARE(vt_fb, vt_fb_driver);
+
static int
vt_fb_ioctl(struct vt_device *vd, u_long cmd, caddr_t data, struct thread *td)
{
@@ -189,6 +193,68 @@ vt_fb_bitbltchr(struct vt_device *vd, const uint8_t *src, const uint8_t *mask,
uint32_t fgc, bgc, cc, o;
int c, l, bpp;
u_long line;
+ uint8_t b;
+ const uint8_t *ch;
+
+ info = vd->vd_softc;
+ bpp = FBTYPE_GET_BYTESPP(info);
+ fgc = info->fb_cmap[fg];
+ bgc = info->fb_cmap[bg];
+ b = 0;
+ if (bpl == 0)
+		bpl = (width + 7) >> 3;	/* Bytes per source line. */
+
+ /* Don't try to put off screen pixels */
+ if (((left + width) > info->fb_width) || ((top + height) >
+ info->fb_height))
+ return;
+
+ line = (info->fb_stride * top) + (left * bpp);
+ for (l = 0; l < height; l++) {
+ ch = src;
+ for (c = 0; c < width; c++) {
+ if (c % 8 == 0)
+ b = *ch++;
+ else
+ b <<= 1;
+ o = line + (c * bpp);
+ cc = b & 0x80 ? fgc : bgc;
+
+ switch(bpp) {
+ case 1:
+ info->wr1(info, o, cc);
+ break;
+ case 2:
+ info->wr2(info, o, cc);
+ break;
+ case 3:
+ /* Packed mode, so unaligned. Byte access. */
+ info->wr1(info, o, (cc >> 16) & 0xff);
+ info->wr1(info, o + 1, (cc >> 8) & 0xff);
+ info->wr1(info, o + 2, cc & 0xff);
+ break;
+ case 4:
+ info->wr4(info, o, cc);
+ break;
+ default:
+ /* panic? */
+ break;
+ }
+ }
+ line += info->fb_stride;
+ src += bpl;
+ }
+}
+
+void
+vt_fb_maskbitbltchr(struct vt_device *vd, const uint8_t *src, const uint8_t *mask,
+ int bpl, vt_axis_t top, vt_axis_t left, unsigned int width,
+ unsigned int height, term_color_t fg, term_color_t bg)
+{
+ struct fb_info *info;
+ uint32_t fgc, bgc, cc, o;
+ int c, l, bpp;
+ u_long line;
uint8_t b, m;
const uint8_t *ch;
diff --git a/sys/dev/vt/hw/fb/vt_fb.h b/sys/dev/vt/hw/fb/vt_fb.h
index 9b3aa0d965249..10dd2389319ac 100644
--- a/sys/dev/vt/hw/fb/vt_fb.h
+++ b/sys/dev/vt/hw/fb/vt_fb.h
@@ -41,7 +41,7 @@ int fb_probe(struct fb_info *info);
vd_init_t vt_fb_init;
vd_blank_t vt_fb_blank;
vd_bitbltchr_t vt_fb_bitbltchr;
+vd_maskbitbltchr_t vt_fb_maskbitbltchr;
vd_postswitch_t vt_fb_postswitch;
-
#endif /* _DEV_VT_HW_FB_VT_FB_H_ */
diff --git a/sys/dev/vt/hw/ofwfb/ofwfb.c b/sys/dev/vt/hw/ofwfb/ofwfb.c
index b5f8e59897574..7245725444674 100644
--- a/sys/dev/vt/hw/ofwfb/ofwfb.c
+++ b/sys/dev/vt/hw/ofwfb/ofwfb.c
@@ -63,6 +63,7 @@ static vd_blank_t ofwfb_blank;
static vd_bitbltchr_t ofwfb_bitbltchr;
static const struct vt_driver vt_ofwfb_driver = {
+ .vd_name = "ofwfb",
.vd_init = ofwfb_init,
.vd_blank = ofwfb_blank,
.vd_bitbltchr = ofwfb_bitbltchr,
diff --git a/sys/dev/vt/hw/vga/vga.c b/sys/dev/vt/hw/vga/vga.c
index 16e07512f8b9c..986dde9323ab6 100644
--- a/sys/dev/vt/hw/vga/vga.c
+++ b/sys/dev/vt/hw/vga/vga.c
@@ -45,10 +45,8 @@ __FBSDID("$FreeBSD$");
#if defined(__amd64__) || defined(__i386__)
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <machine/metadata.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
-#include <sys/linker.h>
#endif /* __amd64__ || __i386__ */
struct vga_softc {
@@ -73,6 +71,7 @@ struct vga_softc {
#define VT_VGA_HEIGHT 480
#define VT_VGA_MEMSIZE (VT_VGA_WIDTH * VT_VGA_HEIGHT / 8)
+static vd_probe_t vga_probe;
static vd_init_t vga_init;
static vd_blank_t vga_blank;
static vd_bitbltchr_t vga_bitbltchr;
@@ -83,6 +82,8 @@ static vd_putchar_t vga_putchar;
static vd_postswitch_t vga_postswitch;
static const struct vt_driver vt_vga_driver = {
+ .vd_name = "vga",
+ .vd_probe = vga_probe,
.vd_init = vga_init,
.vd_blank = vga_blank,
.vd_bitbltchr = vga_bitbltchr,
@@ -99,8 +100,7 @@ static const struct vt_driver vt_vga_driver = {
* buffer is always big enough to support both.
*/
static struct vga_softc vga_conssoftc;
-VT_CONSDEV_DECLARE(vt_vga_driver, MAX(80, PIXEL_WIDTH(VT_VGA_WIDTH)),
- MAX(25, PIXEL_HEIGHT(VT_VGA_HEIGHT)), &vga_conssoftc);
+VT_DRIVER_DECLARE(vt_vga, vt_vga_driver);
static inline void
vga_setcolor(struct vt_device *vd, term_color_t color)
@@ -633,23 +633,22 @@ vga_initialize(struct vt_device *vd, int textmode)
}
static int
+vga_probe(struct vt_device *vd)
+{
+
+ return (CN_INTERNAL);
+}
+
+static int
vga_init(struct vt_device *vd)
{
- struct vga_softc *sc = vd->vd_softc;
- int textmode = 0;
-
-#if defined(__amd64__)
- /* Disable if EFI framebuffer present. Should be handled by priority
- * logic in vt(9), but this will do for now. XXX */
-
- caddr_t kmdp, efifb;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- efifb = preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB);
- if (efifb != NULL)
- return (CN_DEAD);
-#endif
+ struct vga_softc *sc;
+ int textmode;
+
+ if (vd->vd_softc == NULL)
+ vd->vd_softc = (void *)&vga_conssoftc;
+ sc = vd->vd_softc;
+ textmode = 0;
#if defined(__amd64__) || defined(__i386__)
sc->vga_fb_tag = X86_BUS_SPACE_MEM;
diff --git a/sys/dev/vt/vt.h b/sys/dev/vt/vt.h
index caab70908fd8a..eb0ce1fab7f85 100644
--- a/sys/dev/vt/vt.h
+++ b/sys/dev/vt/vt.h
@@ -78,7 +78,13 @@ one 'device sc' or 'device vt'"
#endif /* defined(SC_TWOBUTTON_MOUSE) || defined(VT_TWOBUTTON_MOUSE) */
#define SC_DRIVER_NAME "vt"
+#ifdef VT_DEBUG
#define DPRINTF(_l, ...) if (vt_debug > (_l)) printf( __VA_ARGS__ )
+#define VT_CONSOLECTL_DEBUG
+#define VT_SYSMOUSE_DEBUG
+#else
+#define DPRINTF(_l, ...) do {} while (0)
+#endif
#define ISSIGVALID(sig) ((sig) > 0 && (sig) < NSIG)
#define VT_SYSCTL_INT(_name, _default, _descr) \
@@ -277,6 +283,7 @@ struct vt_window {
*/
typedef int vd_init_t(struct vt_device *vd);
+typedef int vd_probe_t(struct vt_device *vd);
typedef void vd_postswitch_t(struct vt_device *vd);
typedef void vd_blank_t(struct vt_device *vd, term_color_t color);
typedef void vd_bitbltchr_t(struct vt_device *vd, const uint8_t *src,
@@ -295,7 +302,9 @@ typedef void vd_drawrect_t(struct vt_device *, int, int, int, int, int,
typedef void vd_setpixel_t(struct vt_device *, int, int, term_color_t);
struct vt_driver {
+ char vd_name[16];
/* Console attachment. */
+ vd_probe_t *vd_probe;
vd_init_t *vd_init;
/* Drawing. */
@@ -391,6 +400,9 @@ TERMINAL_DECLARE_EARLY(driver ## _consterm, vt_termclass, \
SYSINIT(vt_early_cons, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, \
vt_upgrade, &driver ## _consdev)
+/* name argument is not used yet. */
+#define VT_DRIVER_DECLARE(name, drv) DATA_SET(vt_drv_set, drv)
+
/*
* Fonts.
*
diff --git a/sys/dev/vt/vt_buf.c b/sys/dev/vt/vt_buf.c
index 0b078ecbfd6c0..598126f46d65c 100644
--- a/sys/dev/vt/vt_buf.c
+++ b/sys/dev/vt/vt_buf.c
@@ -448,8 +448,9 @@ vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size)
history_size = MAX(history_size, p->tp_row);
- if (history_size > vb->vb_history_size || p->tp_col >
- vb->vb_scr_size.tp_col) {
+	/* If the new screen/history size is bigger, or the buffer is VBF_STATIC. */
+ if ((history_size > vb->vb_history_size) || (p->tp_col >
+ vb->vb_scr_size.tp_col) || (vb->vb_flags & VBF_STATIC)) {
/* Allocate new buffer. */
bufsize = history_size * p->tp_col * sizeof(term_char_t);
new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
diff --git a/sys/dev/vt/vt_consolectl.c b/sys/dev/vt/vt_consolectl.c
index f9c451780fc77..df8b341d41323 100644
--- a/sys/dev/vt/vt_consolectl.c
+++ b/sys/dev/vt/vt_consolectl.c
@@ -61,8 +61,10 @@ consolectl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
return (0);
}
default:
+#ifdef VT_CONSOLECTL_DEBUG
printf("consolectl: unknown ioctl: %c:%lx\n",
(char)IOCGROUP(cmd), IOCBASECMD(cmd));
+#endif
return (ENOIOCTL);
}
}
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index eb2843edaabf2..4b006a2addd56 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -144,6 +144,83 @@ static int vt_window_switch(struct vt_window *);
static int vt_late_window_switch(struct vt_window *);
static int vt_proc_alive(struct vt_window *);
static void vt_resize(struct vt_device *);
+static void vt_update_static(void *);
+
+SET_DECLARE(vt_drv_set, struct vt_driver);
+
+#define _VTDEFH MAX(100, PIXEL_HEIGHT(VT_FB_DEFAULT_HEIGHT))
+#define _VTDEFW MAX(200, PIXEL_WIDTH(VT_FB_DEFAULT_WIDTH))
+
+static struct terminal vt_consterm;
+static struct vt_window vt_conswindow;
+static struct vt_device vt_consdev = {
+ .vd_driver = NULL,
+ .vd_softc = NULL,
+ .vd_flags = VDF_INVALID,
+ .vd_windows = { [VT_CONSWINDOW] = &vt_conswindow, },
+ .vd_curwindow = &vt_conswindow,
+ .vd_markedwin = NULL,
+ .vd_kbstate = 0,
+};
+static term_char_t vt_constextbuf[(_VTDEFW) * (VBF_DEFAULT_HISTORY_SIZE)];
+static term_char_t *vt_constextbufrows[VBF_DEFAULT_HISTORY_SIZE];
+static struct vt_window vt_conswindow = {
+ .vw_number = VT_CONSWINDOW,
+ .vw_flags = VWF_CONSOLE,
+ .vw_buf = {
+ .vb_buffer = vt_constextbuf,
+ .vb_rows = vt_constextbufrows,
+ .vb_history_size = VBF_DEFAULT_HISTORY_SIZE,
+ .vb_curroffset = 0,
+ .vb_roffset = 0,
+ .vb_flags = VBF_STATIC,
+ .vb_mark_start = {.tp_row = 0, .tp_col = 0,},
+ .vb_mark_end = {.tp_row = 0, .tp_col = 0,},
+ .vb_scr_size = {
+ .tp_row = _VTDEFH,
+ .tp_col = _VTDEFW,
+ },
+ },
+ .vw_device = &vt_consdev,
+ .vw_terminal = &vt_consterm,
+ .vw_kbdmode = K_XLATE,
+};
+static struct terminal vt_consterm = {
+ .tm_class = &vt_termclass,
+ .tm_softc = &vt_conswindow,
+ .tm_flags = TF_CONS,
+};
+static struct consdev vt_consterm_consdev = {
+ .cn_ops = &termcn_cnops,
+ .cn_arg = &vt_consterm,
+ .cn_name = "ttyv0",
+};
+
+/* Add to set of consoles. */
+DATA_SET(cons_set, vt_consterm_consdev);
+
+/*
+ * Right after kmem is done to allow early drivers to use locking and allocate
+ * memory.
+ */
+SYSINIT(vt_update_static, SI_SUB_KMEM, SI_ORDER_ANY, vt_update_static,
+ &vt_consdev);
+/* Delay until all devices attached, to not waste time. */
+SYSINIT(vt_early_cons, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, vt_upgrade,
+ &vt_consdev);
+
+/* Initialize lock/memory-dependent members. */
+static void
+vt_update_static(void *dummy)
+{
+
+ if (main_vd != NULL) {
+ printf("VT: running with driver \"%s\".\n",
+ main_vd->vd_driver->vd_name);
+ mtx_init(&main_vd->vd_lock, "vtdev", NULL, MTX_DEF);
+ cv_init(&main_vd->vd_winswitch, "vtwswt");
+ }
+}
static void
vt_switch_timer(void *arg)
@@ -601,6 +678,22 @@ vtterm_bell(struct terminal *tm)
}
static void
+vtterm_beep(struct terminal *tm, u_int param)
+{
+ u_int freq, period;
+
+ if ((param == 0) || ((param & 0xffff) == 0)) {
+ vtterm_bell(tm);
+ return;
+ }
+
+ period = ((param >> 16) & 0xffff) * hz / 1000;
+ freq = 1193182 / (param & 0xffff);
+
+ sysbeep(freq, period);
+}
+
+static void
vtterm_cursor(struct terminal *tm, const term_pos_t *p)
{
struct vt_window *vw = tm->tm_softc;
@@ -775,7 +868,7 @@ vt_flush(struct vt_device *vd)
if ((vd->vd_flags & (VDF_MOUSECURSOR|VDF_TEXTMODE)) ==
VDF_MOUSECURSOR) {
m = &vt_default_mouse_pointer;
- bpl = (m->w + 7) >> 3; /* Bytes per sorce line. */
+ bpl = (m->w + 7) >> 3; /* Bytes per source line. */
w = m->w;
h = m->h;
@@ -851,9 +944,11 @@ vtterm_splash(struct vt_device *vd)
}
#endif
+
static void
vtterm_cnprobe(struct terminal *tm, struct consdev *cp)
{
+ struct vt_driver *vtd, **vtdlist, *vtdbest = NULL;
struct vt_window *vw = tm->tm_softc;
struct vt_device *vd = vw->vw_device;
struct winsize wsz;
@@ -862,6 +957,24 @@ vtterm_cnprobe(struct terminal *tm, struct consdev *cp)
/* Initialization already done. */
return;
+ SET_FOREACH(vtdlist, vt_drv_set) {
+ vtd = *vtdlist;
+ if (vtd->vd_probe == NULL)
+ continue;
+ if (vtd->vd_probe(vd) == CN_DEAD)
+ continue;
+ if ((vtdbest == NULL) ||
+ (vtd->vd_priority > vtdbest->vd_priority))
+ vtdbest = vtd;
+ }
+ if (vtdbest == NULL) {
+ cp->cn_pri = CN_DEAD;
+ vd->vd_flags |= VDF_DEAD;
+ return;
+ }
+
+ vd->vd_driver = vtdbest;
+
cp->cn_pri = vd->vd_driver->vd_init(vd);
if (cp->cn_pri == CN_DEAD) {
vd->vd_flags |= VDF_DEAD;
@@ -1636,7 +1749,7 @@ skip_thunk:
#endif
return (0);
case KDMKTONE: /* sound the bell */
- /* TODO */
+ vtterm_beep(tm, *(u_int *)data);
return (0);
case KIOCSOUND: /* make tone (*data) hz */
/* TODO */
@@ -1705,6 +1818,7 @@ skip_thunk:
vw->vw_flags |= VWF_VTYLOCK;
else
vw->vw_flags &= ~VWF_VTYLOCK;
+ return (0);
case VT_OPENQRY:
VT_LOCK(vd);
for (i = 0; i < VT_MAXWINDOWS; i++) {
@@ -1871,12 +1985,6 @@ vt_upgrade(struct vt_device *vd)
return;
vd->vd_flags |= VDF_ASYNC;
- mtx_init(&vd->vd_lock, "vtdev", NULL, MTX_DEF);
- cv_init(&vd->vd_winswitch, "vtwswt");
-
- /* Init 25 Hz timer. */
- callout_init_mtx(&vd->vd_timer, &vd->vd_lock, 0);
-
for (i = 0; i < VT_MAXWINDOWS; i++) {
vw = vd->vd_windows[i];
if (vw == NULL) {
@@ -1894,6 +2002,7 @@ vt_upgrade(struct vt_device *vd)
terminal_maketty(vw->vw_terminal, "v%r", VT_UNIT(vw));
}
+ VT_LOCK(vd);
if (vd->vd_curwindow == NULL)
vd->vd_curwindow = vd->vd_windows[VT_CONSWINDOW];
@@ -1901,8 +2010,16 @@ vt_upgrade(struct vt_device *vd)
vt_allocate_keyboard(vd);
DPRINTF(20, "%s: vd_keyboard = %d\n", __func__, vd->vd_keyboard);
+ /* Init 25 Hz timer. */
+ callout_init_mtx(&vd->vd_timer, &vd->vd_lock, 0);
+
/* Start timer when everything ready. */
callout_reset(&vd->vd_timer, hz / VT_TIMERFREQ, vt_timer, vd);
+ VT_UNLOCK(vd);
+
+ /* Refill settings with new sizes. */
+ vt_resize(vd);
+
}
static void
@@ -1913,9 +2030,11 @@ vt_resize(struct vt_device *vd)
for (i = 0; i < VT_MAXWINDOWS; i++) {
vw = vd->vd_windows[i];
+ VT_LOCK(vd);
/* Assign default font to window, if not textmode. */
if (!(vd->vd_flags & VDF_TEXTMODE) && vw->vw_font == NULL)
vw->vw_font = vtfont_ref(&vt_font_default);
+ VT_UNLOCK(vd);
/* Resize terminal windows */
vt_change_font(vw, vw->vw_font);
}
@@ -1929,21 +2048,26 @@ vt_allocate(struct vt_driver *drv, void *softc)
if (main_vd == NULL) {
main_vd = malloc(sizeof *vd, M_VT, M_WAITOK|M_ZERO);
- printf("%s: VT initialize with new VT driver.\n", __func__);
+ printf("VT: initialize with new VT driver \"%s\".\n",
+ drv->vd_name);
+ mtx_init(&main_vd->vd_lock, "vtdev", NULL, MTX_DEF);
+ cv_init(&main_vd->vd_winswitch, "vtwswt");
+
} else {
/*
* Check if have rights to replace current driver. For example:
* it is bad idea to replace KMS driver with generic VGA one.
*/
if (drv->vd_priority <= main_vd->vd_driver->vd_priority) {
- printf("%s: Driver priority %d too low. Current %d\n ",
- __func__, drv->vd_priority,
- main_vd->vd_driver->vd_priority);
+ printf("VT: Driver priority %d too low. Current %d\n ",
+ drv->vd_priority, main_vd->vd_driver->vd_priority);
return;
}
- printf("%s: Replace existing VT driver.\n", __func__);
+ printf("VT: Replacing driver \"%s\" with new \"%s\".\n",
+ main_vd->vd_driver->vd_name, drv->vd_name);
}
vd = main_vd;
+ VT_LOCK(vd);
if (drv->vd_maskbitbltchr == NULL)
drv->vd_maskbitbltchr = drv->vd_bitbltchr;
@@ -1966,12 +2090,10 @@ vt_allocate(struct vt_driver *drv, void *softc)
vd->vd_driver = drv;
vd->vd_softc = softc;
vd->vd_driver->vd_init(vd);
+ VT_UNLOCK(vd);
vt_upgrade(vd);
- /* Refill settings with new sizes. */
- vt_resize(vd);
-
#ifdef DEV_SPLASH
if (vd->vd_flags & VDF_SPLASH)
vtterm_splash(vd);
diff --git a/sys/dev/vt/vt_sysmouse.c b/sys/dev/vt/vt_sysmouse.c
index 73ef39d735f23..92eee3c4b8c12 100644
--- a/sys/dev/vt/vt_sysmouse.c
+++ b/sys/dev/vt/vt_sysmouse.c
@@ -376,8 +376,10 @@ sysmouse_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
case MOUSE_MOUSECHAR:
return (0);
default:
+#ifdef VT_SYSMOUSE_DEBUG
printf("sysmouse: unknown ioctl: %c:%lx\n",
(char)IOCGROUP(cmd), IOCBASECMD(cmd));
+#endif
return (ENOIOCTL);
}
}