Diffstat (limited to 'sys/contrib/dev/iwlwifi/mvm')
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/binding.c         173
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/coex.c            845
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/constants.h       142
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/d3.c             3609
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c    1016
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/debugfs.c        2203
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/debugfs.h          44
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c  1458
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/ftm-responder.c   430
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/fw-api.h           41
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/fw.c             1646
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/led.c             119
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/link.c           1128
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c       1965
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mac80211.c       6626
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mld-key.c         408
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mld-mac.c         335
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c   1422
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mld-sta.c        1228
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/mvm.h            3033
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/nvm.c             644
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/offloading.c      214
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/ops.c            2221
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c        407
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/power.c           991
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/ptp.c             326
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/quota.c           258
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rfi.c             157
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rs-fw.c           738
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rs.c              214
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rs.h              112
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rx.c             1384
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/rxmq.c           2658
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/scan.c           3748
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/sf.c              288
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/sta.c            4415
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/sta.h             686
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/tdls.c            671
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/testmode.h         92
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/tests/hcmd.c       38
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/time-event.c     1505
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/time-event.h      229
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/time-sync.c       173
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/time-sync.h        30
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/tt.c              866
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/tx.c             2324
-rw-r--r--  sys/contrib/dev/iwlwifi/mvm/utils.c          1416
47 files changed, 54676 insertions, 0 deletions
diff --git a/sys/contrib/dev/iwlwifi/mvm/binding.c b/sys/contrib/dev/iwlwifi/mvm/binding.c
new file mode 100644
index 000000000000..58e9a940024d
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/binding.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2020 Intel Corporation
+ * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2022, 2024 Intel Corporation
+ */
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ int idx;
+
+ struct iwl_mvm_phy_ctxt *phyctxt;
+
+ u16 ids[MAX_MACS_IN_BINDING];
+ u16 colors[MAX_MACS_IN_BINDING];
+};
+
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+ struct iwl_mvm_iface_iterator_data *data)
+{
+ struct iwl_binding_cmd cmd;
+ struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+ int i, ret;
+ u32 status;
+ int size;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+ size = sizeof(cmd);
+ cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
+ phyctxt->channel->band));
+ } else {
+ size = IWL_BINDING_CMD_SIZE_V1;
+ }
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+ cmd.action = cpu_to_le32(action);
+ cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+ phyctxt->color));
+
+ for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+ for (i = 0; i < data->idx; i++)
+ cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+ data->colors[i]));
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ size, &cmd, &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+ action, ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif)
+ return;
+
+ if (mvmvif->deflink.phy_ctxt != data->phyctxt)
+ return;
+
+ if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+ return;
+
+ data->ids[data->idx] = mvmvif->id;
+ data->colors[data->idx] = mvmvif->color;
+ data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_phy_ctxt *phyctxt,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_iface_iterator_data data = {
+ .ignore_vif = vif,
+ .phyctxt = phyctxt,
+ };
+ u32 action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_iface_iterator,
+ &data);
+
+ /*
+ * If there are no other interfaces yet we
+ * need to create a new binding.
+ */
+ if (data.idx == 0) {
+ if (add)
+ action = FW_CTXT_ACTION_ADD;
+ else
+ action = FW_CTXT_ACTION_REMOVE;
+ }
+
+ if (add) {
+ if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+ return -EINVAL;
+
+ data.ids[data.idx] = mvmvif->id;
+ data.colors[data.idx] = mvmvif->color;
+ data.idx++;
+ }
+
+ return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt))
+ return -EINVAL;
+
+ /*
+ * Update SF - Disable if needed. if this fails, SF might still be on
+ * while many macs are bound, which is forbidden - so fail the binding.
+ */
+ if (iwl_mvm_sf_update(mvm, vif, false))
+ return -EINVAL;
+
+ return iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt,
+ true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt))
+ return -EINVAL;
+
+ ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt,
+ false);
+
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
+
+ return ret;
+}
+
+u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
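
Throughout binding.c above, firmware contexts are addressed by a packed "id and color" word built with FW_CMD_ID_AND_COLOR(). A minimal standalone sketch of that packing, assuming the upstream fw/api/context.h layout (8-bit id at bit 0, 8-bit color at bit 8, FW_CTXT_INVALID as the all-ones marker):

#include <stdint.h>

#define FW_CTXT_ID_POS    0
#define FW_CTXT_COLOR_POS 8

/* sketch of FW_CMD_ID_AND_COLOR(): id in the low byte, color above it */
static inline uint32_t id_and_color(uint32_t id, uint32_t color)
{
	return (id << FW_CTXT_ID_POS) | (color << FW_CTXT_COLOR_POS);
}

/* e.g. id 2, color 1 packs to 0x102; the color field lets the firmware
 * tell successive users of the same context id apart */
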
diff --git a/sys/contrib/dev/iwlwifi/mvm/coex.c b/sys/contrib/dev/iwlwifi/mvm/coex.c
new file mode 100644
index 000000000000..13cdc077d8d3
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/coex.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ */
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "fw/api/coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
+/* 20 MHz / 40 MHz below / 40 MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+ /* dummy entry for channel 0 */
+ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+ {
+ cpu_to_le64(0x0000001FFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000000FFFFULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ },
+ {
+ cpu_to_le64(0x000003FFFCULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ },
+ {
+ cpu_to_le64(0x00001FFFE0ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ },
+ {
+ cpu_to_le64(0x00007FFF80ULL),
+ cpu_to_le64(0x00007FFFFFULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ },
+ {
+ cpu_to_le64(0x0003FFFC00ULL),
+ cpu_to_le64(0x0003FFFFFFULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ },
+ {
+ cpu_to_le64(0x000FFFF000ULL),
+ cpu_to_le64(0x000FFFFFFCULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ },
+ {
+ cpu_to_le64(0x007FFF8000ULL),
+ cpu_to_le64(0x007FFFFFE0ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ },
+ {
+ cpu_to_le64(0x01FFFE0000ULL),
+ cpu_to_le64(0x01FFFFFF80ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ },
+ {
+ cpu_to_le64(0x0FFFF00000ULL),
+ cpu_to_le64(0x0FFFFFFC00ULL),
+ cpu_to_le64(0x0ULL),
+ },
+ {
+ cpu_to_le64(0x3FFFC00000ULL),
+ cpu_to_le64(0x3FFFFFF000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFFE000000ULL),
+ cpu_to_le64(0xFFFFFF8000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFFF8000000ULL),
+ cpu_to_le64(0xFFFFFE0000ULL),
+ cpu_to_le64(0x0)
+ },
+ {
+ cpu_to_le64(0xFE00000000ULL),
+ cpu_to_le64(0x0ULL),
+ cpu_to_le64(0x0ULL)
+ },
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum iwl_bt_coex_lut_type ret;
+ u16 phy_ctx_id;
+ u32 primary_ch_phy_id, secondary_ch_phy_id;
+
+ /*
+ * Checking that we hold mvm->mutex is a good idea, but the rate
+ * control can't acquire the mutex since it runs in Tx path.
+ * So this is racy in that case, but in the worst case, the AMPDU
+ * size limit will be wrong for a short time which is not a big
+ * issue.
+ */
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+
+ if (!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
+ rcu_read_unlock();
+ return BT_COEX_INVALID_LUT;
+ }
+
+ ret = BT_COEX_TX_DIS_LUT;
+
+ phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+ primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
+ secondary_ch_phy_id =
+ le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
+
+ if (primary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+ else if (secondary_ch_phy_id == phy_ctx_id)
+ ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+ /* else - default = TX TX disallowed */
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_bt_coex_cmd bt_cmd = {};
+ u32 mode;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+ switch (mvm->bt_force_ant_mode) {
+ case BT_FORCE_ANT_BT:
+ mode = BT_COEX_BT;
+ break;
+ case BT_FORCE_ANT_WIFI:
+ mode = BT_COEX_WIFI;
+ break;
+ default:
+ WARN_ON(1);
+ mode = 0;
+ }
+
+ bt_cmd.mode = cpu_to_le32(mode);
+ goto send_cmd;
+ }
+
+ bt_cmd.mode = cpu_to_le32(BT_COEX_NW);
+
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd.enabled_modules |=
+ cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
+
+ if (iwl_mvm_is_mplut_supported(mvm))
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+
+send_cmd:
+ memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
+}
+
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+ bool enable)
+{
+ struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
+ struct iwl_mvm_sta *mvmsta;
+ u32 value;
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
+ return 0;
+
+ /* nothing to do */
+ if (mvmsta->bt_reduced_txpower == enable)
+ return 0;
+
+ value = mvmsta->deflink.sta_id;
+
+ if (enable)
+ value |= BT_REDUCED_TX_POWER_BIT;
+
+ IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
+ enable ? "en" : "dis", sta_id);
+
+ cmd.reduced_txp = cpu_to_le32(value);
+ mvmsta->bt_reduced_txpower = enable;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP,
+ CMD_ASYNC, sizeof(cmd), &cmd);
+}
+
+struct iwl_bt_iterator_data {
+ struct iwl_bt_coex_prof_old_notif *notif;
+ struct iwl_mvm *mvm;
+ struct ieee80211_chanctx_conf *primary;
+ struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
+ u8 primary_load;
+ u8 secondary_load;
+};
+
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif_link_info *link_info,
+ bool enable, int rssi)
+{
+ link_info->bf_data.last_bt_coex_event = rssi;
+ link_info->bf_data.bt_coex_max_thold =
+ enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
+ link_info->bf_data.bt_coex_min_thold =
+ enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
+}
+
+#define MVM_COEX_TCM_PERIOD (HZ * 10)
+
+static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
+ struct iwl_bt_iterator_data *data)
+{
+ unsigned long now = jiffies;
+
+ if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
+ return;
+
+ mvm->bt_coex_last_tcm_ts = now;
+
+ /* We assume here that we don't have more than 2 vifs on 2.4GHz */
+
+ /* if the primary is low latency, it will stay primary */
+ if (data->primary_ll)
+ return;
+
+ if (data->primary_load >= data->secondary_load)
+ return;
+
+ swap(data->primary, data->secondary);
+}
+
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4 ||
+ iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1;
+ u8 wifi_loss_mid_high_rssi;
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_rate;
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
+ PROFILE_NOTIF, 0) >= 1) {
+ /* For now, we consider 2.4 GHz band / ANT_A only */
+ wifi_loss_mid_high_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
+ wifi_loss_low_rssi =
+ mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
+ } else {
+ wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
+ }
+
+ if (wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ if (primary)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+ /*
+ * In case we don't know the RSSI - take the lower wifi loss,
+ * so we will more likely enter eSR, and if RSSI is low -
+ * we will get an update on this and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = wifi_loss_mid_high_rssi;
+
+ else if (mvmvif->esr_active)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ wifi_loss_low_rssi :
+ wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
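
The two thresholds above give the decision its hysteresis: a link already in eSR must sink to the lower disable threshold before the pessimistic low-RSSI loss table is applied to it, while a link out of eSR must rise above the higher enable threshold to qualify again. A standalone sketch of just that comparison, with the constants.h values inlined (hypothetical helper, not driver API):

#include <stdbool.h>

static bool use_low_rssi_loss_table(int rssi_dbm, bool esr_active)
{
	/* -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH == -69 dBm,
	 * -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH  == -63 dBm */
	int thresh_dbm = esr_active ? -69 : -63;

	return rssi_dbm <= thresh_dbm;
}

A link at -66 dBm therefore keeps eSR if it already has it, but is not judged good enough to re-enter it, so a single noisy reading cannot toggle the state back and forth.
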
+
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ !iwl_mvm_vif_from_mac80211(vif)->authorized ||
+ WARN_ON(!link))
+ return;
+
+ if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
+ (s8)link->beacon_stats.avg_signal,
+ link_id == iwl_mvm_get_primary_link(vif)))
+ /* In case we decided to exit eSR - stay with the primary */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
+ iwl_mvm_get_primary_link(vif));
+}
+
+static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_bt_iterator_data *data,
+ unsigned int link_id)
+{
+ /* default smps_mode is AUTOMATIC - only used for client modes */
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 bt_activity_grading, min_ag_for_static_smps;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct iwl_mvm_vif_link_info *link_info;
+ struct ieee80211_bss_conf *link_conf;
+ int ave_rssi;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
+ return;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ /* This can happen due to races: if we receive the notification
+ * and have the mutex held, while mac80211 is stuck on our mutex
+ * in the middle of removing the link.
+ */
+ if (!link_conf)
+ return;
+
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+
+ /* If channel context is invalid or not on 2.4GHz .. */
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode, link_id);
+ iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
+ false);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false,
+ 0);
+ }
+ return;
+ }
+
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
+ min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
+ else
+ min_ag_for_static_smps = BT_HIGH_TRAFFIC;
+
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= min_ag_for_static_smps)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+
+ /* relax SMPS constraints for next association */
+ if (!vif->cfg.assoc)
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (link_info->phy_ctxt &&
+ (mvm->last_bt_notif.rrc_status & BIT(link_info->phy_ctxt->id)))
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d link %d: bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, link_info->fw_link_id,
+ bt_activity_grading, smps_mode);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode, link_id);
+
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
+
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is low latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
+
+ /* FIXME: TCM load per interface? or need something per link? */
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+ return;
+ }
+
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
+ if (!data->primary || data->primary == chanctx_conf)
+ data->primary = chanctx_conf;
+ else if (!data->secondary)
+ /* if secondary is not NULL, it might be a GO */
+ data->secondary = chanctx_conf;
+
+ /* FIXME: TCM load per interface? or need something per link? */
+ if (data->primary == chanctx_conf)
+ data->primary_load = mvm->tcm.result.load[mvmvif->id];
+ else if (data->secondary == chanctx_conf)
+ data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+ /*
+ * don't reduce the Tx power if one of these is true:
+ * we are in LOOSE
+ * BT is inactive
+ * we are not associated
+ */
+ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
+ !vif->cfg.assoc) {
+ iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0);
+ return;
+ }
+
+ /* try to get the avg rssi from fw */
+ ave_rssi = link_info->bf_data.ave_beacon_signal;
+
+ /* if the RSSI isn't valid, fake it is very low */
+ if (!ave_rssi)
+ ave_rssi = -100;
+ if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
+ if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
+ true))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
+ if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
+ false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ }
+
+ /* Begin to monitor the RSSI: it may influence the reduced Tx power */
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi);
+}
+
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_bt_iterator_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ unsigned int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!mvmvif->ap_ibss_active)
+ return;
+ break;
+ default:
+ return;
+ }
+
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
+ iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
+}
+
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf =
+ rcu_dereference_check(link_conf->chanctx_conf,
+ lockdep_is_held(&mvm->mutex));
+
+ if ((!chanctx_conf ||
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
+ continue;
+
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+ }
+}
+
+static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
+{
+ struct iwl_bt_iterator_data data = {
+ .mvm = mvm,
+ .notif = &mvm->last_bt_notif,
+ };
+ struct iwl_bt_coex_ci_cmd cmd = {};
+ u8 ci_bw_idx;
+
+ /* Ignore updates if we are in force mode */
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+ return;
+
+ rcu_read_lock();
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_notif_iterator, &data);
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
+ iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
+
+ if (data.primary) {
+ struct ieee80211_chanctx_conf *chan = data.primary;
+ if (WARN_ON(!chan->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ } else {
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_primary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.primary_ch_phy_id =
+ cpu_to_le32(*((u16 *)data.primary->drv_priv));
+ }
+
+ if (data.secondary) {
+ struct ieee80211_chanctx_conf *chan = data.secondary;
+ if (WARN_ON(!data.secondary->def.chan)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+ ci_bw_idx = 0;
+ } else {
+ if (chan->def.center_freq1 >
+ chan->def.chan->center_freq)
+ ci_bw_idx = 2;
+ else
+ ci_bw_idx = 1;
+ }
+
+ cmd.bt_secondary_ci =
+ iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+ cmd.secondary_ch_phy_id =
+ cpu_to_le32(*((u16 *)data.secondary->drv_priv));
+ }
+
+ rcu_read_unlock();
+
+ /* Don't spam the fw with the same command over and over */
+ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
+ sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+ memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+ }
+}
+
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_bt_coex_prof_old_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+ IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
+
+ /* remember this notification for future use: rssi fluctuations */
+ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+
+ iwl_mvm_bt_coex_notif_handle(mvm);
+}
+
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ const struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->last_bt_wifi_loss = *notif;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_coex_notif_iterator,
+ mvm);
+}
+
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum ieee80211_rssi_event_data rssi_event)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Ignore updates if we are in force mode */
+ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+ return;
+
+ /*
+ * Rssi update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA)
+ return;
+
+ /* No BT - reports should be disabled */
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
+ return;
+
+ IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+ rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+
+ /*
+ * Check if rssi is good enough for reduced Tx power, but not in loose
+ * scheme.
+ */
+ if (rssi_event == RSSI_EVENT_LOW ||
+ iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->deflink.ap_sta_id,
+ false);
+ else
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->deflink.ap_sta_id,
+ true);
+
+ if (ret)
+ IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
+
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+ if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
+ return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+ /* tight coex, high bt traffic, reduce AGG time limit */
+ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt;
+ enum iwl_bt_coex_lut_type lut_type;
+
+ if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+ return true;
+
+ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC)
+ return true;
+
+ /*
+ * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+ * since BT is already killed.
+ * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+ * we Tx.
+ * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
+ */
+ lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+ return lut_type != BT_COEX_LOOSE_LUT;
+}
+
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
+{
+ if (ant & mvm->cfg->non_shared_ant)
+ return true;
+
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+ BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
+{
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum nl80211_band band)
+{
+ u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+ if (band != NL80211_BAND_2GHZ)
+ return false;
+
+ return bt_activity >= BT_LOW_TRAFFIC;
+}
+
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) &&
+ (mvm->cfg->non_shared_ant & enabled_ants))
+ return mvm->cfg->non_shared_ant;
+
+ return first_antenna(enabled_ants);
+}
+
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac)
+{
+ __le16 fc = hdr->frame_control;
+ bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
+
+ if (info->band != NL80211_BAND_2GHZ)
+ return 0;
+
+ if (unlikely(mvm->bt_tx_prio))
+ return mvm->bt_tx_prio - 1;
+
+ if (likely(ieee80211_is_data(fc))) {
+ if (likely(ieee80211_is_data_qos(fc))) {
+ switch (ac) {
+ case IEEE80211_AC_BE:
+ return mplut_enabled ? 1 : 0;
+ case IEEE80211_AC_VI:
+ return mplut_enabled ? 2 : 3;
+ case IEEE80211_AC_VO:
+ return 3;
+ default:
+ return 0;
+ }
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ return 3;
+ } else
+ return 0;
+ } else if (ieee80211_is_mgmt(fc)) {
+ return ieee80211_is_disassoc(fc) ? 0 : 3;
+ } else if (ieee80211_is_ctl(fc)) {
+ /* ignore cfend and cfendack frames as we never send those */
+ return 3;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+ iwl_mvm_bt_coex_notif_handle(mvm);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/constants.h b/sys/contrib/dev/iwlwifi/mvm/constants.h
new file mode 100644
index 000000000000..776600ddaea6
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/constants.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2015 Intel Deutschland GmbH
+ */
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#include <linux/ieee80211.h>
+#include "fw-api.h"
+
+#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15
+#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
+#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
+#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MVM_PS_SNOOZE_INTERVAL 25
+#define IWL_MVM_PS_SNOOZE_WINDOW 50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62
+#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
+#define IWL_MVM_BT_COEX_MPLUT 1
+#define IWL_MVM_BT_COEX_RRC 1
+#define IWL_MVM_BT_COEX_TTC 1
+#define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200
+#define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451
+#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30
+#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0
+#define IWL_MVM_QUOTA_THRESHOLD 4
+#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
+#define IWL_MVM_TOF_IS_RESPONDER 0
+#define IWL_MVM_ADWELL_ENABLE 1
+#define IWL_MVM_ADWELL_MAX_BUDGET 0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */
+#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
+#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
+#define IWL_MVM_NON_TRANSMITTING_AP 0
+#define IWL_MVM_CONN_LISTEN_INTERVAL 10
+#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
+#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3
+#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2
+#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2
+#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1
+#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16
+#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3
+#define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1
+#define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3
+#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8
+#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */
+#define IWL_MVM_RS_MISSED_RATE_MAX 15
+#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160
+#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480
+#define IWL_MVM_RS_LEGACY_TABLE_COUNT 160
+#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400
+#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500
+#define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */
+#define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */
+#define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */
+#define IWL_MVM_RS_AGG_DISABLE_START 3
+#define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */
+#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */
+#define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */
+#define IWL_MVM_RS_TPC_TX_POWER_STEP 3
+#define IWL_MVM_ENABLE_EBS 1
+#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
+#define IWL_MVM_FTM_INITIATOR_DYNACK true
+#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MVM_FTM_TEST_INCORRECT_SAC false
+#define IWL_MVM_FTM_R2I_MAX_REP 7
+#define IWL_MVM_FTM_I2R_MAX_REP 7
+#define IWL_MVM_FTM_R2I_MAX_STS 1
+#define IWL_MVM_FTM_I2R_MAX_STS 1
+#define IWL_MVM_FTM_R2I_MAX_TOTAL_LTF 3
+#define IWL_MVM_FTM_I2R_MAX_TOTAL_LTF 3
+#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
+#define IWL_MVM_FTM_RESP_NDP_SUPPORT true
+#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
+#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
+#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
+#define IWL_MVM_D3_DEBUG false
+#define IWL_MVM_USE_TWT true
+#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
+#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
+/* 20016 pSec is 6 meter RTT, meaning 3 meter range */
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2
+#define IWL_MVM_DISABLE_AP_FILS false
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
+#define IWL_MVM_AUTO_EML_ENABLE true
+
+#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
+#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
+#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
+#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
+
+#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
+#endif /* __MVM_CONSTANTS_H */
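
The 20016 pSec undershoot/overshoot bound above encodes a distance: light covers roughly 0.3 mm per picosecond, so the "6 meter RTT, meaning 3 meter range" comment checks out. A quick verification, illustrative only:

#include <stdio.h>

int main(void)
{
	const double c = 299792458.0;          /* speed of light, m/s */
	double rtt_path_m = 20016e-12 * c;     /* ~6.0006 m round trip */
	double range_m = rtt_path_m / 2.0;     /* ~3.0003 m one way */

	printf("%.4f m RTT path, %.4f m range\n", rtt_path_m, range_m);
	return 0;
}
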
diff --git a/sys/contrib/dev/iwlwifi/mvm/d3.c b/sys/contrib/dev/iwlwifi/mvm/d3.c
new file mode 100644
index 000000000000..c7d298294ec1
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/d3.c
@@ -0,0 +1,3609 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#if defined(__FreeBSD__)
+#include <linux/string.h>
+#include <linux/delay.h>
+#endif
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/addrconf.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+#include "fw/img.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->rekey_data.kek_len = data->kek_len;
+ mvmvif->rekey_data.kck_len = data->kck_len;
+ memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len);
+ memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
+ mvmvif->rekey_data.akm = data->akm & 0xFF;
+ mvmvif->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
+ mvmvif->rekey_data.valid = true;
+
+ mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, mvmvif->tentative_addrs);
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+ int i;
+
+ for (i = 0; i < IWL_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
+}
+
+static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
+ struct iwl_mvm_key_pn *ptk_pn,
+ struct ieee80211_key_seq *seq,
+ int tid, int queues)
+{
+ const u8 *ret = seq->ccmp.pn;
+ int i;
+
+ /* get the PN from mac80211, used on the default queue */
+ ieee80211_get_key_rx_seq(key, tid, seq);
+
+ /* and use the internal data for the other queues */
+ for (i = 1; i < queues; i++) {
+ const u8 *tmp = ptk_pn->q[i].pn[tid];
+
+ if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
+ ret = tmp;
+ }
+
+ return ret;
+}
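
The memcmp() above works as a numeric comparison because mac80211 stores the CCMP/GCMP packet number as a six-byte big-endian array (pn[0] is the most significant byte), so lexicographic byte order coincides with numeric order. A minimal illustration:

#include <string.h>

/* PN 0x000000000100 vs PN 0x0000000000ff, both big-endian */
static const unsigned char pn_a[6] = { 0, 0, 0, 0, 0x01, 0x00 };
static const unsigned char pn_b[6] = { 0, 0, 0, 0, 0x00, 0xff };

/* memcmp(pn_a, pn_b, 6) > 0: pn_a is the numerically larger (newer) PN,
 * which is exactly what iwl_mvm_find_max_pn() keeps */
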
+
+struct wowlan_key_reprogram_data {
+ bool error;
+ int wep_key_idx;
+};
+
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct wowlan_key_reprogram_data *data = _data;
+ int ret;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+ DEFINE_RAW_FLEX(struct iwl_mvm_wep_key_cmd, wkc, wep_key, 1);
+ struct iwl_mvm_wep_key *wep_key = wkc->wep_key;
+
+ wkc->mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ wkc->num_keys = 1;
+ /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+ wkc->decryption_type = STA_KEY_FLG_WEP;
+ wep_key->key_index = key->keyidx;
+ wep_key->key_size = key->keylen;
+
+ /*
+	 * This will fail -- the key functions don't support setting
+	 * pairwise WEP keys. However, that's better than silently
+ * failing WoWLAN. Or maybe not?
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ break;
+
+ memcpy(&wep_key->key[3], key->key, key->keylen);
+ if (key->keyidx == mvmvif->tx_key_idx) {
+ /* TX key must be at offset 0 */
+ wep_key->key_offset = 0;
+ } else {
+ /* others start at 1 */
+ data->wep_key_idx++;
+ wep_key->key_offset = data->wep_key_idx;
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
+ __struct_size(wkc), wkc);
+ data->error = ret != 0;
+
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
+ mutex_unlock(&mvm->mutex);
+
+ /* don't upload key again */
+ return;
+ }
+ default:
+ data->error = true;
+ return;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ return;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /*
+ * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+ * but we also shouldn't abort suspend due to that. It does have
+ * support for the IGTK key renewal, but doesn't really use the
+ * IGTK for anything. This means we could spuriously wake up or
+ * be deauthenticated, but that was considered acceptable.
+ */
+ return;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ break;
+ }
+
+ mutex_lock(&mvm->mutex);
+ /*
+ * The D3 firmware hardcodes the key offset 0 as the key it
+ * uses to transmit packets to the AP, i.e. the PTK.
+ */
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ mvm->ptk_ivlen = key->iv_len;
+ mvm->ptk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
+ } else {
+ /*
+ * firmware only supports TSC/RSC for a single key,
+ * so if there are multiple keep overwriting them
+ * with new ones -- this relies on mac80211 doing
+ * list_add_tail().
+ */
+ mvm->gtk_ivlen = key->iv_len;
+ mvm->gtk_icvlen = key->icv_len;
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
+ }
+ mutex_unlock(&mvm->mutex);
+ data->error = ret != 0;
+}
+
+struct wowlan_key_rsc_tsc_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 *rsc_tsc;
+ bool have_rsc_tsc;
+};
+
+static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct wowlan_key_rsc_tsc_data *data = _data;
+ struct aes_sc *aes_sc;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct ieee80211_key_seq seq;
+ int i;
+
+ switch (key->cipher) {
+ default:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ u64 pn64;
+
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc =
+ &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they need to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ }
+
+ data->have_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta) {
+ struct aes_sc *aes_tx_sc;
+ u64 pn64;
+
+ aes_sc =
+ data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc =
+ &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ pn64 = atomic64_read(&key->tx_pn);
+ aes_tx_sc->pn = cpu_to_le64(pn64);
+ } else {
+ aes_sc =
+ data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211/our RX code use TID 0 for checking the PN.
+ */
+ if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+ const u8 *pn;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
+ break;
+ }
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
+ mvm->trans->info.num_rxqs);
+ aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+
+ rcu_read_unlock();
+ } else {
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ }
+ data->have_rsc_tsc = true;
+ break;
+ }
+}
+
+struct wowlan_key_rsc_v5_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc;
+ bool have_rsc;
+ int gtks;
+ int gtk_ids[4];
+};
+
+static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct wowlan_key_rsc_v5_data *data = _data;
+ struct ieee80211_key_seq seq;
+ __le64 *rsc;
+ int i;
+
+ /* only for ciphers that can be PTK/GTK */
+ switch (key->cipher) {
+ default:
+ return;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ break;
+ }
+
+ if (sta) {
+ rsc = data->rsc->ucast_rsc;
+ } else {
+ if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids)))
+ return;
+ data->gtk_ids[data->gtks] = key->keyidx;
+ rsc = data->rsc->mcast_rsc[data->gtks % 2];
+ if (WARN_ON(key->keyidx >=
+ ARRAY_SIZE(data->rsc->mcast_key_id_map)))
+ return;
+ data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
+ if (data->gtks >= 2) {
+ int prev = data->gtks - 2;
+ int prev_idx = data->gtk_ids[prev];
+
+ data->rsc->mcast_key_id_map[prev_idx] =
+ IWL_MCAST_KEY_MAP_INVALID;
+ }
+ data->gtks++;
+ }
+
+ switch (key->cipher) {
+ default:
+ WARN_ON(1);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they need to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+
+ rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) |
+ seq.tkip.iv16);
+ }
+
+ data->have_rsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211/our RX code use TID 0 for checking the PN.
+ */
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+ const u8 *pn;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
+ break;
+ }
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
+ mvm->trans->info.num_rxqs);
+ rsc[i] = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+
+ rcu_read_unlock();
+ } else {
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ rsc[i] = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ }
+ data->have_rsc = true;
+ break;
+ }
+}
+
+static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
+{
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+ if (ver == 5) {
+ struct wowlan_key_rsc_v5_data data = {};
+ int i;
+
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+ if (!data.rsc)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
+ data.rsc->mcast_key_id_map[i] =
+ IWL_MCAST_KEY_MAP_INVALID;
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
+
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_wowlan_get_rsc_v5_data,
+ &data);
+
+ if (data.have_rsc)
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
+ CMD_ASYNC, sizeof(*data.rsc),
+ data.rsc);
+ else
+ ret = 0;
+ kfree(data.rsc);
+ } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ struct wowlan_key_rsc_tsc_data data = {};
+
+ data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL);
+ if (!data.rsc_tsc)
+ return -ENOMEM;
+
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_wowlan_get_rsc_tsc_data,
+ &data);
+
+ if (data.have_rsc_tsc)
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
+ CMD_ASYNC,
+ sizeof(*data.rsc_tsc),
+ data.rsc_tsc);
+ else
+ ret = 0;
+ kfree(data.rsc_tsc);
+ } else {
+ ret = 0;
+ WARN_ON_ONCE(1);
+ }
+
+ return ret;
+}
+
+struct wowlan_key_tkip_data {
+ struct iwl_wowlan_tkip_params_cmd tkip;
+ bool have_tkip_keys;
+};
+
+static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct wowlan_key_tkip_data *data = _data;
+ struct iwl_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWL_P1K_SIZE];
+ int i;
+
+ switch (key->cipher) {
+ default:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ u64 pn64;
+
+ rx_p1ks = data->tkip.rx_uni;
+
+ pn64 = atomic64_read(&key->tx_pn);
+
+ ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+ p1k);
+ iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k);
+
+ memcpy(data->tkip.mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip.mic_keys.rx_unicast;
+ } else {
+ rx_p1ks = data->tkip.rx_multi;
+ rx_mic_key = data->tkip.mic_keys.rx_mcast;
+ }
+
+ for (i = 0; i < IWL_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ data->have_tkip_keys = true;
+ break;
+ }
+}
+
+struct wowlan_key_gtk_type_iter {
+ struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
+};
+
+static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct wowlan_key_gtk_type_iter *data = _data;
+ __le32 *cipher = NULL;
+
+ if (key->keyidx == 4 || key->keyidx == 5)
+ cipher = &data->kek_kck_cmd->igtk_cipher;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ cipher = &data->kek_kck_cmd->bigtk_cipher;
+
+ switch (key->cipher) {
+ default:
+ return;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!sta)
+ data->kek_kck_cmd->gtk_cipher =
+ cpu_to_le32(STA_KEY_FLG_TKIP);
+ return;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ return;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (!sta)
+ data->kek_kck_cmd->gtk_cipher =
+ cpu_to_le32(STA_KEY_FLG_CCM);
+ return;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (!sta)
+ data->kek_kck_cmd->gtk_cipher =
+ cpu_to_le32(STA_KEY_FLG_GCMP);
+ return;
+ }
+}
+
+static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif_link_info *mvm_link,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int i, err;
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
+
+ pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = wowlan->n_patterns;
+ if (ver >= 3)
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ pattern_cmd->patterns[i].pattern_type =
+ WOWLAN_PATTERN_TYPE_BITMASK;
+
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+ pattern_cmd->patterns[i].u.bitmask.pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
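
Both pattern commands above rely on the cfg80211/nl80211 mask convention: the mask is a bitmap with one bit per pattern byte, lowest bit first, where a set bit means that byte of the frame must match the pattern. That is why mask_len is DIV_ROUND_UP(pattern_len, 8). A sketch of the lookup, with a hypothetical helper name:

/* bit i of the mask guards byte i of the pattern (LSB first) */
static int pattern_byte_must_match(const unsigned char *mask, int i)
{
	return (mask[i / 8] >> (i % 8)) & 1;
}

/* a 20-byte pattern carries DIV_ROUND_UP(20, 8) == 3 mask bytes;
 * mask[0] == 0x3f matches bytes 0..5 and ignores bytes 6 and 7 */
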
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
+ u8 chains_static, chains_dynamic;
+ struct cfg80211_chan_def chandef, ap_def;
+ int ret, i;
+ struct iwl_binding_cmd_v1 binding_cmd = {};
+ struct iwl_time_quota_cmd quota_cmd = {};
+ struct iwl_time_quota_data *quota;
+ u32 status;
+
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
+ return -EINVAL;
+
+ /* add back the PHY */
+ if (WARN_ON(!mvmvif->deflink.phy_ctxt))
+ return -EINVAL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (WARN_ON(!ctx)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ chandef = ctx->def;
+ ap_def = ctx->ap;
+ chains_static = ctx->rx_chains_static;
+ chains_dynamic = ctx->rx_chains_dynamic;
+ rcu_read_unlock();
+
+ ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
+ &ap_def, chains_static, chains_dynamic);
+ if (ret)
+ return ret;
+
+ /* add back the MAC */
+ mvmvif->uploaded = false;
+
+ if (WARN_ON(!vif->cfg.assoc))
+ return -EINVAL;
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ return ret;
+
+ /* add back binding - XXX refactor? */
+ binding_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id,
+ mvmvif->deflink.phy_ctxt->color));
+ binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ binding_cmd.phy =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id,
+ mvmvif->deflink.phy_ctxt->color));
+ binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+ binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+ IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
+ &status);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+ return ret;
+ }
+
+ if (status) {
+ IWL_ERR(mvm, "Binding command failed: %u\n", status);
+ return -EIO;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
+ if (ret)
+ return ret;
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ ap_sta);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (ret)
+ return ret;
+
+ /* and some quota */
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+ quota->id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id,
+ mvmvif->deflink.phy_ctxt->color));
+ quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+ for (i = 1; i < MAX_BINDINGS; i++) {
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+ quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+ iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+ if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm, false))
+ IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
+ return 0;
+}
+
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NON_QOS_TX_COUNTER_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int err;
+ u32 size;
+
+ cmd.data[0] = &query_cmd;
+ cmd.len[0] = sizeof(query_cmd);
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ if (err)
+ return err;
+
+ size = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (size < sizeof(__le16)) {
+ err = -EINVAL;
+ } else {
+ err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+		/*
+		 * firmware returns the next sequence number, not the last
+		 * used one; 802.11 sequence numbers step by 0x10 because
+		 * the low four bits of the sequence-control field carry
+		 * the fragment number
+		 */
+		err = (u16)(err - 0x10);
+ }
+
+ iwl_free_resp(&cmd);
+ return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_nonqos_seq_query_cmd query_cmd = {
+ .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+ .mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .value = cpu_to_le16(mvmvif->seqno),
+ };
+
+ /* return if called during restart, not resume from D3 */
+ if (!mvmvif->seqno_valid)
+ return;
+
+ mvmvif->seqno_valid = false;
+
+ if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
+ sizeof(query_cmd), &query_cmd))
+ IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
+{
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+ iwl_mvm_stop_device(mvm);
+ /*
+ * Set the HW restart bit -- this is mostly true as we're
+ * going to load new firmware and reprogram that, though
+ * the reprogramming is going to be manual to avoid adding
+	 * all the MACs that aren't supported.
+ * We don't have to clear up everything though because the
+ * reprogramming is manual. When we resume, we'll actually
+ * go through a proper restart sequence again to switch
+ * back to the runtime firmware image.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ /* the fw is reset, so all the keys are cleared */
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+ mvm->ptk_ivlen = 0;
+ mvm->ptk_icvlen = 0;
+	mvm->gtk_ivlen = 0;
+	mvm->gtk_icvlen = 0;
+
+ return iwl_mvm_load_d3_fw(mvm);
+}
+
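+/*
+ * Translate the cfg80211 WoWLAN triggers into firmware wakeup filter
+ * bits, and (for older command versions) fill in the non-QoS and QoS
+ * sequence counters that the firmware expects from the driver.
+ */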
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct ieee80211_sta *ap_sta)
+{
+ struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+
+ /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
+
+ wowlan_config_cmd->is_11n_connection =
+ ap_sta->deflink.ht_cap.ht_supported;
+ wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+ ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
+ /* Query the last used seqno and set it */
+ int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+
+ if (ret < 0)
+ return ret;
+
+ wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+ }
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 7)
+ iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+
+ if (wowlan->disconnect)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ if (wowlan->tcp) {
+ /*
+ * Set the "link change" (really "link lost") flag as well
+ * since that implies losing the TCP connection.
+ */
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ }
+
+ if (wowlan->any) {
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+ }
+
+ return 0;
+}
+
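+/*
+ * Program the key material the firmware needs while the host sleeps:
+ * RSC/TSC counters, TKIP MIC keys (when the firmware lacks
+ * IWL_UCODE_TLV_API_TKIP_MIC_KEYS) and, if rekeying is offloaded,
+ * the KEK/KCK material, each in the layout its command version uses.
+ */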
+static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
+{
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+ struct wowlan_key_reprogram_data key_data = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+ u8 cmd_ver;
+ size_t cmd_size;
+
+ if (!unified) {
+ /*
+ * if we have to configure keys, call ieee80211_iter_keys(),
+ * as we need non-atomic context in order to take the
+ * required locks.
+ */
+ /*
+ * Note that currently we don't use CMD_ASYNC in the iterator.
+ * In case of key_data.configure_keys, all the configured
+ * commands are SYNC, and iwl_mvm_wowlan_program_keys() will
+ * take care of locking/unlocking mvm->mutex.
+ */
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
+ &key_data);
+
+ if (key_data.error)
+ return -EIO;
+ }
+
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
+ if (ret)
+ return ret;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ struct wowlan_key_tkip_data tkip_data = {};
+ int size;
+
+ if (ver == 2) {
+ size = sizeof(tkip_data.tkip);
+ tkip_data.tkip.sta_id =
+ cpu_to_le32(mvm_link->ap_sta_id);
+ } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
+ } else {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data,
+ &tkip_data);
+
+ if (tkip_data.have_tkip_keys) {
+ /* send relevant data according to CMD version */
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WOWLAN_TKIP_PARAM,
+ CMD_ASYNC, size,
+ &tkip_data.tkip);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* configure rekey data only if offloaded rekey is supported (d3) */
+ if (mvmvif->rekey_data.valid) {
+ struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd =
+ &kek_kck_cmd;
+ struct wowlan_key_gtk_type_iter gtk_type_data = {
+ .kek_kck_cmd = _kek_kck_cmd,
+ };
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
+ cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
+ return -EINVAL;
+
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter,
+ &gtk_type_data);
+
+ memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+ mvmvif->rekey_data.kck_len);
+ kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
+ memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+ mvmvif->rekey_data.kek_len);
+ kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
+ kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+ kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
+
+ if (cmd_ver == 4) {
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
+ } else {
+ if (cmd_ver == 3)
+ cmd_size =
+ sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+ else
+ cmd_size =
+ sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+ /* skip the sta_id at the beginning */
+ _kek_kck_cmd = (void *)
+ ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
+ }
+
+ IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
+ mvmvif->rekey_data.akm);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL,
+ CMD_ASYNC, cmd_size, _kek_kck_cmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
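+/*
+ * Top-level WoWLAN programming: switch to the D3 image if needed,
+ * configure the keys, send the version-appropriate configuration
+ * command, then the wakeup patterns and protocol offloads.
+ */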
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd_v6,
+ struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
+ struct ieee80211_sta *ap_sta)
+{
+ int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ mvm->offload_tid = wowlan_config_cmd_v6->offloading_tid;
+
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+ if (ret)
+ return ret;
+ }
+
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
+ if (ret)
+ return ret;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) > 6) {
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = wowlan_config_cmd_v6->wakeup_filter,
+ .wowlan_ba_teardown_tids =
+ wowlan_config_cmd_v6->wowlan_ba_teardown_tids,
+ .is_11n_connection =
+ wowlan_config_cmd_v6->is_11n_connection,
+ .offloading_tid = wowlan_config_cmd_v6->offloading_tid,
+ .flags = wowlan_config_cmd_v6->flags,
+ .sta_id = wowlan_config_cmd_v6->sta_id,
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ } else {
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+ sizeof(*wowlan_config_cmd_v6),
+ wowlan_config_cmd_v6);
+ }
+ if (ret)
+ return ret;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
+ else
+ ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0,
+ mvm_link->ap_sta_id);
+}
+
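+/*
+ * Net-detect: start a scheduled scan that runs while suspended, and
+ * save the match sets and channel list so the wakeup can be reported
+ * in cfg80211 terms on resume.
+ */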
+static int
+iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
+ } else {
+ /* In theory, we wouldn't have to stop a running sched
+ * scan in order to start another one (for
+ * net-detect). But in practice this doesn't seem to
+ * work properly, so stop any running sched_scan now.
+ */
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+ IWL_MVM_SCAN_NETDETECT);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
+ return -EBUSY;
+
+ /* save the sched scan matchsets... */
+ if (nd_config->n_match_sets) {
+ mvm->nd_match_sets = kmemdup(nd_config->match_sets,
+ sizeof(*nd_config->match_sets) *
+ nd_config->n_match_sets,
+ GFP_KERNEL);
+ if (mvm->nd_match_sets)
+ mvm->n_nd_match_sets = nd_config->n_match_sets;
+ }
+
+ /* ...and the sched scan channels for later reporting */
+ mvm->nd_channels = kmemdup(nd_config->channels,
+ sizeof(*nd_config->channels) *
+ nd_config->n_channels,
+ GFP_KERNEL);
+ if (mvm->nd_channels)
+ mvm->n_nd_channels = nd_config->n_channels;
+
+ return 0;
+}
+
+static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
+{
+ kfree(mvm->nd_match_sets);
+ mvm->nd_match_sets = NULL;
+ mvm->n_nd_match_sets = 0;
+ kfree(mvm->nd_channels);
+ mvm->nd_channels = NULL;
+ mvm->n_nd_channels = 0;
+}
+
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan,
+ bool test)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = NULL;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ /*
+ * Program the minimum sleep time to 10 seconds, as many
+ * platforms have issues processing a wakeup signal while
+ * still being in the process of suspending.
+ */
+ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+ };
+ struct iwl_host_cmd d3_cfg_cmd = {
+ .id = D3_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data[0] = &d3_cfg_cmd_data,
+ .len[0] = sizeof(d3_cfg_cmd_data),
+ };
+ int ret;
+ int len __maybe_unused;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ if (!wowlan) {
+ /*
+ * mac80211 shouldn't get here, but for D3 test
+ * it doesn't warrant a warning
+ */
+ WARN_ON(!test);
+ return -EINVAL;
+ }
+
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ synchronize_net();
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ if (mvm_link->ap_sta_id == IWL_INVALID_STA) {
+ /* if we're not associated, this must be netdetect */
+ if (!wowlan->nd_config) {
+ ret = 1;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_netdetect_config(
+ mvm, wowlan, wowlan->nd_config, vif);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = true;
+ } else {
+ struct iwl_wowlan_config_cmd_v6 wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
+
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
+
+ ap_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(ap_sta)) {
+ ret = -EINVAL;
+ goto out_noreset;
+ }
+
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
+ ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, ap_sta);
+ if (ret)
+ goto out_noreset;
+ ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+ vif, mvmvif, mvm_link, ap_sta);
+ if (ret)
+ goto out;
+
+ mvm->net_detect = false;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ goto out;
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->d3_wake_sysassert)
+ d3_cfg_cmd_data.wakeup_flags |=
+ cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
+#endif
+
+ /*
+ * Prior to 9000 device family the driver needs to stop the dbg
+ * recording before entering D3. In later devices the FW stops the
+ * recording automatically.
+ */
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
+
+ /* must be last -- this switches firmware state */
+ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+ if (ret)
+ goto out;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+ if (len >= sizeof(u32)) {
+ mvm->d3_test_pme_ptr =
+ le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
+ }
+#endif
+ iwl_free_resp(&d3_cfg_cmd);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ out:
+ if (ret < 0) {
+ iwl_mvm_free_nd(mvm);
+
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ }
+ out_noreset:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ iwl_mvm_pause_tcm(mvm, true);
+
+ mutex_lock(&mvm->mutex);
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+
+ return __iwl_mvm_suspend(hw, wowlan, false);
+}
+
+struct iwl_multicast_key_data {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ u8 id;
+ u8 ipn[6];
+};
+
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+ u64 replay_ctr;
+ u32 num_of_gtk_rekeys;
+ u32 received_beacons;
+ u32 wakeup_reasons;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ u16 pattern_number;
+ u16 non_qos_seq_ctr;
+ u16 qos_seq_ctr[8];
+ u8 tid_tear_down;
+ u8 tid_offloaded_tx;
+
+ struct {
+ /* including RX MIC key for TKIP */
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ u8 id;
+ } gtk[WOWLAN_GTK_KEYS_NUM];
+
+ struct {
+ /*
+ * We store both the TKIP and AES representations
+ * coming from the firmware because we decode the
+ * data from there before we iterate the keys and
+ * know which one we need.
+ */
+ struct {
+ struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT];
+ } tkip, aes;
+
+ /*
+ * We use -1 for when we have valid data but don't know
+ * the key ID from firmware, and thus it needs to be
+ * installed with the last key (depending on rekeying).
+ */
+ s8 key_id;
+ bool valid;
+ } gtk_seq[2];
+
+ struct {
+ /* Same as above */
+ struct {
+ struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT];
+ u64 tx_pn;
+ } tkip, aes;
+ } ptk;
+
+ struct iwl_multicast_key_data igtk;
+ struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+
+ u8 *wake_packet;
+};
+
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
+{
+ struct sk_buff *pkt = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ u32 reasons = status->wakeup_reasons;
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ wakeup_report = NULL;
+ goto report;
+ }
+
+ pm_wakeup_event(mvm->dev, 0);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+ wakeup.pattern_idx =
+ status->pattern_number;
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+ wakeup.gtk_rekey_failure = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+ wakeup.eap_identity_req = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+ wakeup.tcp_connlost = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+ wakeup.tcp_nomoretokens = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+ wakeup.tcp_match = true;
+
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
+ if (status->wake_packet) {
+ int pktsize = status->wake_packet_bufsize;
+ int pktlen = status->wake_packet_length;
+ const u8 *pktdata = status->wake_packet;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
+ int truncated = pktlen - pktsize;
+
+ /* this would be a firmware bug */
+ if (WARN_ON_ONCE(truncated < 0))
+ truncated = 0;
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int ivlen = 0, icvlen = 4; /* also FCS */
+
+ pkt = alloc_skb(pktsize, GFP_KERNEL);
+ if (!pkt)
+ goto report;
+
+ skb_put_data(pkt, pktdata, hdrlen);
+ pktdata += hdrlen;
+ pktsize -= hdrlen;
+
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ /*
+ * This is unlocked and using gtk_i(c)vlen,
+ * but since everything is under RTNL still
+ * that's not really a problem - changing
+ * it would be difficult.
+ */
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ ivlen = mvm->gtk_ivlen;
+ icvlen += mvm->gtk_icvlen;
+ } else {
+ ivlen = mvm->ptk_ivlen;
+ icvlen += mvm->ptk_icvlen;
+ }
+ }
+
+ /* if truncated, FCS/ICV is (partially) gone */
+			if (truncated >= icvlen) {
+				truncated -= icvlen;
+				icvlen = 0;
+ } else {
+ icvlen -= truncated;
+ truncated = 0;
+ }
+
+ pktsize -= ivlen + icvlen;
+ pktdata += ivlen;
+
+ skb_put_data(pkt, pktdata, pktsize);
+
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ goto report;
+ wakeup.packet = pkt->data;
+ wakeup.packet_present_len = pkt->len;
+ wakeup.packet_len = pkt->len - truncated;
+ wakeup.packet_80211 = false;
+ } else {
+ int fcslen = 4;
+
+ if (truncated >= 4) {
+ truncated -= 4;
+ fcslen = 0;
+ } else {
+ fcslen -= truncated;
+ truncated = 0;
+ }
+ pktsize -= fcslen;
+ wakeup.packet = status->wake_packet;
+ wakeup.packet_present_len = pktsize;
+ wakeup.packet_len = pktlen - truncated;
+ wakeup.packet_80211 = true;
+ }
+ }
+
+ report:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ kfree_skb(pkt);
+}
+
+static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
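+
+/*
+ * Example: a little-endian PN of 0x112233445566 unpacks to
+ * ccmp.pn[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }, i.e. most
+ * significant byte first, as mac80211 stores CCMP PNs.
+ */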
+
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ iwl_mvm_le64_to_aes_seq(sc->pn, seq);
+}
+
+static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->tkip.iv16 = (u16)pn;
+ seq->tkip.iv32 = (u32)(pn >> 16);
+}
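+
+/*
+ * Example: a PN of 0x123456789A splits into iv32 = 0x12345678 and
+ * iv16 = 0x789A, matching TKIP's division of the 48-bit TSC.
+ */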
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+ struct ieee80211_key_seq *seq)
+{
+ seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+ seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ int tid;
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
+
+static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
+ struct iwl_wowlan_status_data *status,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_key_pn *ptk_pn;
+ int tid;
+
+ iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ int i;
+
+ for (i = 1; i < mvm->trans->info.num_rxqs; i++)
+ memcpy(ptk_pn->q[i].pn[tid],
+ status->ptk.aes.seq[tid].ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
+ union iwl_all_tsc_rsc *sc, u8 key_idx)
+{
+ int i;
+
+	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
+
+ /* GTK RX counters */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i],
+ &status->gtk_seq[0].tkip.seq[i]);
+ iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i],
+ &status->gtk_seq[0].aes.seq[i]);
+ }
+ status->gtk_seq[0].valid = true;
+ status->gtk_seq[0].key_id = key_idx;
+
+ /* PTK TX counter */
+ status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) |
+ ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16);
+ status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn);
+
+ /* PTK RX counters */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i],
+ &status->ptk.tkip.seq[i]);
+ iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i],
+ &status->ptk.aes.seq[i]);
+ }
+}
+
+static void
+iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_all_rsc_tsc_v5 *sc,
+ unsigned int idx, unsigned int key_id)
+{
+ int tid;
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid],
+ &status->gtk_seq[idx].tkip.seq[tid]);
+ iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid],
+ &status->gtk_seq[idx].aes.seq[tid]);
+ }
+
+ status->gtk_seq[idx].valid = true;
+ status->gtk_seq[idx].key_id = key_id;
+}
+
+static void
+iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ int i, tid;
+
+	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
+ BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq));
+
+ /* GTK RX counters */
+ for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) {
+ u8 entry = sc->mcast_key_id_map[i];
+
+ if (entry < ARRAY_SIZE(sc->mcast_rsc))
+ iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc,
+ entry, i);
+ }
+
+	/* PTK TX counters are not needed; the device assigns them */
+
+ /* PTK RX counters */
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid],
+ &status->ptk.tkip.seq[tid]);
+ iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid],
+ &status->ptk.aes.seq[tid]);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_data *status,
+ int idx)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_wowlan_status_data *status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) {
+ if (!status->gtk_seq[i].valid)
+ continue;
+
+ if (status->gtk_seq[i].key_id == key->keyidx)
+ iwl_mvm_set_key_rx_seq_idx(key, status, i);
+ }
+}
+
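+/*
+ * GTK handling on resume is a two-pass key iteration: the first pass
+ * (iwl_mvm_d3_find_last_keys) only records the ciphers in use and
+ * bails out on anything unhandled, and the second pass
+ * (iwl_mvm_d3_update_keys) then updates the PN/seq counters.
+ */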
+struct iwl_mvm_d3_gtk_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_wowlan_status_data *status;
+ u32 gtk_cipher, igtk_cipher, bigtk_cipher;
+ bool unhandled_cipher, igtk_support, bigtk_support;
+ int num_keys;
+};
+
+static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* we support these */
+ data->gtk_cipher = key->cipher;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* we support these */
+ if (data->igtk_support &&
+ (key->keyidx == 4 || key->keyidx == 5)) {
+ data->igtk_cipher = key->cipher;
+ } else if (data->bigtk_support &&
+ (key->keyidx == 6 || key->keyidx == 7)) {
+ data->bigtk_cipher = key->cipher;
+ } else {
+ data->unhandled_cipher = true;
+ return;
+ }
+ break;
+ default:
+ /* everything else - disconnect from AP */
+ data->unhandled_cipher = true;
+ return;
+ }
+
+ data->num_keys++;
+}
+
+static void
+iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
+ struct ieee80211_key_seq *seq, u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ BUILD_BUG_ON(sizeof(seq->aes_gmac.pn) != sizeof(key->ipn));
+ memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
+ memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void
+iwl_mvm_d3_update_igtk_bigtk(struct iwl_wowlan_status_data *status,
+ struct ieee80211_key_conf *key,
+ struct iwl_multicast_key_data *key_data)
+{
+ struct ieee80211_key_seq seq;
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, key->cipher);
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+}
+
+static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ struct iwl_wowlan_status_data *status = data->status;
+ s8 keyidx;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
+
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta) {
+ atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn);
+ iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key);
+ return;
+ }
+ fallthrough;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn);
+ iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq);
+ return;
+ }
+ keyidx = key->keyidx;
+ /*
+ * Update the seq even if there was a rekey. If there was a
+ * rekey, we will update again after replacing the key
+ */
+ if ((status->gtk[0].len && keyidx == status->gtk[0].id) ||
+ (status->gtk[1].len && keyidx == status->gtk[1].id))
+ iwl_mvm_set_key_rx_seq(key, status);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ iwl_mvm_d3_update_igtk_bigtk(status, key,
+ &status->igtk);
+ }
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ u8 idx = key->keyidx == status->bigtk[1].id;
+
+ iwl_mvm_d3_update_igtk_bigtk(status, key,
+ &status->bigtk[idx]);
+ }
+ }
+}
+
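+/*
+ * Install the GTK(s) the firmware negotiated while the host slept.
+ * The firmware may also report keys we already have; those come back
+ * as -EALREADY from ieee80211_gtk_rekey_add() and are skipped.
+ */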
+static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm *mvm, u32 gtk_cipher)
+{
+ int i, j;
+ struct ieee80211_key_conf *key;
+ DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
+ WOWLAN_KEY_MAX_SIZE);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ u8 key_data[WOWLAN_KEY_MAX_SIZE];
+
+ conf->cipher = gtk_cipher;
+
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_CCMP);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_GCMP_256);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_TKIP);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(status->gtk[0].key));
+
+ switch (gtk_cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf->keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf->keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf->keylen = WLAN_KEY_LEN_TKIP;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(status->gtk); i++) {
+ if (!status->gtk[i].len)
+ continue;
+
+ conf->keyidx = status->gtk[i].id;
+ IWL_DEBUG_WOWLAN(mvm,
+ "Received from FW GTK cipher %d, key index %d\n",
+ conf->cipher, conf->keyidx);
+ memcpy(conf->key, status->gtk[i].key,
+ sizeof(status->gtk[i].key));
+ memcpy(key_data, status->gtk[i].key, sizeof(status->gtk[i].key));
+
+ key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id, key_data,
+ sizeof(key_data), link_id);
+ if (IS_ERR(key)) {
+ /* FW may send also the old keys */
+ if (PTR_ERR(key) == -EALREADY)
+ continue;
+ return false;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
+ }
+
+ return true;
+}
+
+static bool
+iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
+ struct ieee80211_vif *vif, u32 cipher,
+ struct iwl_multicast_key_data *key_data)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
+ WOWLAN_KEY_MAX_SIZE);
+ struct ieee80211_key_conf *key_config;
+ struct ieee80211_key_seq seq;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ s8 keyidx = key_data->id;
+
+ conf->cipher = cipher;
+ conf->keyidx = keyidx;
+
+ if (!key_data->len)
+ return true;
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf->cipher);
+
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf->keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
+ memcpy(conf->key, key_data->key, conf->keylen);
+
+ memcpy(key, key_data->key, sizeof(key_data->key));
+
+ key_config = ieee80211_gtk_rekey_add(vif, keyidx, key, sizeof(key),
+ link_id);
+ if (IS_ERR(key_config)) {
+ /* FW may send also the old keys */
+ return PTR_ERR(key_config) == -EALREADY;
+ }
+ ieee80211_set_key_rx_seq(key_config, 0, &seq);
+
+ if (keyidx == 4 || keyidx == 5) {
+ struct iwl_mvm_vif_link_info *mvm_link;
+
+ link_id = link_id < 0 ? 0 : link_id;
+ mvm_link = mvmvif->link[link_id];
+ if (mvm_link->igtk)
+ mvm_link->igtk->hw_key_idx = STA_KEY_IDX_INVALID;
+ mvm_link->igtk = key_config;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION && (keyidx == 6 || keyidx == 7))
+ rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
+ key_config);
+
+ return true;
+}
+
+static int iwl_mvm_lookup_wowlan_status_ver(struct iwl_mvm *mvm)
+{
+ u8 notif_ver;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL))
+ return 6;
+
+ /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_GET_STATUSES, 0);
+ if (!notif_ver)
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 7);
+
+ return notif_ver;
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+ .mvm = mvm,
+ .status = status,
+ };
+ int i;
+ u32 disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+
+ if (!status || !vif->bss_conf.bssid)
+ return false;
+
+ if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ 0))
+ gtkdata.igtk_support = true;
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ 0) >= 3)
+ gtkdata.bigtk_support = true;
+
+ /* find last GTK that we used initially, if any */
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_find_last_keys, &gtkdata);
+ /* not trying to keep connections with MFP/unhandled ciphers */
+ if (gtkdata.unhandled_cipher)
+ return false;
+ if (!gtkdata.num_keys)
+ goto out;
+
+ /*
+ * invalidate all other GTKs that might still exist and update
+ * the one that we used
+ */
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_d3_update_keys, &gtkdata);
+
+ if (status->num_of_gtk_rekeys) {
+ __be64 replay_ctr = cpu_to_be64(status->replay_ctr);
+
+ IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
+ status->num_of_gtk_rekeys);
+
+ if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher))
+ return false;
+
+ if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
+ gtkdata.igtk_cipher,
+ &status->igtk))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) {
+ if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
+ gtkdata.bigtk_cipher,
+ &status->bigtk[i]))
+ return false;
+ }
+
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ }
+
+out:
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN) < 10) {
+ mvmvif->seqno_valid = true;
+ /* +0x10 because the set API expects next-to-use, not last-used */
+ mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
+ }
+
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
+ return true;
+}
+
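+/*
+ * The iwl_mvm_convert_*() helpers below normalize the versioned
+ * firmware key layouts into struct iwl_wowlan_status_data, including
+ * placing the TKIP RX MIC key at its NL80211-defined offset inside
+ * the key material.
+ */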
+static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_gtk_status_v2 *data)
+{
+ BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data->key));
+ BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
+ sizeof(data->tkip_mic_key) >
+ sizeof(status->gtk[0].key));
+
+ status->gtk[0].len = data->key_len;
+ status->gtk[0].flags = data->key_flags;
+ status->gtk[0].id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK;
+
+ memcpy(status->gtk[0].key, data->key, sizeof(data->key));
+
+ /* if it's as long as the TKIP encryption key, copy MIC key */
+ if (status->gtk[0].len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ data->tkip_mic_key, sizeof(data->tkip_mic_key));
+}
+
+static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_gtk_status_v3 *data)
+{
+ int data_idx, status_idx = 0;
+
+ BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data[0].key));
+ BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
+ sizeof(data[0].tkip_mic_key) >
+ sizeof(status->gtk[0].key));
+ BUILD_BUG_ON(ARRAY_SIZE(status->gtk) < WOWLAN_GTK_KEYS_NUM);
+ for (data_idx = 0; data_idx < ARRAY_SIZE(status->gtk); data_idx++) {
+ if (!(data[data_idx].key_len))
+ continue;
+ status->gtk[status_idx].len = data[data_idx].key_len;
+ status->gtk[status_idx].flags = data[data_idx].key_flags;
+ status->gtk[status_idx].id = status->gtk[status_idx].flags &
+ IWL_WOWLAN_GTK_IDX_MASK;
+
+ memcpy(status->gtk[status_idx].key, data[data_idx].key,
+ sizeof(data[data_idx].key));
+
+ /* if it's as long as the TKIP encryption key, copy MIC key */
+ if (status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ data[data_idx].tkip_mic_key,
+ sizeof(data[data_idx].tkip_mic_key));
+ status_idx++;
+ }
+}
+
+static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_igtk_status *data)
+{
+ int i;
+
+ BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
+
+ if (!data->key_len)
+ return;
+
+ status->igtk.len = data->key_len;
+ status->igtk.flags = data->key_flags;
+ status->igtk.id = u32_get_bits(data->key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+ + WOWLAN_IGTK_MIN_INDEX;
+
+ memcpy(status->igtk.key, data->key, sizeof(data->key));
+
+	/* mac80211 expects the IPN in big endian so memcmp() works; convert it */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
+}
+
+static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
+ const struct iwl_wowlan_igtk_status *data)
+{
+ int data_idx, status_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->bigtk) < WOWLAN_BIGTK_KEYS_NUM);
+
+ for (data_idx = 0; data_idx < WOWLAN_BIGTK_KEYS_NUM; data_idx++) {
+ if (!data[data_idx].key_len)
+ continue;
+
+ status->bigtk[status_idx].len = data[data_idx].key_len;
+ status->bigtk[status_idx].flags = data[data_idx].key_flags;
+ status->bigtk[status_idx].id =
+ u32_get_bits(data[data_idx].key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+ + WOWLAN_BIGTK_MIN_INDEX;
+
+ BUILD_BUG_ON(sizeof(status->bigtk[status_idx].key) <
+ sizeof(data[data_idx].key));
+ BUILD_BUG_ON(sizeof(status->bigtk[status_idx].ipn) <
+ sizeof(data[data_idx].ipn));
+
+ memcpy(status->bigtk[status_idx].key, data[data_idx].key,
+ sizeof(data[data_idx].key));
+ memcpy(status->bigtk[status_idx].ipn, data[data_idx].ipn,
+ sizeof(data[data_idx].ipn));
+ status_idx++;
+ }
+}
+
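+/*
+ * Parser for the current WOWLAN_INFO_NOTIFICATION layout, which
+ * carries a single offloaded TX TID; the v3 and v1 variants below
+ * handle older firmware that reported all QoS counters.
+ */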
+static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+	if (len < sizeof(*data)) {
+		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+		return;
+	}
+
+	if (IWL_FW_CHECK(mvm, data->num_mlo_link_keys,
+			 "MLO is not supported, shouldn't receive MLO keys\n"))
+		return;
+
+ if (mvm->fast_resume)
+ return;
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, data->gtk);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+ iwl_mvm_convert_bigtk(status, data->bigtk);
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+	if (IWL_FW_CHECK(mvm,
+			 data->tid_offloaded_tx >=
+			 ARRAY_SIZE(status->qos_seq_ctr),
+			 "tid_offloaded_tx is out of bound %d\n",
+			 data->tid_offloaded_tx))
+		data->tid_offloaded_tx = 0;
+	status->tid_offloaded_tx = data->tid_offloaded_tx;
+ status->qos_seq_ctr[data->tid_offloaded_tx] =
+ le16_to_cpu(data->qos_seq_ctr);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+static void
+iwl_mvm_parse_wowlan_info_notif_v3(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif_v3 *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+	if (len < sizeof(*data)) {
+		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+		return;
+	}
+
+ if (mvm->fast_resume)
+ return;
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, data->gtk);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+ iwl_mvm_convert_bigtk(status, data->bigtk);
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+static void
+iwl_mvm_parse_wowlan_info_notif_v1(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif_v1 *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+	if (len < sizeof(*data)) {
+		IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+		return;
+	}
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, data->gtk);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status_data * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ struct iwl_wowlan_status_ ##_ver *data,\
+ int len) \
+{ \
+ struct iwl_wowlan_status_data *status; \
+ int data_size, i; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ status = kzalloc(sizeof(*status), GFP_KERNEL); \
+ if (!status) \
+ return NULL; \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = le64_to_cpu(data->replay_ctr); \
+ status->pattern_number = le16_to_cpu(data->pattern_number); \
+ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
+ for (i = 0; i < 8; i++) \
+ status->qos_seq_ctr[i] = \
+ le16_to_cpu(data->qos_seq_ctr[i]); \
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
+ status->num_of_gtk_rekeys = \
+ le32_to_cpu(data->num_of_gtk_rekeys); \
+ status->received_beacons = le32_to_cpu(data->received_beacons); \
+ status->wake_packet_length = \
+ le32_to_cpu(data->wake_packet_length); \
+ status->wake_packet_bufsize = \
+ le32_to_cpu(data->wake_packet_bufsize); \
+ if (status->wake_packet_bufsize) { \
+ status->wake_packet = \
+ kmemdup(data->wake_packet, \
+ status->wake_packet_bufsize, \
+ GFP_KERNEL); \
+ if (!status->wake_packet) { \
+ kfree(status); \
+ return NULL; \
+ } \
+ } else { \
+ status->wake_packet = NULL; \
+ } \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+
+static struct iwl_wowlan_status_data *
+iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct iwl_wowlan_status_data *status;
+ struct iwl_wowlan_get_status_cmd get_status_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_WANT_SKB,
+ .data = { &get_status_cmd, },
+ .len = { sizeof(get_status_cmd), },
+ };
+ int ret, len;
+ u8 notif_ver;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ cmd.len[0] = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+ /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
+ notif_ver = iwl_mvm_lookup_wowlan_status_ver(mvm);
+
+ if (notif_ver < 7) {
+ struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
+
+ status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len);
+ if (!status)
+ goto out_free_resp;
+
+ BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
+ sizeof(status->gtk[0].key));
+ BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
+ sizeof(v6->gtk.tkip_mic_key) >
+ sizeof(status->gtk[0].key));
+
+ /* copy GTK info to the right place */
+ memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
+ sizeof(v6->gtk.decrypt_key));
+ memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ v6->gtk.tkip_mic_key,
+ sizeof(v6->gtk.tkip_mic_key));
+
+ iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc,
+ v6->gtk.key_index);
+
+ /* hardcode the key length to 16 since v6 only supports 16 */
+ status->gtk[0].len = 16;
+
+ /*
+ * The key index only uses 2 bits (values 0 to 3) and
+ * we always set bit 7 which means this is the
+ * currently used key.
+ */
+ status->gtk[0].flags = v6->gtk.key_index | BIT(7);
+ status->gtk[0].id = v6->gtk.key_index;
+ } else if (notif_ver == 7) {
+ struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
+
+ status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len);
+ if (!status)
+ goto out_free_resp;
+
+ iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc,
+ v7->gtk[0].key_flags & IWL_WOWLAN_GTK_IDX_MASK);
+ iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]);
+ iwl_mvm_convert_igtk(status, &v7->igtk[0]);
+ } else {
+ IWL_ERR(mvm,
+ "Firmware advertises unknown WoWLAN status response %d!\n",
+ notif_ver);
+ status = NULL;
+ }
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return status;
+}
+
+/* releases the MVM mutex */
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
+{
+ int i;
+ bool keep = false;
+ struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+ int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
+ PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (WARN_ON(!mvm_link))
+ goto out_unlock;
+
+ if (!status)
+ goto out_unlock;
+
+ IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
+ status->wakeup_reasons);
+
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id);
+ if (!mvm_ap_sta)
+ goto out_unlock;
+
+ /* firmware stores last-used value, we store next value */
+ if (wowlan_info_ver >= 5) {
+ mvm_ap_sta->tid_data[status->tid_offloaded_tx].seq_number =
+ status->qos_seq_ctr[status->tid_offloaded_tx] + 0x10;
+ } else {
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ mvm_ap_sta->tid_data[i].seq_number =
+ status->qos_seq_ctr[i] + 0x10;
+ }
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ i = mvm->offload_tid;
+ iwl_trans_set_q_ptrs(mvm->trans,
+ mvm_ap_sta->tid_data[i].txq_id,
+ mvm_ap_sta->tid_data[i].seq_number >> 4);
+ }
+
+ iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ return keep;
+}
+
+#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
+ IWL_SCAN_MAX_PROFILES)
+
+struct iwl_mvm_nd_results {
+ u32 matched_profiles;
+ u8 matches[ND_QUERY_BUF_LEN];
+};
+
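+/*
+ * Ask the firmware which match profiles fired. The response layout
+ * depends on IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS, hence the two
+ * sets of sizes computed below.
+ */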
+static int
+iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_results *results)
+{
+ struct iwl_scan_offload_match_info *query;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, len;
+ size_t query_len, matches_len;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+ return ret;
+ }
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ query_len = sizeof(struct iwl_scan_offload_match_info);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ max_profiles;
+ } else {
+ query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
+ max_profiles;
+ }
+
+ len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (len < query_len) {
+ IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ query = (void *)cmd.resp_pkt->data;
+
+ results->matched_profiles = le32_to_cpu(query->matched_profiles);
+ memcpy(results->matches, query->matches, matches_len);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+#endif
+
+out_free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_results *results,
+ int idx)
+{
+ int n_chans = 0, i;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (void *)results->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (void *)results->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ }
+
+ return n_chans;
+}
+
+static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_results *results,
+ struct cfg80211_wowlan_nd_match *match,
+ int idx)
+{
+ int i;
+ int n_channels = 0;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (void *)results->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (void *)results->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ }
+ /* We may have ended up with fewer channels than we allocated. */
+ match->n_channels = n_channels;
+}
+
+/**
+ * enum iwl_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received
+ */
+enum iwl_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+/* manage d3 resume data */
+struct iwl_d3_data {
+ struct iwl_wowlan_status_data *status;
+ bool test;
+ u32 d3_end_flags;
+ u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
+ u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
+ struct iwl_mvm_nd_results *nd_results;
+ bool nd_results_valid;
+};
+
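+/*
+ * Convert the matched-profiles bitmap (queried from the firmware, or
+ * taken from the notification) into a cfg80211_wowlan_nd_info report,
+ * mapping matched channels back through the saved scan channel list.
+ */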
+static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
+{
+ struct cfg80211_wowlan_nd_info *net_detect = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ unsigned long matched_profiles;
+ u32 reasons = 0;
+ int i, n_matches, ret;
+
+ if (WARN_ON(!d3_data || !d3_data->status))
+ goto out;
+
+ reasons = d3_data->status->wakeup_reasons;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0)) {
+ IWL_INFO(mvm, "Query FW for ND results\n");
+ ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results);
+
+ } else {
+ IWL_INFO(mvm, "Notification based ND results\n");
+ ret = d3_data->nd_results_valid ? 0 : -1;
+ }
+
+ if (ret || !d3_data->nd_results->matched_profiles) {
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = d3_data->nd_results->matched_profiles;
+ if (mvm->n_nd_match_sets) {
+ n_matches = hweight_long(matched_profiles);
+ } else {
+ IWL_ERR(mvm, "no net detect match information available\n");
+ n_matches = 0;
+ }
+
+ net_detect = kzalloc(struct_size(net_detect, matches, n_matches),
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
+ net_detect->n_matches = n_matches;
+ n_matches = 0;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct cfg80211_wowlan_nd_match *match;
+ int idx, n_channels = 0;
+
+ n_channels = iwl_mvm_query_num_match_chans(mvm,
+ d3_data->nd_results,
+ i);
+
+ match = kzalloc(struct_size(match, channels, n_channels),
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
+ match->n_channels = n_channels;
+
+ net_detect->matches[n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = mvm->n_nd_match_sets - i - 1;
+ match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (mvm->n_nd_channels < n_channels)
+ continue;
+
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
+ }
+ /* We may have fewer matches than we allocated. */
+ net_detect->n_matches = n_matches;
+
+out_report_nd:
+ wakeup.net_detect = net_detect;
+out:
+ iwl_mvm_free_nd(mvm);
+
+ mutex_unlock(&mvm->mutex);
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+
+ if (net_detect) {
+ for (i = 0; i < net_detect->n_matches; i++)
+ kfree(net_detect->matches[i]);
+ kfree(net_detect);
+ }
+}
+
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_resume_disconnect(vif);
+}
+
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
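+/*
+ * Check the LMAC1, LMAC2 and UMAC error tables in turn. An RF-kill
+ * toggle during suspend shows up in the LMAC1 table but is reported as
+ * a wakeup and mapped to FW_NEEDS_RESET rather than FW_ERROR.
+ */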
+static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_fwrt_read_err_table(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[0],
+ &err_id)) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ IWL_WARN(mvm, "Rfkill was toggled during suspend\n");
+ if (vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+
+ return FW_NEEDS_RESET;
+ }
+ return FW_ERROR;
+ }
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_fwrt_read_err_table(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[1],
+ NULL))
+ return FW_ERROR;
+
+ /* check for umac error */
+ if (iwl_fwrt_read_err_table(mvm->trans,
+ mvm->trans->dbg.umac_error_event_table,
+ NULL))
+ return FW_ERROR;
+
+ return FW_ALIVE;
+}
+
+/*
+ * This function assumes:
+ * 1. The mutex is already held.
+ * 2. The callee functions unlock the mutex.
+ */
+static bool
+iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* if FW uses status notification, status shouldn't be NULL here */
+ if (!d3_data->status) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 sta_id = mvm->net_detect ? IWL_INVALID_STA :
+ mvmvif->deflink.ap_sta_id;
+
+ /* bug - FW with MLO has status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
+ d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
+ }
+
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
+ } else {
+ bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
+
+ return keep;
+ }
+ return false;
+}
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
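+/*
+ * Copy the wake packet out of the notification. The payload may carry
+ * padding (and the packet itself may have been truncated by the
+ * firmware), so wake_packet_bufsize records what was actually copied
+ * while wake_packet_length keeps the original packet length.
+ */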
+static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm,
+ struct iwl_wowlan_wake_pkt_notif *notif,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length);
+
+ if (len < sizeof(*notif)) {
+ IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!status)) {
+ IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!(status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) {
+ IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n",
+ status->wakeup_reasons);
+ return -EIO;
+ }
+
+ data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet);
+
+	/* data_size includes padding copied from the notification; trim it */
+ if (packet_len < data_size)
+ data_size = packet_len;
+
+ status->wake_packet = kmemdup(notif->wake_packet, data_size,
+ GFP_ATOMIC);
+
+ if (!status->wake_packet)
+ return -ENOMEM;
+
+ status->wake_packet_length = packet_len;
+ status->wake_packet_bufsize = data_size;
+
+ return 0;
+}
+
+static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data,
+ struct iwl_scan_offload_match_info *notif,
+ u32 len)
+{
+ struct iwl_wowlan_status_data *status = d3_data->status;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_nd_results *results = d3_data->nd_results;
+ size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ iwl_umac_scan_get_max_profiles(mvm->fw);
+
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ if (len < sizeof(struct iwl_scan_offload_match_info)) {
+ IWL_ERR(mvm, "Invalid scan match info notification\n");
+ return;
+ }
+
+ if (!mvm->net_detect) {
+ IWL_ERR(mvm, "Unexpected scan match info notification\n");
+ return;
+ }
+
+ if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ IWL_ERR(mvm,
+ "Ignore scan match info notification: no reason\n");
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done);
+#endif
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_INFO(mvm, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles) {
+ memcpy(results->matches, notif->matches, matches_len);
+ d3_data->nd_results_valid = true;
+ }
+
+ /* no scan should be active at this point */
+ mvm->scan_status = 0;
+ for (i = 0; i < mvm->max_scans; i++)
+ mvm->scan_uid_status[i] = 0;
+}
+
+static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_d3_data *d3_data = data;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ int ret;
+ int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
+ PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ /* We might get two notifications due to dual bss */
+ IWL_DEBUG_WOWLAN(mvm,
+ "Got additional wowlan info notification\n");
+ break;
+ }
+
+ if (wowlan_info_ver == 1) {
+ struct iwl_wowlan_info_notif_v1 *notif_v1 =
+ (void *)pkt->data;
+
+ iwl_mvm_parse_wowlan_info_notif_v1(mvm, notif_v1,
+ d3_data->status,
+ len);
+ } else if (wowlan_info_ver == 3) {
+ struct iwl_wowlan_info_notif_v3 *notif =
+ (void *)pkt->data;
+
+ iwl_mvm_parse_wowlan_info_notif_v3(mvm, notif,
+ d3_data->status, len);
+ } else if (wowlan_info_ver == 5) {
+ struct iwl_wowlan_info_notif *notif =
+ (void *)pkt->data;
+
+ iwl_mvm_parse_wowlan_info_notif(mvm, notif,
+ d3_data->status, len);
+ } else {
+ IWL_FW_CHECK(mvm, 1,
+ "Firmware advertises unknown WoWLAN info notification %d!\n",
+ wowlan_info_ver);
+ return false;
+ }
+
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+
+ if (d3_data->status &&
+ d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+			/* We should also get a wake packet notification */
+ d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_ERR(mvm,
+ "Got additional wowlan wake packet notification\n");
+ } else {
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ len = iwl_rx_packet_payload_len(pkt);
+ ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif,
+ d3_data->status,
+ len);
+ if (ret)
+ IWL_ERR(mvm,
+ "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n");
+ }
+
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mvm,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_ND_MATCH_INFO;
+
+ /* explicitly set this in the 'expected' as well */
+ d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO;
+
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len);
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_d3_end_notif *notif = (void *)pkt->data;
+
+ d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
+ d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return d3_data->notif_received == d3_data->notif_expected;
+}
+
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+{
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+ bool reset = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ if (ret)
+ return ret;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ return -ENOENT;
+ }
+
+	/*
+	 * Trigger the resume flow with a command only for the 22000 family;
+	 * AX210 and above don't need the command since they have
+	 * the doorbell interrupt.
+	 */
+ if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3)
+
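+/*
+ * Resume the firmware and wait for the D3 exit notifications. The fast
+ * resume flow only expects D3_END_NOTIFICATION; the full flow also waits
+ * for the WoWLAN info, wake packet and netdetect match notifications.
+ */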
+static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data)
+{
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ static const u16 d3_fast_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ int ret;
+
+ if (mvm->fast_resume)
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_fast_resume_notif,
+ ARRAY_SIZE(d3_fast_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+ else
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+
+ ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif,
+ IWL_MVM_D3_NOTIF_TIMEOUT);
+}
+
+static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_WAKE_PKT_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0);
+}
+
+static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+{
+ struct ieee80211_vif *vif = NULL;
+ int ret = 1;
+ struct iwl_mvm_nd_results results = {};
+ struct iwl_d3_data d3_data = {
+ .test = test,
+ .notif_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .nd_results_valid = false,
+ .nd_results = &results,
+ };
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+ bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+ bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ enum rt_status rt_status;
+ bool keep = false;
+
+ mutex_lock(&mvm->mutex);
+
+	/* Apparently the device went away and device_powered_off() was
+	 * called; don't even try to read rt_status, the device is
+	 * currently inaccessible.
+	 */
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ IWL_INFO(mvm,
+ "Can't resume, device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+
+ /* get the BSS vif pointer again */
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ goto err;
+
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ rt_status = iwl_mvm_check_rt_status(mvm, vif);
+ if (rt_status != FW_ALIVE) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
+ ret = 1;
+ goto err;
+ }
+
+ if (resume_notif_based) {
+ d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL);
+ if (!d3_data.status) {
+ IWL_ERR(mvm, "Failed to allocate wowlan status\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ if (ret)
+ goto err;
+ } else {
+ ret = iwl_mvm_resume_firmware(mvm, test);
+ if (ret < 0)
+ goto err;
+ }
+
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+
+ /* when reset is required we can't send these following commands */
+ if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
+ goto query_wakeup_reasons;
+
+	/*
+	 * Query the current location and source from the D3 firmware so we
+	 * can play it back when we re-initialize the D0 firmware.
+	 */
+ iwl_mvm_update_changed_regdom(mvm);
+
+ /* Re-configure PPAG settings */
+ iwl_mvm_ppag_send_cmd(mvm);
+
+ if (!unified_image)
+ /* Re-configure default SAR profile */
+ iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+	/* If this is a non-unified image, we restart the FW, so there is
+	 * no need to stop the netdetect scan. If stopping the scan fails,
+	 * continue and try to get the wake-up reasons, but trigger a HW
+	 * restart by keeping a failure code in ret.
+	 */
+	if (mvm->net_detect && unified_image)
+		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+					false);
+
+query_wakeup_reasons:
+ keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+
+err:
+ mutex_unlock(&mvm->mutex);
+out:
+ if (d3_data.status)
+ kfree(d3_data.status->wake_packet);
+ kfree(d3_data.status);
+ iwl_mvm_free_nd(mvm);
+
+ if (!d3_data.test && !mvm->net_detect)
+ ieee80211_iterate_active_interfaces_mtx(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter,
+ keep ? vif : NULL);
+
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ /* no need to reset the device in unified images, if successful */
+ if (unified_image && !ret) {
+ /* nothing else to do if we already sent D0I3_END_CMD */
+ if (d0i3_first)
+ return 0;
+
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0)) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
+ return 0;
+ } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) {
+ return 0;
+ }
+ }
+
+ /*
+ * Reconfigure the device in one of the following cases:
+ * 1. We are not using a unified image
+ * 2. We are using a unified image but had an error while exiting D3
+ */
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+
+ return 1;
+}
+
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ ret = __iwl_mvm_resume(mvm, false);
+
+ iwl_mvm_resume_tcm(mvm);
+
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
+ return ret;
+}
+
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
+
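+/*
+ * Lightweight D3 entry: send D3_CONFIG_CMD and suspend the transport
+ * without resetting it, so that iwl_mvm_fast_resume() only needs to
+ * wait for the D3_END notification.
+ */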
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
+
+ mvm->fast_resume = true;
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ WARN_ON(iwl_mvm_power_update_device(mvm));
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0,
+ sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
+ if (ret)
+ IWL_ERR(mvm,
+ "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+
+ ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ if (ret)
+ IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
+}
+
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_data d3_data = {
+ .notif_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ enum rt_status rt_status;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n");
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ rt_status = iwl_mvm_check_rt_status(mvm, NULL);
+ if (rt_status != FW_ALIVE) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm,
+ "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
+ mvm->trans->state = IWL_TRANS_NO_FW;
+ ret = -ENODEV;
+
+ goto out;
+ }
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+ mvm->trans->state = IWL_TRANS_NO_FW;
+ }
+
+out:
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->fast_resume = false;
+
+ return ret;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ int err;
+
+ if (mvm->d3_test_active)
+ return -EBUSY;
+
+ file->private_data = inode->i_private;
+
+ iwl_mvm_pause_tcm(mvm, true);
+
+ iwl_fw_runtime_suspend(&mvm->fwrt);
+
+ /* start pseudo D3 */
+ rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
+ err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+ wiphy_unlock(mvm->hw->wiphy);
+ rtnl_unlock();
+ if (err > 0)
+ err = -EINVAL;
+ if (err)
+ return err;
+
+ mvm->d3_test_active = true;
+ mvm->keep_vif = NULL;
+ return 0;
+}
+
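+/* Block the reader while in pseudo-D3: poll the firmware's PME state
+ * roughly every 100ms and time out after about a minute.
+ */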
+static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
+ u32 pme_asserted;
+
+ while (true) {
+ /* read pme_ptr if available */
+ if (mvm->d3_test_pme_ptr) {
+ pme_asserted = iwl_trans_read_mem32(mvm->trans,
+ mvm->d3_test_pme_ptr);
+ if (pme_asserted)
+ break;
+ }
+
+#if defined(__linux__)
+ if (msleep_interruptible(100))
+#elif defined(__FreeBSD__)
+ if (linux_msleep_interruptible(100))
+#endif
+ break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ /* skip the one we keep connection on */
+ if (_data == vif)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_connection_loss(vif);
+}
+
+static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+{
+ struct iwl_mvm *mvm = inode->i_private;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ mvm->d3_test_active = false;
+
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
+ __iwl_mvm_resume(mvm, true);
+ wiphy_unlock(mvm->hw->wiphy);
+ rtnl_unlock();
+
+ iwl_mvm_resume_tcm(mvm);
+
+ iwl_fw_runtime_resume(&mvm->fwrt);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ if (!unified_image) {
+ int remaining_time = 10;
+
+ ieee80211_restart_hw(mvm->hw);
+
+ /* wait for restart and disconnect all interfaces */
+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ remaining_time > 0) {
+ remaining_time--;
+#if defined(__linux__)
+ msleep(1000);
+#elif defined(__FreeBSD__)
+ linux_msleep(1000);
+#endif
+ }
+
+ if (remaining_time == 0)
+ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
+
+ return 0;
+}
+
+const struct file_operations iwl_dbgfs_d3_test_ops = {
+ .open = iwl_mvm_d3_test_open,
+ .read = iwl_mvm_d3_test_read,
+ .release = iwl_mvm_d3_test_release,
+};
+#endif
diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c b/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 000000000000..f1303440d3dd
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,1016 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "mvm.h"
+#include "debugfs.h"
+#if defined(__FreeBSD__)
+#include <linux/math64.h>
+#endif
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_dbgfs_pm_mask param, int val)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+ dbgfs_pm->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+ int dtimper = vif->bss_conf.dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive=%d sec\n", val);
+ if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+ IWL_WARN(mvm,
+ "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+ val * MSEC_PER_SEC, 3 * dtimper_msec);
+ dbgfs_pm->keep_alive_seconds = val;
+ break;
+ }
+ case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+ IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+ val ? "enabled" : "disabled");
+ dbgfs_pm->skip_over_dtim = val;
+ break;
+ case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+ IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+ dbgfs_pm->skip_dtim_periods = val;
+ break;
+ case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+ dbgfs_pm->rx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+ IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+ dbgfs_pm->tx_data_timeout = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_ENA:
+ IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+ dbgfs_pm->lprx_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+ IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+ dbgfs_pm->lprx_rssi_threshold = val;
+ break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
+ case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+ IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+ dbgfs_pm->uapsd_misbehaving = val;
+ break;
+ case MVM_DEBUGFS_PM_USE_PS_POLL:
+ IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
+ dbgfs_pm->use_ps_poll = val;
+ break;
+ }
+}
+
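+/*
+ * Writes take the form "<param>=<value>", e.g. "keep_alive=30" or
+ * "skip_over_dtim=1"; anything else is rejected with -EINVAL.
+ */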
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_pm_mask param;
+ int val, ret;
+
+ if (!strncmp("keep_alive=", buf, 11)) {
+ if (sscanf(buf + 11, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+ } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+ if (sscanf(buf + 15, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+ } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+ } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+ } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+ if (sscanf(buf + 16, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+ } else if (!strncmp("lprx=", buf, 5)) {
+ if (sscanf(buf + 5, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_ENA;
+ } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+ if (sscanf(buf + 20, "%d", &val) != 1)
+ return -EINVAL;
+ if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+ POWER_LPRX_RSSI_THRESHOLD_MIN)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+ } else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+ if (sscanf(buf + 18, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+ } else if (!strncmp("use_ps_poll=", buf, 12)) {
+ if (sscanf(buf + 12, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_USE_PS_POLL;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_pm(mvm, vif, param, val);
+ ret = iwl_mvm_power_update_mac(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+ vif->bss_conf.txpower);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 ap_sta_id;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ char buf[512];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ ap_sta_id = mvmvif->deflink.ap_sta_id;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
+ pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+ mvmvif->id, mvmvif->color);
+ pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+ vif->bss_conf.bssid);
+ pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+ mvm->tcm.result.load[mvmvif->id]);
+ pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+ for (i = 0; i < ARRAY_SIZE(mvmvif->deflink.queue_params); i++)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+ i, mvmvif->deflink.queue_params[i].txop,
+ mvmvif->deflink.queue_params[i].cw_min,
+ mvmvif->deflink.queue_params[i].cw_max,
+ mvmvif->deflink.queue_params[i].aifs,
+ mvmvif->deflink.queue_params[i].uapsd);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ ap_sta_id != IWL_INVALID_STA) {
+ struct iwl_mvm_sta *mvm_sta;
+
+ mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+ if (mvm_sta) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "ap_sta_id %d - reduced Tx power %d\n",
+ ap_sta_id,
+ mvm_sta->bt_reduced_txpower);
+ }
+ }
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (chanctx_conf)
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "idle rx chains %d, active rx chains: %d\n",
+ chanctx_conf->rx_chains_static,
+ chanctx_conf->rx_chains_dynamic);
+ rcu_read_unlock();
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+ enum iwl_dbgfs_bf_mask param, int value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ dbgfs_bf->mask |= param;
+
+ switch (param) {
+ case MVM_DEBUGFS_BF_ENERGY_DELTA:
+ dbgfs_bf->bf_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+ dbgfs_bf->bf_roaming_energy_delta = value;
+ break;
+ case MVM_DEBUGFS_BF_ROAMING_STATE:
+ dbgfs_bf->bf_roaming_state = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+ dbgfs_bf->bf_enable_beacon_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_DEBUG_FLAG:
+ dbgfs_bf->bf_debug_flag = value;
+ break;
+ case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+ dbgfs_bf->bf_escape_timer = value;
+ break;
+ case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+ dbgfs_bf->ba_enable_beacon_abort = value;
+ break;
+ case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+ dbgfs_bf->ba_escape_timer = value;
+ break;
+ }
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_dbgfs_bf_mask param;
+ int value, ret = 0;
+
+ if (!strncmp("bf_energy_delta=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+ value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+ } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+ if (sscanf(buf+17, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ROAMING_STATE_MIN ||
+ value > IWL_BF_ROAMING_STATE_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ROAMING_STATE;
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+ } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf+24, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+ } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+ if (sscanf(buf+14, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+ } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+ value > IWL_BF_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+ } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+ if (sscanf(buf+16, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+ value > IWL_BA_ESCAPE_TIMER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+ } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+ if (sscanf(buf+23, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ iwl_dbgfs_update_bf(vif, param, value);
+ if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ else
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+ };
+
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+ if (mvmvif->bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+ else
+ cmd.bf_enable_beacon_filter = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+ le32_to_cpu(cmd.bf_debug_flag));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+ le32_to_cpu(cmd.bf_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+ le32_to_cpu(cmd.ba_escape_timer));
+ pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 curr_gp2;
+ u64 curr_os;
+ s64 diff;
+ char buf[64];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
+ mutex_unlock(&mvm->mutex);
+
+ do_div(curr_os, NSEC_PER_USEC);
+ diff = curr_os - curr_gp2;
+ pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
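+/* Called with the wiphy lock held, via wiphy_locked_debugfs_write(). */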
+static ssize_t
+iwl_dbgfs_low_latency_write_handle(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t count, void *data)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = data;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10] = {};
+
+ return wiphy_locked_debugfs_write(mvm->hw->wiphy, file,
+ buf, sizeof(buf), user_buf, count,
+ iwl_dbgfs_low_latency_write_handle,
+ vif);
+}
+
+static ssize_t
+iwl_dbgfs_low_latency_force_write_handle(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t count, void *data)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *vif = data;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > NUM_LOW_LATENCY_FORCE)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (value == LOW_LATENCY_FORCE_UNSET) {
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, false,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ } else {
+ iwl_mvm_update_low_latency(mvm, vif,
+ value == LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_DEBUGFS_FORCE);
+ iwl_mvm_update_low_latency(mvm, vif, true,
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE);
+ }
+ mutex_unlock(&mvm->mutex);
+ return count;
+}
+
+static ssize_t
+iwl_dbgfs_low_latency_force_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10] = {};
+
+ return wiphy_locked_debugfs_write(mvm->hw->wiphy, file,
+ buf, sizeof(buf), user_buf, count,
+ iwl_dbgfs_low_latency_force_write_handle,
+ vif);
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n"
+ "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n";
+
+	/*
+	 * All values in the format string are booleans (a single 0/1
+	 * digit each), so sizeof(format) is enough to hold the result.
+	 */
+ char buf[sizeof(format) + 1] = {};
+ int len;
+
+ len = scnprintf(buf, sizeof(buf) - 1, format,
+ !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
+ !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
+ !!(mvmvif->low_latency & LOW_LATENCY_VCMD),
+ !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE),
+ !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE),
+ !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE),
+ !!(mvmvif->low_latency_actual));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[20];
+ int len;
+
+#if defined(__linux__)
+ len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_ap_addr);
+#elif defined(__FreeBSD__)
+ len = sprintf(buf, "%6D\n", mvmvif->uapsd_misbehaving_ap_addr, ":");
+#endif
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ bool ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = mac_pton(buf, mvmvif->uapsd_misbehaving_ap_addr);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? count : -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_bss_conf *link_conf;
+ u16 value;
+ int link_id, ret = -EINVAL;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent with RCU lock held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
+ rcu_read_unlock();
+
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
+
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[8];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+ mvmvif->mvm->dbgfs_rx_phyinfo);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
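+/*
+ * At most one interface may have a debugfs minimum quota: the writer
+ * clears its own value first, then rejects the write if any other
+ * interface still has one set.
+ */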
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int *ret = data;
+
+ if (mvmvif->dbgfs_quota_min)
+ *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 95)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->dbgfs_quota_min = 0;
+ ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_dbgfs_quota_check, &ret);
+ if (ret == 0) {
+ mvmvif->dbgfs_quota_min = value;
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[10];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif->max_tx_op = value;
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10];
+ int len;
+
+ mutex_lock(&mvm->mutex);
+ len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op);
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!action) {
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ ret = iwl_mvm_int_mlo_scan(mvm, vif);
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned long esr_mask;
+ char *buf;
+ int bufsz, pos, i;
+ ssize_t rv;
+
+ mutex_lock(&mvm->mutex);
+ esr_mask = mvmvif->esr_disable_reason;
+ mutex_unlock(&mvm->mutex);
+
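+	/* header line plus roughly one 32-char line per set reason bit */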
+ bufsz = hweight32(esr_mask) * 32 + 40;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
+ esr_mask);
+ for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
+ pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
+ iwl_get_esr_state_string(BIT(i)));
+
+ rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return rv;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 reason;
+ u8 block;
+ int ret;
+
+	ret = sscanf(buf, "%u %hhu", &reason, &block);
+	if (ret != 2)
+		return -EINVAL;
+
+ if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (block)
+ iwl_mvm_block_esr(mvm, vif, reason,
+ iwl_mvm_get_primary_link(vif));
+ else
+ iwl_mvm_unblock_esr(mvm, vif, reason);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
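+/* Debugfs boilerplate; bufsz sizes the temporary buffer that the write
+ * wrappers in debugfs.h use to copy in user data.
+ */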
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
+ debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops); \
+ } while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+
+static const struct file_operations iwl_dbgfs_low_latency_ops = {
+ .write = iwl_dbgfs_low_latency_write,
+ .read = iwl_dbgfs_low_latency_read,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+static const struct file_operations iwl_dbgfs_low_latency_force_ops = {
+ .write = iwl_dbgfs_low_latency_force_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
+
+void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+ if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+ dbgfs_dir);
+ return;
+ }
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+	    vif->type == NL80211_IFTYPE_STATION)
+ MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600);
+
+ MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
+ debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
+ &mvmvif->ftm_unprotected);
+ MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvmvif == mvm->bf_allowed_vif)
+ MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
+}
+
+void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+#if defined(__linux__)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
+#endif
+
+ /* this will happen in monitor mode */
+ if (!dbgfs_dir)
+ return;
+
+#if defined(__linux__)
+ /*
+ * Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+ */
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
+
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
+#endif
+}
+
+void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ debugfs_remove(mvmvif->dbgfs_slink);
+ mvmvif->dbgfs_slink = NULL;
+}
+
+#define MVM_DEBUGFS_WRITE_LINK_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, \
+ struct ieee80211_bss_conf)
+#define MVM_DEBUGFS_READ_WRITE_LINK_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(link_##name, bufsz, \
+ struct ieee80211_bss_conf)
+#define MVM_DEBUGFS_ADD_LINK_FILE(name, parent, mode) \
+ debugfs_create_file(#name, mode, parent, link_conf, \
+ &iwl_dbgfs_link_##name##_ops)
+
+static void iwl_mvm_debugfs_add_link_files(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *mvm_dir)
+{
+ /* Add per-link files here*/
+}
+
+void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct dentry *mvm_dir;
+
+ if (WARN_ON(!link_info) || !dir)
+ return;
+
+ if (dir == vif->debugfs_dir) {
+ WARN_ON(!mvmvif->dbgfs_dir);
+ mvm_dir = mvmvif->dbgfs_dir;
+ } else {
+ mvm_dir = debugfs_create_dir("iwlmvm", dir);
+ if (IS_ERR_OR_NULL(mvm_dir)) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+ dir);
+ return;
+ }
+ }
+
+ iwl_mvm_debugfs_add_link_files(vif, link_conf, mvm_dir);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs.c b/sys/contrib/dev/iwlwifi/mvm/debugfs.c
new file mode 100644
index 000000000000..eb8ae6d574fa
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,2203 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2023, 2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
+#include <linux/dmi.h>
+
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+#include "debugfs.h"
+#include "iwl-modparams.h"
+#include "iwl-drv.h"
+#include "iwl-utils.h"
+#include "fw/error-dump.h"
+#include "fw/api/phy-ctxt.h"
+
+static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos, budget;
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+ mutex_unlock(&mvm->mutex);
+
+ if (budget < 0)
+ return budget;
+
+ pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+	bool force = false;	/* stays false if the input doesn't parse */
+
+ if (!kstrtobool(buf, &force))
+ IWL_DEBUG_INFO(mvm,
+ "force start is %d [0=disabled, 1=enabled]\n",
+ force);
+
+	/* We allow skipping the capability check and force-stopping cTDP
+	 * statistics collection, on the caller's guarantee that doing so
+	 * is safe.
+	 */
+ if (!force && !iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+	bool force = false;	/* stays false if the input doesn't parse */
+
+ if (!kstrtobool(buf, &force))
+ IWL_DEBUG_INFO(mvm,
+ "force start is %d [0=disabled, 1=enabled]\n",
+ force);
+
+	/* We allow skipping the capability check and force-enabling cTDP
+	 * statistics collection, on the caller's guarantee that doing so
+	 * is safe.
+	 */
+ if (!force && !iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ iwl_mvm_enter_ctkill(mvm);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ u32 flush_arg;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ if (kstrtou32(buf, 0, &flush_arg))
+ return -EINVAL;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "FLUSHING all tids queues on sta_id = %d\n",
+ flush_arg);
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF)
+ ? : count;
+ mutex_unlock(&mvm->mutex);
+ return ret;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+ flush_arg);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count;
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ const struct fw_img *img;
+ unsigned int ofs, len;
+ size_t ret;
+ u8 *ptr;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EINVAL;
+
+ /* default is to dump the entire data segment */
+ img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (mvm->dbgfs_sram_len) {
+ ofs = mvm->dbgfs_sram_offset;
+ len = mvm->dbgfs_sram_len;
+ }
+
+ ptr = kzalloc(len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
+
+ kfree(ptr);
+
+ return ret;
+}
+
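+/*
+ * Write "<offset>,<len>" (hex, word-aligned, not extending past the end
+ * of the data section) to narrow the dump window; any other input resets
+ * the window to the full data segment.
+ */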
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ const struct fw_img *img;
+ u32 offset, len;
+ u32 img_offset, img_len;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EINVAL;
+
+ img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+ img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ if ((offset & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ if (offset + len > img_offset + img_len)
+ return -EINVAL;
+
+ mvm->dbgfs_sram_offset = offset;
+ mvm->dbgfs_sram_len = len;
+ } else {
+ mvm->dbgfs_sram_offset = 0;
+ mvm->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos;
+
+ if (!mvm->temperature_test)
+ pos = scnprintf(buf, sizeof(buf), "disabled\n");
+ else
+ pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * Set NIC Temperature
+ * Causes the driver to ignore the actual NIC temperature reported by
+ * the FW.
+ * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN and
+ * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX
+ * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE
+ */
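+/*
+ * Illustrative usage (values are examples only):
+ *   echo 75 > set_nic_temperature   # pretend the NIC reports 75
+ *   echo <IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE value> > set_nic_temperature
+ */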
+static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int temperature;
+
+ if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test)
+ return -EIO;
+
+ if (kstrtoint(buf, 10, &temperature))
+ return -EINVAL;
+ /* not a legal temperature */
+ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
+ temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
+ temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
+ if (!mvm->temperature_test)
+ goto out;
+
+ mvm->temperature_test = false;
+ /* Since we can't read the temp while awake, just set
+ * it to zero until we get the next RX stats from the
+ * firmware.
+ */
+ mvm->temperature = 0;
+ } else {
+ mvm->temperature_test = true;
+ mvm->temperature = temperature;
+ }
+ IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
+ mvm->temperature_test ? "En" : "Dis",
+ mvm->temperature);
+ /* handle the temperature change */
+ iwl_mvm_tt_handler(mvm);
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos, ret;
+ s32 temp;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_get_temp(mvm, &temp);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret)
+ return -EIO;
+
+ pos = scnprintf(buf, sizeof(buf), "%d\n", temp);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#ifdef CONFIG_ACPI
+static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[256];
+ int pos = 0;
+ int bufsz = sizeof(buf);
+ int tbl_idx;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+ if (tbl_idx < 0) {
+ mutex_unlock(&mvm->mutex);
+ return tbl_idx;
+ }
+
+ if (!tbl_idx) {
+ pos = scnprintf(buf, bufsz,
+ "SAR geographic profile disabled\n");
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Use geographic profile %d\n", tbl_idx);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "2.4GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0],
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1],
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "5.2GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0],
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1],
+ mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int err, pos;
+ char buf[12];
+ u32 value;
+
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (err)
+ return err;
+
+ pos = scnprintf(buf, sizeof(buf), "0x%08x\n", value);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+#endif
+
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct ieee80211_sta *sta;
+ char buf[400];
+ int i, pos = 0, bufsz = sizeof(buf);
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (!sta)
+ pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+ else if (IS_ERR(sta))
+ pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+ PTR_ERR(sta));
+ else
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rs_data_read(struct ieee80211_link_sta *link_sta,
+ struct iwl_mvm_sta *mvmsta,
+ struct iwl_mvm *mvm,
+ struct iwl_mvm_link_sta *mvm_link_sta,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvm_link_sta->lq_sta.rs_fw;
+ static const size_t bufsz = 2048;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+ lq_sta->pers.sta_id);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "fixed rate 0x%X\n",
+ lq_sta->pers.dbg_fixed_rate);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "A-MPDU size limit %d\n",
+ lq_sta->pers.dbg_agg_frame_count_lim);
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "valid_tx_ant %s%s\n",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "last tx rate=0x%X ",
+ lq_sta->last_rate_n_flags);
+
+ desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+ lq_sta->last_rate_n_flags);
+ if (desc < bufsz - 1)
+ buff[desc++] = '\n';
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
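+/*
+ * amsdu_len: a non-zero write overrides the A-MSDU size limit for all
+ * TIDs (the original limit is stashed first); writing 0 restores the
+ * stashed limit. Replacing one override with another is refused.
+ */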
+static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta,
+ struct iwl_mvm_sta *mvmsta,
+ struct iwl_mvm *mvm,
+ struct iwl_mvm_link_sta *mvm_link_sta,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int i;
+ u16 amsdu_len;
+
+ if (kstrtou16(buf, 0, &amsdu_len))
+ return -EINVAL;
+
+ /* only change from debug set <-> debug unset */
+ if (amsdu_len && mvm_link_sta->orig_amsdu_len)
+ return -EBUSY;
+
+ if (amsdu_len) {
+ mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len;
+ link_sta->agg.max_amsdu_len = amsdu_len;
+ for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++)
+ link_sta->agg.max_tid_amsdu_len[i] = amsdu_len;
+ } else {
+ link_sta->agg.max_amsdu_len = mvm_link_sta->orig_amsdu_len;
+ mvm_link_sta->orig_amsdu_len = 0;
+ }
+
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_amsdu_len_read(struct ieee80211_link_sta *link_sta,
+ struct iwl_mvm_sta *mvmsta,
+ struct iwl_mvm *mvm,
+ struct iwl_mvm_link_sta *mvm_link_sta,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ int pos;
+
+ pos = scnprintf(buf, sizeof(buf), "current %d ",
+ link_sta->agg.max_amsdu_len);
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n",
+ mvm_link_sta->orig_amsdu_len);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos = 0;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+ mvm->disable_power_off);
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+ mvm->disable_power_off_d3);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret, val;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (!strncmp("disable_power_off_d0=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off = val;
+ } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+ if (sscanf(buf + 21, "%d", &val) != 1)
+ return -EINVAL;
+ mvm->disable_power_off_d3 = val;
+ } else {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_power_update_device(mvm);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_tas_status_resp *rsp = NULL;
+ static const size_t bufsz = 1024;
+ char *buff, *pos, *endpos;
+ const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
+ [TAS_DISABLED_DUE_TO_BIOS] =
+ "Due To BIOS",
+ [TAS_DISABLED_DUE_TO_SAR_6DBM] =
+ "Due To SAR Limit Less Than 6 dBm",
+ [TAS_DISABLED_REASON_INVALID] =
+ "N/A",
+ [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] =
+ "Due to table source invalid",
+ };
+ const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = {
+ [TAS_DYNA_INACTIVE] = "INACTIVE",
+ [TAS_DYNA_INACTIVE_MVM_MODE] =
+ "inactive due to mvm mode",
+ [TAS_DYNA_INACTIVE_TRIGGER_MODE] =
+ "inactive due to trigger mode",
+ [TAS_DYNA_INACTIVE_BLOCK_LISTED] =
+ "inactive due to block listed",
+ [TAS_DYNA_INACTIVE_UHB_NON_US] =
+ "inactive due to uhb non US",
+ [TAS_DYNA_ACTIVE] = "ACTIVE",
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, GET_TAS_STATUS),
+ .flags = CMD_WANT_SKB,
+ .len = { 0, },
+ .data = { NULL, },
+ };
+ int ret, i, tmp;
+ bool tas_enabled = false;
+ unsigned long dyn_status;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -ENODEV;
+
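+ /* only version 3 of the GET_TAS_STATUS notification is parsed here */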
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DEBUG_GROUP, GET_TAS_STATUS,
+ 0) != 3)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+ if (ret < 0)
+ return ret;
+
+ buff = kzalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+ pos = buff;
+ endpos = pos + bufsz;
+
+ rsp = (void *)hcmd.resp_pkt->data;
+
+ pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n");
+ for (i = 0; i < rsp->in_dual_radio + 1; i++) {
+ if (rsp->tas_status_mac[i].dynamic_status &
+ BIT(TAS_DYNA_ACTIVE)) {
+ pos += scnprintf(pos, endpos - pos, "\tON for ");
+ switch (rsp->tas_status_mac[i].band) {
+ case PHY_BAND_5:
+ pos += scnprintf(pos, endpos - pos, "HB\n");
+ break;
+ case PHY_BAND_24:
+ pos += scnprintf(pos, endpos - pos, "LB\n");
+ break;
+ case PHY_BAND_6:
+ pos += scnprintf(pos, endpos - pos, "UHB\n");
+ break;
+ default:
+ pos += scnprintf(pos, endpos - pos,
+ "Unsupported band (%d)\n",
+ rsp->tas_status_mac[i].band);
+ goto out;
+ }
+ tas_enabled = true;
+ }
+ }
+ if (!tas_enabled)
+ pos += scnprintf(pos, endpos - pos, "\tOFF\n");
+
+ pos += scnprintf(pos, endpos - pos, "TAS Report\n");
+ pos += scnprintf(pos, endpos - pos, "TAS FW version: %d\n",
+ rsp->tas_fw_version);
+ pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n",
+ rsp->is_uhb_for_usa_enable ? "True" : "False");
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT))
+ pos += scnprintf(pos, endpos - pos,
+ "Is UHB enabled for CANADA?: %s\n",
+ rsp->uhb_allowed_flags &
+ TAS_UHB_ALLOWED_CANADA ? "True" : "False");
+
+ pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n",
+ le16_to_cpu(rsp->curr_mcc));
+
+ pos += scnprintf(pos, endpos - pos, "Block list entries:");
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
+ pos += scnprintf(pos, endpos - pos, " 0x%x",
+ le16_to_cpu(rsp->block_list[i]));
+
+ pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
+ iwl_is_tas_approved() ? "YES" : "NO");
+ pos += scnprintf(pos, endpos - pos,
+ "\tDo TAS Support Dual Radio?: %s\n",
+ rsp->in_dual_radio ? "TRUE" : "FALSE");
+
+ for (i = 0; i < rsp->in_dual_radio + 1; i++) {
+ if (rsp->tas_status_mac[i].static_status == 0) {
+ pos += scnprintf(pos, endpos - pos,
+ "Static status: disabled\n");
+ pos += scnprintf(pos, endpos - pos,
+ "Static disabled reason: %s (0)\n",
+ tas_dis_reason[0]);
+ goto out;
+ }
+
+ pos += scnprintf(pos, endpos - pos, "TAS status for ");
+ switch (rsp->tas_status_mac[i].band) {
+ case PHY_BAND_5:
+ pos += scnprintf(pos, endpos - pos, "High band\n");
+ break;
+ case PHY_BAND_24:
+ pos += scnprintf(pos, endpos - pos, "Low band\n");
+ break;
+ case PHY_BAND_6:
+ pos += scnprintf(pos, endpos - pos,
+ "Ultra high band\n");
+ break;
+ default:
+ pos += scnprintf(pos, endpos - pos,
+ "Unsupported band (%d)\n",
+ rsp->tas_status_mac[i].band);
+ goto out;
+ }
+ pos += scnprintf(pos, endpos - pos, "Static status: %sabled\n",
+ rsp->tas_status_mac[i].static_status ?
+ "En" : "Dis");
+ pos += scnprintf(pos, endpos - pos,
+ "\tStatic Disabled Reason: ");
+ if (rsp->tas_status_mac[i].static_dis_reason < TAS_DISABLED_REASON_MAX)
+ pos += scnprintf(pos, endpos - pos, "%s (%d)\n",
+ tas_dis_reason[rsp->tas_status_mac[i].static_dis_reason],
+ rsp->tas_status_mac[i].static_dis_reason);
+ else
+ pos += scnprintf(pos, endpos - pos,
+ "unsupported value (%d)\n",
+ rsp->tas_status_mac[i].static_dis_reason);
+
+ pos += scnprintf(pos, endpos - pos, "Dynamic status:\n");
+ dyn_status = (rsp->tas_status_mac[i].dynamic_status);
+ for_each_set_bit(tmp, &dyn_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(pos, endpos - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
+ }
+
+ pos += scnprintf(pos, endpos - pos,
+ "Is near disconnection?: %s\n",
+ rsp->tas_status_mac[i].near_disconnection ?
+ "True" : "False");
+ tmp = le16_to_cpu(rsp->tas_status_mac[i].max_reg_pwr_limit);
+ pos += scnprintf(pos, endpos - pos,
+ "Max. regulatory pwr limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ tmp = le16_to_cpu(rsp->tas_status_mac[i].sar_limit);
+ pos += scnprintf(pos, endpos - pos,
+ "SAR limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ }
+
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char *buf;
+ size_t bufsz;
+ int pos;
+ ssize_t ret;
+
+ bufsz = mvm->fw->phy_integration_ver_len + 2;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len,
+ mvm->fw->phy_integration_ver);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+ kfree(buf);
+ return ret;
+}
+
+#define PRINT_STATS_LE32(_struct, _memb) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ fmt_table, #_memb, \
+ le32_to_cpu(_struct->_memb))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ static const char *fmt_table = "\t%-30s %10u\n";
+ static const char *fmt_header = "%-32s\n";
+ int pos = 0;
+ char *buf;
+ int ret;
+ size_t bufsz;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
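+ /* newer firmware exposes SYSTEM_STATISTICS_CMD instead; this legacy
+ * file only works when that command is absent
+ */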
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (iwl_mvm_has_new_rx_stats_api(mvm))
+ bufsz = ((sizeof(struct mvm_statistics_rx) /
+ sizeof(__le32)) * 43) + (4 * 33) + 1;
+ else
+ /* 43 = size of each data line; 33 = size of each header */
+ bufsz = ((sizeof(struct mvm_statistics_rx_v3) /
+ sizeof(__le32)) * 43) + (4 * 33) + 1;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm;
+
+ PRINT_STATS_LE32(ofdm, ina_cnt);
+ PRINT_STATS_LE32(ofdm, fina_cnt);
+ PRINT_STATS_LE32(ofdm, plcp_err);
+ PRINT_STATS_LE32(ofdm, crc32_err);
+ PRINT_STATS_LE32(ofdm, overrun_err);
+ PRINT_STATS_LE32(ofdm, early_overrun_err);
+ PRINT_STATS_LE32(ofdm, crc32_good);
+ PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+ PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+ PRINT_STATS_LE32(ofdm, sfd_timeout);
+ PRINT_STATS_LE32(ofdm, fina_timeout);
+ PRINT_STATS_LE32(ofdm, unresponded_rts);
+ PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+ PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+ PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(ofdm, dsp_self_kill);
+ PRINT_STATS_LE32(ofdm, mh_format_err);
+ PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(ofdm, reserved);
+ } else {
+ struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm;
+
+ PRINT_STATS_LE32(ofdm, unresponded_rts);
+ PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(ofdm, dsp_self_kill);
+ PRINT_STATS_LE32(ofdm, reserved);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck;
+
+ PRINT_STATS_LE32(cck, ina_cnt);
+ PRINT_STATS_LE32(cck, fina_cnt);
+ PRINT_STATS_LE32(cck, plcp_err);
+ PRINT_STATS_LE32(cck, crc32_err);
+ PRINT_STATS_LE32(cck, overrun_err);
+ PRINT_STATS_LE32(cck, early_overrun_err);
+ PRINT_STATS_LE32(cck, crc32_good);
+ PRINT_STATS_LE32(cck, false_alarm_cnt);
+ PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+ PRINT_STATS_LE32(cck, sfd_timeout);
+ PRINT_STATS_LE32(cck, fina_timeout);
+ PRINT_STATS_LE32(cck, unresponded_rts);
+ PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(cck, sent_ack_cnt);
+ PRINT_STATS_LE32(cck, sent_cts_cnt);
+ PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(cck, dsp_self_kill);
+ PRINT_STATS_LE32(cck, mh_format_err);
+ PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+ PRINT_STATS_LE32(cck, reserved);
+ } else {
+ struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck;
+
+ PRINT_STATS_LE32(cck, unresponded_rts);
+ PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+ PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+ PRINT_STATS_LE32(cck, dsp_self_kill);
+ PRINT_STATS_LE32(cck, reserved);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_non_phy_v3 *general =
+ &mvm->rx_stats_v3.general;
+
+ PRINT_STATS_LE32(general, bogus_cts);
+ PRINT_STATS_LE32(general, bogus_ack);
+ PRINT_STATS_LE32(general, non_bssid_frames);
+ PRINT_STATS_LE32(general, filtered_frames);
+ PRINT_STATS_LE32(general, non_channel_beacons);
+ PRINT_STATS_LE32(general, channel_beacons);
+ PRINT_STATS_LE32(general, num_missed_bcon);
+ PRINT_STATS_LE32(general, adc_rx_saturation_time);
+ PRINT_STATS_LE32(general, ina_detection_search_time);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+ PRINT_STATS_LE32(general, interference_data_flag);
+ PRINT_STATS_LE32(general, channel_load);
+ PRINT_STATS_LE32(general, dsp_false_alarms);
+ PRINT_STATS_LE32(general, beacon_rssi_a);
+ PRINT_STATS_LE32(general, beacon_rssi_b);
+ PRINT_STATS_LE32(general, beacon_rssi_c);
+ PRINT_STATS_LE32(general, beacon_energy_a);
+ PRINT_STATS_LE32(general, beacon_energy_b);
+ PRINT_STATS_LE32(general, beacon_energy_c);
+ PRINT_STATS_LE32(general, num_bt_kills);
+ PRINT_STATS_LE32(general, mac_id);
+ PRINT_STATS_LE32(general, directed_data_mpdu);
+ } else {
+ struct mvm_statistics_rx_non_phy *general =
+ &mvm->rx_stats.general;
+
+ PRINT_STATS_LE32(general, bogus_cts);
+ PRINT_STATS_LE32(general, bogus_ack);
+ PRINT_STATS_LE32(general, non_channel_beacons);
+ PRINT_STATS_LE32(general, channel_beacons);
+ PRINT_STATS_LE32(general, num_missed_bcon);
+ PRINT_STATS_LE32(general, adc_rx_saturation_time);
+ PRINT_STATS_LE32(general, ina_detection_search_time);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+ PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+ PRINT_STATS_LE32(general, interference_data_flag);
+ PRINT_STATS_LE32(general, channel_load);
+ PRINT_STATS_LE32(general, beacon_rssi_a);
+ PRINT_STATS_LE32(general, beacon_rssi_b);
+ PRINT_STATS_LE32(general, beacon_rssi_c);
+ PRINT_STATS_LE32(general, beacon_energy_a);
+ PRINT_STATS_LE32(general, beacon_energy_b);
+ PRINT_STATS_LE32(general, beacon_energy_c);
+ PRINT_STATS_LE32(general, num_bt_kills);
+ PRINT_STATS_LE32(general, mac_id);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - HT");
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct mvm_statistics_rx_ht_phy_v1 *ht =
+ &mvm->rx_stats_v3.ofdm_ht;
+
+ PRINT_STATS_LE32(ht, plcp_err);
+ PRINT_STATS_LE32(ht, overrun_err);
+ PRINT_STATS_LE32(ht, early_overrun_err);
+ PRINT_STATS_LE32(ht, crc32_good);
+ PRINT_STATS_LE32(ht, crc32_err);
+ PRINT_STATS_LE32(ht, mh_format_err);
+ PRINT_STATS_LE32(ht, agg_crc32_good);
+ PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+ PRINT_STATS_LE32(ht, agg_cnt);
+ PRINT_STATS_LE32(ht, unsupport_mcs);
+ } else {
+ struct mvm_statistics_rx_ht_phy *ht =
+ &mvm->rx_stats.ofdm_ht;
+
+ PRINT_STATS_LE32(ht, mh_format_err);
+ PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+ PRINT_STATS_LE32(ht, agg_cnt);
+ PRINT_STATS_LE32(ht, unsupport_mcs);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef PRINT_STATS_LE32
+
+static ssize_t iwl_dbgfs_fw_system_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff, *pos, *endpos;
+ int ret;
+ size_t bufsz;
+ int i;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+ struct iwl_mvm *mvm = file->private_data;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ /* in case of a wrong cmd version, allocate buffer only for error msg */
+ bufsz = (cmd_ver == 1) ? 4096 : 64;
+
+ buff = kzalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ if (cmd_ver != 1) {
+ pos += scnprintf(pos, endpos - pos,
+ "System stats not supported:%d\n", cmd_ver);
+ goto send_out;
+ }
+
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false);
+ if (!vif)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ }
+
+ if (i == NUM_MAC_INDEX_DRIVER || !vif) {
+ pos += scnprintf(pos, endpos - pos, "vif is NULL\n");
+ goto release_send_out;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (!mvmvif) {
+ pos += scnprintf(pos, endpos - pos, "mvmvif is NULL\n");
+ goto release_send_out;
+ }
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[i];
+
+ pos += scnprintf(pos, endpos - pos,
+ "link_id %d", i);
+ pos += scnprintf(pos, endpos - pos,
+ " num_beacons %d",
+ link_info->beacon_stats.num_beacons);
+ pos += scnprintf(pos, endpos - pos,
+ " accu_num_beacons %d",
+ link_info->beacon_stats.accu_num_beacons);
+ pos += scnprintf(pos, endpos - pos,
+ " avg_signal %d\n",
+ link_info->beacon_stats.avg_signal);
+ }
+
+ pos += scnprintf(pos, endpos - pos,
+ "radio_stats.rx_time %lld\n",
+ mvm->radio_stats.rx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "radio_stats.tx_time %lld\n",
+ mvm->radio_stats.tx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "accu_radio_stats.rx_time %lld\n",
+ mvm->accu_radio_stats.rx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "accu_radio_stats.tx_time %lld\n",
+ mvm->accu_radio_stats.tx_time);
+
+release_send_out:
+ mutex_unlock(&mvm->mutex);
+
+send_out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+ char __user *user_buf, size_t count,
+ loff_t *ppos,
+ struct iwl_mvm_frame_stats *stats)
+{
+ char *buff, *pos, *endpos;
+ int idx, i;
+ int ret;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ spin_lock_bh(&mvm->drv_stats_lock);
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos,
+ "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+ stats->legacy_frames,
+ stats->ht_frames,
+ stats->vht_frames);
+ pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+ stats->bw_20_frames,
+ stats->bw_40_frames,
+ stats->bw_80_frames);
+ pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+ stats->ngi_frames,
+ stats->sgi_frames);
+ pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+ stats->siso_frames,
+ stats->mimo2_frames);
+ pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+ stats->fail_frames,
+ stats->success_frames);
+ pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+ stats->agg_frames);
+ pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+ stats->ampdu_count);
+ pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+ stats->ampdu_count > 0 ?
+ (stats->agg_frames / stats->ampdu_count) : 0);
+
+ pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
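+ /* walk the last_rates ring from the oldest to the newest entry */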
+ idx = stats->last_frame_idx - 1;
+ for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+ idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+ if (stats->last_rates[idx] == 0)
+ continue;
+ pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+ (int)(ARRAY_SIZE(stats->last_rates) - i));
+ pos += rs_pretty_print_rate_v1(pos, endpos - pos,
+ stats->last_rates[idx]);
+ if (pos < endpos - 1)
+ *pos++ = '\n';
+ }
+ spin_unlock_bh(&mvm->drv_stats_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+
+ return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+ &mvm->drv_rx_stats);
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int __maybe_unused ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+
+ if (count == 6 && !strcmp(buf, "nolog\n")) {
+ set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
+ iwl_trans_suppress_cmd_error_once(mvm->trans);
+ }
+
+ /* take the return value to make compiler happy - it will fail anyway */
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(LONG_GROUP, REPLY_ERROR),
+ 0, 0, NULL);
+
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ IWL_ERR(mvm, "Triggering an NMI from debugfs\n");
+
+ if (count == 6 && !strcmp(buf, "nolog\n"))
+ set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
+
+ iwl_force_nmi(mvm->trans);
+
+ return count;
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ /* print which antennas were set for the scan command by the user */
+ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+ if (mvm->scan_rx_ant & ANT_A)
+ pos += scnprintf(buf + pos, bufsz - pos, "A");
+ if (mvm->scan_rx_ant & ANT_B)
+ pos += scnprintf(buf + pos, bufsz - pos, "B");
+ pos += scnprintf(buf + pos, bufsz - pos, " (%x)\n", mvm->scan_rx_ant);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 scan_rx_ant;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+ return -EINVAL;
+ if (scan_rx_ant > ANT_ABC)
+ return -EINVAL;
+ if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
+ return -EINVAL;
+
+ if (mvm->scan_rx_ant != scan_rx_ant) {
+ mvm->scan_rx_ant = scan_rx_ant;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_UDP |
+ IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+ IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_UDP |
+ IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+ };
+ int ret, i, num_repeats, nbytes = count / 2;
+
+ ret = hex2bin(cmd.indirection_table, buf, nbytes);
+ if (ret)
+ return ret;
+
+ /*
+ * The input is the redirection table, partial or full.
+ * Repeat the pattern if needed.
+ * For example, an input of 01020F is repeated 42 times, directing
+ * RSS hash results to queues 1, 2, and 15 (skipping queues 3 - 14).
+ */
+ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+ for (i = 1; i < num_repeats; i++)
+ memcpy(&cmd.indirection_table[i * nbytes],
+ cmd.indirection_table, nbytes);
+ /* handle cut in the middle pattern for the last places */
+ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+ ARRAY_SIZE(cmd.indirection_table) % nbytes);
+
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0,
+ sizeof(cmd), &cmd);
+ else
+ ret = 0;
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
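+/*
+ * inject_packet: expects a hex dump of a complete iwl_rx_packet
+ * (header plus payload); the packet is fed into the normal MQ RX
+ * path as if it had arrived from the firmware.
+ */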
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_op_mode *opmode = container_of((void *)mvm,
+ struct iwl_op_mode,
+ op_mode_specific);
+ struct iwl_rx_cmd_buffer rxb = {
+ ._rx_page_order = 0,
+ .truesize = 0, /* not used */
+ ._offset = 0,
+ };
+ struct iwl_rx_packet *pkt;
+ int bin_len = count / 2;
+ int ret = -EINVAL;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ /* supporting only MQ RX */
+ if (!mvm->trans->mac_cfg->mq_rx_supported)
+ return -EOPNOTSUPP;
+
+ rxb._page = alloc_pages(GFP_ATOMIC, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
+ ret = hex2bin(page_address(rxb._page), buf, bin_len);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access and malformed packet */
+ if (bin_len < sizeof(*pkt) ||
+ bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
+ goto out;
+
+ local_bh_disable();
+ iwl_mvm_rx_mq(opmode, NULL, &rxb);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
+static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
+{
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+ struct sk_buff *beacon;
+ struct ieee80211_tx_info *info;
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ unsigned int link_id;
+ u8 rate;
+ int i;
+
+ len /= 2;
+
+ /* Element len should be represented by u8 */
+ if (len >= U8_MAX)
+ return -EINVAL;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ !fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false);
+ if (!vif)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ break;
+ }
+
+ if (i == NUM_MAC_INDEX_DRIVER || !vif)
+ goto out_err;
+
+ mvm->hw->extra_beacon_tailroom = len;
+
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0);
+ if (!beacon)
+ goto out_err;
+
+ if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) {
+ dev_kfree_skb(beacon);
+ goto out_err;
+ }
+
+ mvm->beacon_inject_active = true;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ info = IEEE80211_SKB_CB(beacon);
+ rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ beacon_cmd.flags =
+ cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw,
+ rate));
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12)
+ beacon_cmd.link_id =
+ cpu_to_le32(mvmvif->link[link_id]->fw_link_id);
+ else
+ beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id);
+
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ BEACON_TEMPLATE_CMD, 0) >= 14) {
+ u32 offset = iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+ }
+
+ iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+ }
+ mutex_unlock(&mvm->mutex);
+
+ dev_kfree_skb(beacon);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&mvm->mutex);
+ return -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count);
+
+ mvm->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm,
+ char *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0);
+
+ mvm->hw->extra_beacon_tailroom = 0;
+ mvm->beacon_inject_active = false;
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int conf;
+ char buf[8];
+ const size_t bufsz = sizeof(buf);
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ conf = mvm->fwrt.dump.conf;
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int conf_id;
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = kstrtouint(buf, 0, &conf_id);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ return -EOPNOTSUPP;
+
+ /*
+ * If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (!iwl_mvm_firmware_running(mvm))
+ return count;
+
+ mutex_lock(&mvm->mutex);
+ iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ u32 timepoint;
+
+ if (kstrtou32(buf, 0, &timepoint))
+ return -EINVAL;
+
+ if (timepoint == IWL_FW_INI_TIME_POINT_INVALID ||
+ timepoint >= IWL_FW_INI_TIME_POINT_NUM)
+ return -EINVAL;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL);
+
+ return count;
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ debugfs_create_file(alias, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops); \
+ } while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
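+/*
+ * The link-station wrappers below resolve the per-link station data
+ * under mvm->mutex before dispatching, so the real handlers can assume
+ * mvm_link_sta is valid and that the mutex is held.
+ */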
+static ssize_t
+_iwl_dbgfs_link_sta_wrap_write(ssize_t (*real)(struct ieee80211_link_sta *,
+ struct iwl_mvm_sta *,
+ struct iwl_mvm *,
+ struct iwl_mvm_link_sta *,
+ char *,
+ size_t, loff_t *),
+ struct file *file,
+ char *buf, size_t buf_size, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta);
+ struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm;
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ ssize_t ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ON(!mvm_link_sta)) {
+ mutex_unlock(&mvm->mutex);
+ return -ENODEV;
+ }
+
+ ret = real(link_sta, mvmsta, mvm, mvm_link_sta, buf, buf_size, ppos);
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static ssize_t
+_iwl_dbgfs_link_sta_wrap_read(ssize_t (*real)(struct ieee80211_link_sta *,
+ struct iwl_mvm_sta *,
+ struct iwl_mvm *,
+ struct iwl_mvm_link_sta *,
+ char __user *,
+ size_t, loff_t *),
+ struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta);
+ struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm;
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ ssize_t ret;
+
+ mutex_lock(&mvm->mutex);
+
+ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ON(!mvm_link_sta)) {
+ mutex_unlock(&mvm->mutex);
+ return -ENODEV;
+ }
+
+ ret = real(link_sta, mvmsta, mvm, mvm_link_sta, user_buf, count, ppos);
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+#define MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, buflen) \
+static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \
+ file, \
+ buf, buf_size, ppos); \
+}
+
+#define MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_link_sta_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return _iwl_dbgfs_link_sta_wrap_read(iwl_dbgfs_##name##_read, \
+ file, \
+ user_buf, count, ppos); \
+}
+
+#define MVM_DEBUGFS_WRITE_LINK_STA_FILE_OPS(name, bufsz) \
+MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \
+static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \
+ .write = _iwl_dbgfs_link_sta_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(name) \
+MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \
+ .read = _iwl_dbgfs_link_sta_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(name, bufsz) \
+MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \
+MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \
+static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \
+ .read = _iwl_dbgfs_link_sta_##name##_read, \
+ .write = _iwl_dbgfs_link_sta_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, link_sta, \
+ &iwl_dbgfs_link_sta_##name##_ops)
+#define MVM_DEBUGFS_ADD_LINK_STA_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[32];
+ const size_t bufsz = sizeof(buf);
+
+ if (!mvm->dbgfs_prph_reg_addr)
+ return -EINVAL;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+ mvm->dbgfs_prph_reg_addr,
+ iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
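+/*
+ * prph_reg: write "<addr>" to latch a periphery register address for
+ * subsequent reads, or "<addr> <value>" to also write the register.
+ */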
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 args;
+ u32 value;
+
+ args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+ /* if we only want to set the reg address - nothing more to do */
+ if (args == 1)
+ goto out;
+
+ /* otherwise, make sure we have both address and value */
+ if (args != 2)
+ return -EINVAL;
+
+ iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+
+out:
+ return count;
+}
+
+static ssize_t
+iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+struct iwl_mvm_sniffer_apply {
+ struct iwl_mvm *mvm;
+ u8 *bssid;
+ u16 aid;
+};
+
+static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm_sniffer_apply *apply = data;
+
+ apply->mvm->cur_aid = cpu_to_le16(apply->aid);
+ memcpy(apply->mvm->cur_bssid, apply->bssid,
+ sizeof(apply->mvm->cur_bssid));
+
+ return true;
+}
+
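+/*
+ * he_sniffer_params: expects "<aid, hex> <bssid>", e.g.
+ *   echo "1 00:11:22:33:44:55" > he_sniffer_params  (illustrative values)
+ */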
+static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_notification_wait wait;
+ struct iwl_he_monitor_cmd he_mon_cmd = {};
+ struct iwl_mvm_sniffer_apply apply = {
+ .mvm = mvm,
+ };
+ u16 wait_cmds[] = {
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ };
+ u32 aid;
+ int ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+ &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+ &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+ &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+ if (ret != 7)
+ return -EINVAL;
+
+ he_mon_cmd.aid = cpu_to_le16(aid);
+
+ apply.aid = aid;
+ apply.bssid = (void *)he_mon_cmd.bssid;
+
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * Use the notification waiter to get our function triggered
+ * in sequence with other RX. This ensures that frames we get
+ * on the RX queue _before_ the new configuration is applied
+ * still have mvm->cur_aid pointing to the old AID, and that
+ * frames on the RX queue _after_ the firmware processed the
+ * new configuration (and sent the response, synchronously)
+ * get mvm->cur_aid correctly set to the new AID.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait,
+ wait_cmds, ARRAY_SIZE(wait_cmds),
+ iwl_mvm_sniffer_apply, &apply);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ 0,
+ sizeof(he_mon_cmd), &he_mon_cmd);
+
+ /* no need to really wait, we already did anyway */
+ iwl_remove_notification(&mvm->notif_wait, &wait);
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t
+iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u8 buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf),
+ "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+ le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0],
+ mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3],
+ mvm->cur_bssid[4], mvm->cur_bssid[5]);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
+ unsigned int pos = 0;
+ size_t bufsz = sizeof(buf);
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
+ pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+ mvm->uapsd_noagg_bssids[i].addr);
+
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm,
+ char *buf, size_t count, loff_t *ppos)
+{
+ int ret;
+ struct iwl_ltr_config_cmd ltr_config = {0};
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x",
+ &ltr_config.flags,
+ &ltr_config.static_long,
+ &ltr_config.static_short,
+ &ltr_config.ltr_cfg_values[0],
+ &ltr_config.ltr_cfg_values[1],
+ &ltr_config.ltr_cfg_values[2],
+ &ltr_config.ltr_cfg_values[3]) != 7) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config),
+ &ltr_config);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret)
+ IWL_ERR(mvm, "failed to send ltr configuration cmd\n");
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret = 0;
+ u16 op_id;
+
+ if (kstrtou16(buf, 10, &op_id))
+ return -EINVAL;
+
+ /* value zero triggers re-sending the default table to the device */
+ if (!op_id) {
+ mutex_lock(&mvm->mutex);
+ ret = iwl_rfi_send_config_cmd(mvm, NULL);
+ mutex_unlock(&mvm->mutex);
+ } else {
+ ret = -EOPNOTSUPP; /* in the future a new table will be added */
+ }
+
+ return ret ?: count;
+}
+
+/* The size computation is as follows:
+ * Each number needs at most 3 characters, and the number of rows equals
+ * the table size. Each row needs 5 chars for the "freq: " part, and
+ * each tuple afterwards needs 6 characters for the numbers and 5 for
+ * the punctuation around them.
+ */
+#define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5)))
+
+static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_rfi_freq_table_resp_cmd *resp;
+ u32 status;
+ char buf[IWL_RFI_BUF_SIZE];
+ int i, j, pos = 0;
+
+ resp = iwl_rfi_get_freq_table(mvm);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ status = le32_to_cpu(resp->status);
+ if (status != RFI_FREQ_TABLE_OK) {
+ scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status);
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(resp->table); i++) {
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ",
+ resp->table[i].freq);
+
+ for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++)
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos,
+ "(%d, %d) ",
+ resp->table[i].channels[j],
+ resp->table[i].bands[j]);
+ pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n");
+ }
+
+out:
+ kfree(resp);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
+MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
+MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats);
+MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
+MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+ (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
+
+MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
+
+#ifdef CONFIG_ACPI
+MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
+MVM_DEBUGFS_READ_FILE_OPS(wifi_6e_enable);
+#endif
+
+MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(amsdu_len, 16);
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32);
+
+MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16);
+
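+/*
+ * The "mem" file maps firmware memory via DEBUG_GROUP commands; the
+ * file offset doubles as the target address. Addresses with any bit
+ * set above bit 23 are routed to the UMAC (UMAC_RD_WR), lower ones to
+ * the LMAC (LMAC_RD_WR). Accesses are 4-byte aligned internally.
+ */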
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd cmd = {};
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ };
+ size_t delta;
+ ssize_t ret, len;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
+ cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+ /* Take care of alignment of both the position and the length */
+ delta = *ppos & 0x3;
+ cmd.addr = cpu_to_le32(*ppos - delta);
+ cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+ (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ len = min((size_t)le32_to_cpu(rsp->len) << 2,
+ iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+ len = min(len - delta, count);
+ if (len < 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_dbg_mem_access_cmd *cmd;
+ struct iwl_dbg_mem_access_rsp *rsp;
+ struct iwl_host_cmd hcmd = {};
+ size_t cmd_size;
+ size_t data_size;
+ u32 op, len;
+ ssize_t ret;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return -EIO;
+
+ hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR);
+
+ if (*ppos & 0x3 || count < 4) {
+ op = DEBUG_MEM_OP_WRITE_BYTES;
+ len = min(count, (size_t)(4 - (*ppos & 0x3)));
+ data_size = len;
+ } else {
+ op = DEBUG_MEM_OP_WRITE;
+ len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+ data_size = len << 2;
+ }
+
+ cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->op = cpu_to_le32(op);
+ cmd->len = cpu_to_le32(len);
+ cmd->addr = cpu_to_le32(*ppos);
+ if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+
+ hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+ hcmd.data[0] = (void *)cmd;
+ hcmd.len[0] = cmd_size;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ mutex_unlock(&mvm->mutex);
+
+ kfree(cmd);
+
+ if (ret < 0)
+ return ret;
+
+ if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+ if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = data_size;
+ *ppos += ret;
+
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+ .read = iwl_dbgfs_mem_read,
+ .write = iwl_dbgfs_mem_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (iwl_mvm_has_tlc_offload(mvm))
+ MVM_DEBUGFS_ADD_LINK_STA_FILE(rs_data, dir, 0400);
+
+ MVM_DEBUGFS_ADD_LINK_STA_FILE(amsdu_len, dir, 0600);
+}
+
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
+{
+ struct dentry *bcast_dir __maybe_unused;
+
+ spin_lock_init(&mvm->drv_stats_lock);
+
+ MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600);
+
+ if (mvm->fw->phy_integration_ver)
+ MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(tas_get_status, mvm->debugfs_dir, 0400);
+#ifdef CONFIG_ACPI
+ MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(wifi_6e_enable, mvm->debugfs_dir, 0400);
+#endif
+ MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
+ MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200);
+
+ debugfs_create_bool("enable_scan_iteration_notif", 0600,
+ mvm->debugfs_dir, &mvm->scan_iter_notif_enabled);
+ debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir,
+ &mvm->drop_bcn_ap_mode);
+
+ MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400);
+
+#ifdef CONFIG_PM_SLEEP
+ MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
+ debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
+ &mvm->d3_wake_sysassert);
+ debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
+ &mvm->last_netdetect_scans);
+#endif
+
+ debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir,
+ &mvm->ps_disabled);
+ debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_hw_blob);
+ debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_sw_blob);
+ debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir,
+ &mvm->nvm_calib_blob);
+ debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir,
+ &mvm->nvm_prod_blob);
+ debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir,
+ &mvm->nvm_phy_sku_blob);
+ debugfs_create_blob("nvm_reg", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_reg_blob);
+
+ debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
+ &iwl_dbgfs_mem_ops);
+
+ debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir,
+ &mvm->rx_ts_ptp);
+
+#if defined(__linux__)
+ /*
+ * Create a symlink with mac80211. It will be removed when mac80211
+ * exits (before the opmode exits, which removes the target).
+ */
+ if (!IS_ERR(mvm->debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+ buf);
+ }
+#endif
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs.h b/sys/contrib/dev/iwlwifi/mvm/debugfs.h
new file mode 100644
index 000000000000..cc2c45b45ddc
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ */
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
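+ /* keep one byte for the terminating NUL */ \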
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+}
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c
new file mode 100644
index 000000000000..83ae969db0da
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/math64.h>
+#include <net/cfg80211.h>
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "constants.h"
+
+struct iwl_mvm_loc_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 lci_len, civic_len;
+ u8 buf[];
+};
+
+struct iwl_mvm_smooth_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ s64 rtt_avg;
+ u64 host_time;
+};
+
+enum iwl_mvm_pasn_flags {
+ IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
+};
+
+struct iwl_mvm_ftm_pasn_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ u8 cipher;
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u32 flags;
+};
+
+struct iwl_mvm_ftm_iter_data {
+ u8 *cipher;
+ u8 *bssid;
+ u8 *tk;
+};
+
+static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_loc_entry *e, *t;
+
+ mvm->ftm_initiator.req = NULL;
+ mvm->ftm_initiator.req_wdev = NULL;
+ memset(mvm->ftm_initiator.responses, 0,
+ sizeof(mvm->ftm_initiator.responses));
+
+ list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
+{
+ struct cfg80211_pmsr_result result = {
+ .status = NL80211_PMSR_STATUS_FAILURE,
+ .final = 1,
+ .host_time = ktime_get_boottime_ns(),
+ .type = NL80211_PMSR_TYPE_FTM,
+ };
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->ftm_initiator.req)
+ return;
+
+ for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
+ memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
+ ETH_ALEN);
+ result.ftm.burst_index = mvm->ftm_initiator.responses[i];
+
+ cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ &result, GFP_KERNEL);
+ }
+
+ cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req, GFP_KERNEL);
+ iwl_mvm_ftm_reset(mvm);
+}
+
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
+{
+ INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
+
+ IWL_DEBUG_INFO(mvm,
+ "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
+ IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
+}
+
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_smooth_entry *se, *st;
+
+ list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
+ list) {
+ list_del(&se->list);
+ kfree(se);
+ }
+}
+
+static int
+iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
+{
+ switch (s) {
+ case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
+ return 0;
+ case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
+ return -EBUSY;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
+static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd_v5 *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /* use the maximum for "no timeout" or for anything bigger than we can do */
+ if (!req->timeout || req->timeout > 255 * 100)
+ cmd->req_timeout = 255;
+ else
+ cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
+
+ /*
+ * We always treat the address as random; if it weren't, we'd
+ * have filled in our local address there instead.
+ */
+ cmd->macaddr_random = 1;
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->cfg.assoc)
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->range_req_bssid);
+}
+
+static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+#if defined(__linux__)
+ struct iwl_tof_range_req_cmd_v9 *cmd,
+#elif defined(__FreeBSD__)
+ struct iwl_tof_range_req_cmd_v9 *cmd, /* XXX-BZ Probably better solved by a common struct in fw for top parts of the struct. */
+#endif
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->initiator_flags =
+ cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /*
+ * Use a large value for "no timeout". Don't use the maximum value
+ * because of fw limitations.
+ */
+ if (req->timeout)
+ cmd->req_timeout_ms = cpu_to_le32(req->timeout);
+ else
+ cmd->req_timeout_ms = cpu_to_le32(0xfffff);
+
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
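+ /*
+ * cfg80211 sets mask bits for the address bits that stay fixed, while
+ * the fw apparently expects set bits to mark the bits to randomize,
+ * hence the inversion below.
+ */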
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->cfg.assoc) {
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ /* AP's TSF is only relevant if associated */
+ for (i = 0; i < req->n_peers; i++) {
+ if (req->peers[i].report_ap_tsf) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
+ return;
+ }
+ }
+ } else {
+ eth_broadcast_addr(cmd->range_req_bssid);
+ }
+
+ /* Don't report AP's TSF */
+ cmd->tsf_mac_id = cpu_to_le32(0xff);
+}
+
+static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd_v8 *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
+}
+
+static int
+iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *bandwidth,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bandwidth = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bandwidth = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bandwidth = IWL_TOF_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bandwidth = IWL_TOF_BW_80;
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *channel, u8 *format_bw,
+ u8 *ctrl_ch_position)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+ u8 cmd_ver;
+
+ *channel = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver >= 13) {
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ break;
+ }
+ fallthrough;
+ default:
+ IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+ }
+
+ /* non-EDCA based measurements must use the HE preamble */
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
+ *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
+
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v2 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->measure_type = 0; /* regular two-sided FTM */
+ target->retries_per_sample = peer->ftm.ftmr_retries;
+ target->asap_mode = peer->ftm.asap;
+ target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
+
+ if (peer->ftm.request_lci)
+ target->location_req |= IWL_TOF_LOC_LCI;
+ if (peer->ftm.request_civicloc)
+ target->location_req |= IWL_TOF_LOC_CIVIC;
+
+ target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
+
+ return 0;
+}
+
+#define FTM_SET_FLAG(flag) (*flags |= \
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
+
+static void
+iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ __le32 *flags)
+{
+ *flags = cpu_to_le32(0);
+
+ if (peer->ftm.asap)
+ FTM_SET_FLAG(ASAP);
+
+ if (peer->ftm.request_lci)
+ FTM_SET_FLAG(LCI_REQUEST);
+
+ if (peer->ftm.request_civicloc)
+ FTM_SET_FLAG(CIVIC_REQUEST);
+
+ if (IWL_MVM_FTM_INITIATOR_DYNACK)
+ FTM_SET_FLAG(DYN_ACK);
+
+ if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
+ FTM_SET_FLAG(ALGO_LR);
+ else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
+ FTM_SET_FLAG(ALGO_FFT);
+
+ if (peer->ftm.trigger_based)
+ FTM_SET_FLAG(TB);
+ else if (peer->ftm.non_trigger_based)
+ FTM_SET_FLAG(NON_TB);
+
+ if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+ peer->ftm.lmr_feedback)
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->ftmr_max_retries = peer->ftm.ftmr_retries;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
+}
+
+static int
+iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v3 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
+ &target->bandwidth,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ /*
+ * Versions 3 and 4 share some common fields, so
+ * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
+ */
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v4 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *sta_id, __le32 *flags)
+{
+ if (vif->cfg.assoc) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
+ continue;
+
+ *sta_id = mvmvif->link[link_id]->ap_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(sta);
+ }
+
+ if (sta->mfp && (peer->ftm.trigger_based ||
+ peer->ftm.non_trigger_based))
+ FTM_SET_FLAG(PMF);
+ break;
+ }
+ rcu_read_unlock();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->ftm_unprotected) {
+ *sta_id = IWL_INVALID_STA;
+ *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
+ }
+#endif
+ } else {
+ *sta_id = IWL_INVALID_STA;
+ }
+
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+ return 0;
+}
+
+static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
+{
+ u32 status;
+ int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
+
+ if (!err && status) {
+ IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
+ status);
+ err = iwl_ftm_range_request_status_to_err(status);
+ }
+
+ return err;
+}
+
+static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v5 cmd_v5;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v5,
+ .len[0] = sizeof(cmd_v5),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
+
+ for (i = 0; i < cmd_v5.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v7 cmd_v7;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd_v7,
+ .len[0] = sizeof(cmd_v7),
+ };
+ u8 i;
+ int err;
+
+ /*
+ * Versions 7 and 8 have the same structure except for the responders
+ * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
+ */
+ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
+
+ for (i = 0; i < cmd_v7.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v8 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v9 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static void iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_ftm_iter_data *target = data;
+
+ if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
+ return;
+
+ WARN_ON(!sta->mfp);
+
+ target->tk = key->key;
+ *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
+}
+
+static void
+iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
+ u8 *rx_pn, u8 *tx_pn, __le32 *flags)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ftm_unprotected)
+ return;
+#endif
+
+ if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ IWL_INITIATOR_AP_FLAGS_TB)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
+ continue;
+
+ *cipher = entry->cipher;
+
+ if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
+ memcpy(hltk, entry->hltk, sizeof(entry->hltk));
+ else
+ memset(hltk, 0, sizeof(entry->hltk));
+
+ if (vif->cfg.assoc &&
+ !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
+ struct iwl_mvm_ftm_iter_data target;
+
+ target.bssid = bssid;
+ target.cipher = cipher;
+ target.tk = NULL;
+ ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+
+ if (!WARN_ON(!target.tk))
+ memcpy(tk, target.tk, TK_11AZ_LEN);
+ } else {
+ memcpy(tk, entry->tk, sizeof(entry->tk));
+ }
+
+ memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
+ memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));
+
+ FTM_SET_FLAG(SECURED);
+ return;
+ }
+}
+
+static int
+iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v7 *target)
+{
+ int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
+
+ if (err)
+ return err;
+
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
+ return err;
+}
+
+static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v11 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static void
+iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
+ struct iwl_tof_range_req_ap_entry_v8 *target)
+{
+ /* Only 2 STS are supported on Tx */
+ u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
+ target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
+ target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
+}
+
+static int
+iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v8 *target)
+{
+ u32 flags;
+ int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
+
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_set_ndp_params(mvm, target);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v12 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v13 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
+ if (err)
+ return err;
+
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ target->bss_color = peer->ftm.bss_color;
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ target->band =
+ iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+static int
+iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ u32 i2r_max_sts, flags;
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
+
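+ /* Only 2 STS are supported on Tx */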
+ i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ target->band =
+ iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ int err;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->ftm_initiator.req)
+ return -EBUSY;
+
+ if (new_api) {
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ switch (cmd_ver) {
+ case 15:
+ /* Version 15 has the same struct as 14 */
+ case 14:
+ err = iwl_mvm_ftm_start_v14(mvm, vif, req);
+ break;
+ case 13:
+ err = iwl_mvm_ftm_start_v13(mvm, vif, req);
+ break;
+ case 12:
+ err = iwl_mvm_ftm_start_v12(mvm, vif, req);
+ break;
+ case 11:
+ err = iwl_mvm_ftm_start_v11(mvm, vif, req);
+ break;
+ case 9:
+ case 10:
+ err = iwl_mvm_ftm_start_v9(mvm, vif, req);
+ break;
+ case 8:
+ err = iwl_mvm_ftm_start_v8(mvm, vif, req);
+ break;
+ default:
+ err = iwl_mvm_ftm_start_v7(mvm, vif, req);
+ break;
+ }
+ } else {
+ err = iwl_mvm_ftm_start_v5(mvm, vif, req);
+ }
+
+ if (!err) {
+ mvm->ftm_initiator.req = req;
+ mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
+ }
+
+ return err;
+}
+
+void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_abort_cmd cmd = {
+ .request_id = req->cookie,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (req != mvm->ftm_initiator.req)
+ return;
+
+ iwl_mvm_ftm_reset(mvm);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
+ 0, sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "failed to abort FTM process\n");
+}
+
+static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
+ const u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < req->n_peers; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ if (ether_addr_equal_unaligned(peer->addr, addr))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
+{
+ u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
+ u32 curr_gp2, diff;
+ u64 now_from_boot_ns;
+
+ iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
+ &now_from_boot_ns, NULL);
+
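+ /*
+ * GP2 is a free-running 32-bit microsecond counter; account for a
+ * possible wrap-around between the fw timestamp and now.
+ */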
+ if (curr_gp2 >= gp2_ts)
+ diff = curr_gp2 - gp2_ts;
+ else
+ diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
+
+ return now_from_boot_ns - (u64)diff * 1000;
+}
+
+static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_result *res)
+{
+ struct iwl_mvm_loc_entry *entry;
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
+ if (!ether_addr_equal_unaligned(res->addr, entry->addr))
+ continue;
+
+ if (entry->lci_len) {
+ res->ftm.lci_len = entry->lci_len;
+ res->ftm.lci = entry->buf;
+ }
+
+ if (entry->civic_len) {
+ res->ftm.civicloc_len = entry->civic_len;
+ res->ftm.civicloc = entry->buf + entry->lci_len;
+ }
+
+ /* we found the entry we needed */
+ break;
+ }
+}
+
+static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
+ u8 num_of_aps)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
+ IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
+ request_id, (u8)mvm->ftm_initiator.req->cookie);
+ return -EINVAL;
+ }
+
+ if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
+ IWL_ERR(mvm, "FTM range response invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_result *res)
+{
+ struct iwl_mvm_smooth_entry *resp = NULL, *iter;
+ s64 rtt_avg, rtt = res->ftm.rtt_avg;
+ u32 undershoot, overshoot;
+ u8 alpha;
+
+ if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
+ return;
+
+ WARN_ON(rtt < 0);
+
+ if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
+ IWL_DEBUG_INFO(mvm,
+ "%pM: ignore failed measurement. Status=%u\n",
+ res->addr, res->status);
+ return;
+ }
+
+ list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
+ if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
+ resp = iter;
+ break;
+ }
+ }
+
+ if (!resp) {
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return;
+
+ memcpy(resp->addr, res->addr, ETH_ALEN);
+ list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
+
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ if (res->host_time - resp->host_time >
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ /* Smooth the results based on the tracked RTT average */
+ undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
+ overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
+ alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
+
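+ /* exponentially weighted moving average; alpha is in percent */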
+ rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
+
+ IWL_DEBUG_INFO(mvm,
+ "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
+ resp->addr, resp->rtt_avg, rtt_avg, rtt);
+
+ /*
+ * update the responder's average RTT results regardless of
+ * the under/over shoot logic below
+ */
+ resp->rtt_avg = rtt_avg;
+
+ /*
+ * Report the tracked average instead of the raw RTT when the new
+ * measurement under/overshoots it by more than the thresholds.
+ */
+ if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
+ res->ftm.rtt_avg = rtt_avg;
+
+ IWL_DEBUG_INFO(mvm,
+ "undershoot: val=%lld\n",
+ (rtt_avg - rtt));
+ } else if (rtt_avg < rtt && (rtt - rtt_avg) >
+ overshoot) {
+ res->ftm.rtt_avg = rtt_avg;
+ IWL_DEBUG_INFO(mvm,
+ "overshoot: val=%lld\n",
+ (rtt - rtt_avg));
+ }
+
+update_time:
+ resp->host_time = res->host_time;
+}
+
+static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
+ struct cfg80211_pmsr_result *res)
+{
+ s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
+
+ IWL_DEBUG_INFO(mvm, "entry %d\n", index);
+ IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
+ IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
+ IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
+ IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
+ IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
+ IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
+ IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
+ IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
+ IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
+}
+
+static void
+iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
+ continue;
+
+ memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
+ memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
+ return;
+ }
+}
+
+static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
+{
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
+ return 5;
+
+ /* Starting from version 8, the FW advertises the version */
+ if (mvm->cmd_ver.range_resp >= 8)
+ return mvm->cmd_ver.range_resp;
+ else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ return 7;
+
+ /* The first version of the new range request API */
+ return 6;
+}
+
+static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
+{
+ switch (ver) {
+ case 9:
+ case 8:
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy);
+ case 7:
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
+ case 6:
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
+ case 5:
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
+ default:
+ WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
+ return false;
+ }
+}
+
+void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data;
+ int i;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
+ u8 num_of_aps, last_in_batch;
+ u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->ftm_initiator.req)
+ return;
+
+ if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
+ return;
+
+ if (new_api) {
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
+ fw_resp_v8->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp_v8->num_of_aps;
+ last_in_batch = fw_resp_v8->last_report;
+ } else {
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
+ fw_resp_v5->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp_v5->num_of_aps;
+ last_in_batch = fw_resp_v5->last_in_batch;
+ }
+
+ IWL_DEBUG_INFO(mvm, "Range response received\n");
+ IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
+ mvm->ftm_initiator.req->cookie, num_of_aps);
+
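+ /* cap at IWL_TOF_MAX_APS so we never read past the fw response array */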
+ for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) {
+ struct cfg80211_pmsr_result result = {};
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ int peer_idx;
+
+ if (new_api) {
+ if (notif_ver >= 8) {
+ fw_ap = &fw_resp_v8->ap[i];
+ iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
+ } else if (notif_ver == 7) {
+ fw_ap = (void *)&fw_resp_v7->ap[i];
+ } else {
+ fw_ap = (void *)&fw_resp_v6->ap[i];
+ }
+
+ result.final = fw_ap->last_burst;
+ result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+ result.ap_tsf_valid = 1;
+ } else {
+ /* the first part is the same for old and new APIs */
+ fw_ap = (void *)&fw_resp_v5->ap[i];
+ /*
+ * FIXME: the firmware needs to report this, we don't
+ * even know the number of bursts the responder picked
+ * (if we asked it to)
+ */
+ result.final = 0;
+ }
+
+ peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
+ fw_ap->bssid);
+ if (peer_idx < 0) {
+ IWL_WARN(mvm,
+ "Unknown address (%pM, target #%d) in FTM response\n",
+ fw_ap->bssid, i);
+ continue;
+ }
+
+ switch (fw_ap->measure_status) {
+ case IWL_TOF_ENTRY_SUCCESS:
+ result.status = NL80211_PMSR_STATUS_SUCCESS;
+ break;
+ case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
+ result.status = NL80211_PMSR_STATUS_TIMEOUT;
+ break;
+ case IWL_TOF_ENTRY_NO_RESPONSE:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
+ break;
+ case IWL_TOF_ENTRY_REQUEST_REJECTED:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
+ result.ftm.busy_retry_time = fw_ap->refusal_period;
+ break;
+ default:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
+ break;
+ }
+ memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
+ result.host_time = iwl_mvm_ftm_get_host_time(mvm,
+ fw_ap->timestamp);
+ result.type = NL80211_PMSR_TYPE_FTM;
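+ /* use the per-peer response count as the burst index */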
+ result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
+ mvm->ftm_initiator.responses[peer_idx]++;
+ result.ftm.rssi_avg = fw_ap->rssi;
+ result.ftm.rssi_avg_valid = 1;
+ result.ftm.rssi_spread = fw_ap->rssi_spread;
+ result.ftm.rssi_spread_valid = 1;
+ result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
+ result.ftm.rtt_avg_valid = 1;
+ result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
+ result.ftm.rtt_variance_valid = 1;
+ result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
+ result.ftm.rtt_spread_valid = 1;
+
+ iwl_mvm_ftm_get_lci_civic(mvm, &result);
+
+ iwl_mvm_ftm_rtt_smoothing(mvm, &result);
+
+ cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ &result, GFP_KERNEL);
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
+ fw_ap->rttConfidence);
+
+ iwl_mvm_debug_range_resp(mvm, i, &result);
+ }
+
+ if (last_in_batch) {
+ cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
+ mvm->ftm_initiator.req,
+ GFP_KERNEL);
+ iwl_mvm_ftm_reset(mvm);
+ }
+}
+
+void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
+ size_t len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mvm_loc_entry *entry;
+ const u8 *ies, *lci, *civic, *msr_ie;
+ size_t ies_len, lci_len = 0, civic_len = 0;
+ size_t baselen = IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.ftm);
+ static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
+ static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
+
+ if (len <= baselen)
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ies = mgmt->u.action.u.ftm.variable;
+ ies_len = len - baselen;
+
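+ /*
+ * The report type byte sits at offset 4 of the element
+ * (EID, length, token, mode, type).
+ */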
+ msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
+ &rprt_type_lci, 1, 4);
+ if (msr_ie) {
+ lci = msr_ie + 2;
+ lci_len = msr_ie[1];
+ }
+
+ msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
+ &rprt_type_civic, 1, 4);
+ if (msr_ie) {
+ civic = msr_ie + 2;
+ civic_len = msr_ie[1];
+ }
+
+ entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
+ if (!entry)
+ return;
+
+ memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
+
+ entry->lci_len = lci_len;
+ if (lci_len)
+ memcpy(entry->buf, lci, lci_len);
+
+ entry->civic_len = civic_len;
+ if (civic_len)
+ memcpy(entry->buf + lci_len, civic, civic_len);
+
+ list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c
new file mode 100644
index 000000000000..83f6e508a094
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2024 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+#include "mvm.h"
+#include "constants.h"
+
+struct iwl_mvm_pasn_sta {
+ struct list_head list;
+ struct iwl_mvm_int_sta int_sta;
+ u8 addr[ETH_ALEN];
+
+ /* must be last as it is followed by the buffer holding the key */
+ struct ieee80211_key_conf keyconf;
+};
+
+struct iwl_mvm_pasn_hltk_data {
+ u8 *addr;
+ u8 cipher;
+ u8 *hltk;
+};
+
+static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
+ u8 *bw, u8 *ctrl_ch_position)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *bw = IWL_TOF_BW_20_LEGACY;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *bw = IWL_TOF_BW_20_HT;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *bw = IWL_TOF_BW_40;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *bw = IWL_TOF_BW_80;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
+ u8 *format_bw, u8 *ctrl_ch_position,
+ u8 cmd_ver)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ if (cmd_ver >= 9) {
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ }
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
+ struct iwl_tof_responder_config_cmd *cmd)
+{
+ /* Up to 2 R2I STS are allowed on the responder */
+ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
+ IWL_MVM_FTM_R2I_MAX_STS : 1;
+
+ cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (r2i_max_sts << IWL_RESPONDER_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS);
+ cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS);
+ cmd->cmd_valid_fields |=
+ cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS);
+}
+
+static int
+iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_bss_conf *link_conf)
+{
+ u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ /*
+ * The command structure is the same for versions 6, 7 and 8 (only the
+ * field interpretation is different), so the same struct can be used
+ * for all cases.
+ */
+ struct iwl_tof_responder_config_cmd cmd = {
+ .channel_num = chandef->chan->hw_value,
+ .cmd_valid_fields =
+ cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
+ IWL_TOF_RESPONDER_CMD_VALID_BSSID |
+ IWL_TOF_RESPONDER_CMD_VALID_STA_ID),
+ .sta_id = mvmvif->link[link_conf->link_id]->bcast_sta.sta_id,
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6);
+ int err;
+ int cmd_size;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver >= 10) {
+ cmd.band =
+ iwl_mvm_phy_band_from_nl80211(chandef->chan->band);
+ }
+
+ /* Use a default of bss_color=1 for now */
+ if (cmd_ver >= 9) {
+ cmd.cmd_valid_fields |=
+ cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR |
+ IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR);
+ cmd.bss_color = 1;
+ cmd.min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ cmd.max_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9);
+ } else {
+ /* All versions up to version 8 have the same size */
+ cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8);
+ }
+
+ if (cmd_ver >= 8)
+ iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd);
+
+ if (cmd_ver >= 7)
+ err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position,
+ cmd_ver);
+ else
+ err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
+ &cmd.ctrl_ch_position);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to set responder bandwidth\n");
+ return err;
+ }
+
+ memcpy(cmd.bssid, vif->addr, ETH_ALEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
+{
+ struct iwl_tof_responder_dyn_config_cmd_v2 cmd = {
+ .lci_len = cpu_to_le32(params->lci_len + 2),
+ .civic_len = cpu_to_le32(params->civicloc_len + 2),
+ };
+ u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0};
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ .data[1] = &data,
+ /* .len[1] set later */
+ /* may not be able to DMA from stack */
+ .dataflags[1] = IWL_HCMD_DFL_DUP,
+ };
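+ /* keep each IE blob dword-aligned within the command payload */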
+ u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4);
+ u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4);
+ u8 *pos = data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) {
+ IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n",
+ params->lci_len, params->civicloc_len);
+ return -ENOBUFS;
+ }
+
+ pos[0] = WLAN_EID_MEASURE_REPORT;
+ pos[1] = params->lci_len;
+ memcpy(pos + 2, params->lci, params->lci_len);
+
+ pos += aligned_lci_len;
+ pos[0] = WLAN_EID_MEASURE_REPORT;
+ pos[1] = params->civicloc_len;
+ memcpy(pos + 2, params->civicloc, params->civicloc_len);
+
+ hcmd.len[1] = aligned_lci_len + aligned_civicloc_len;
+
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params,
+ struct iwl_mvm_pasn_hltk_data *hltk_data)
+{
+ struct iwl_tof_responder_dyn_config_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ /* may not be able to DMA from stack */
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.valid_flags = 0;
+
+ if (params) {
+ if (params->lci_len + 2 > sizeof(cmd.lci_buf) ||
+ params->civicloc_len + 2 > sizeof(cmd.civic_buf)) {
+ IWL_ERR(mvm,
+ "LCI/civic data too big (lci=%zd, civic=%zd)\n",
+ params->lci_len, params->civicloc_len);
+ return -ENOBUFS;
+ }
+
+ cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.lci_buf[1] = params->lci_len;
+ memcpy(cmd.lci_buf + 2, params->lci, params->lci_len);
+ cmd.lci_len = params->lci_len + 2;
+
+ cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.civic_buf[1] = params->civicloc_len;
+ memcpy(cmd.civic_buf + 2, params->civicloc,
+ params->civicloc_len);
+ cmd.civic_len = params->civicloc_len + 2;
+
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI |
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC;
+ }
+
+ if (hltk_data) {
+ if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) {
+ IWL_ERR(mvm, "invalid cipher: %u\n",
+ hltk_data->cipher);
+ return -EINVAL;
+ }
+
+ cmd.cipher = hltk_data->cipher;
+ memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr));
+ memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf));
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA;
+ }
+
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
+{
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
+ 2);
+
+ switch (cmd_ver) {
+ case 2:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif,
+ params);
+ break;
+ case 3:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif,
+ params, NULL);
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n",
+ cmd_ver);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_pasn_sta *sta)
+{
+ list_del(&sta->list);
+
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
+ if (iwl_mvm_has_mld_api(mvm->fw))
+ iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
+ else
+ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
+
+ iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ kfree(sta);
+}
+
+int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_ftm_responder_params *params;
+ struct ieee80211_chanctx_conf ctx, *pctx;
+ u16 *phy_ctxt_id;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ int ret;
+
+ params = bss_conf->ftmr_params;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!bss_conf->ftm_responder))
+ return -EINVAL;
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+ !mvmvif->ap_ibss_active) {
+ IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+ return -EIO;
+ }
+
+ rcu_read_lock();
+ pctx = rcu_dereference(bss_conf->chanctx_conf);
+ /* Copy the ctx so the RCU read lock can be released before sending the
+ * phy ctxt. We don't care about changes in the ctx after releasing the
+ * lock because the driver is still protected by the mutex. */
+ ctx = *pctx;
+ phy_ctxt_id = (u16 *)pctx->drv_priv;
+ rcu_read_unlock();
+
+ phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
+ ctx.rx_chains_static,
+ ctx.rx_chains_dynamic);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def, bss_conf);
+ if (ret)
+ return ret;
+
+ if (params)
+ ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params);
+
+ return ret;
+}
+
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_pasn_sta *sta, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list)
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+}
+
+void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ if (!bss_conf->ftm_responder)
+ return;
+
+ iwl_mvm_ftm_responder_clear(mvm, vif);
+ iwl_mvm_ftm_start_responder(mvm, vif, bss_conf);
+}
+
+void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_ftm_responder_stats *resp = (void *)pkt->data;
+ struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats;
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (resp->success_ftm == resp->ftm_per_burst)
+ stats->success_num++;
+ else if (resp->success_ftm >= 2)
+ stats->partial_num++;
+ else
+ stats->failed_num++;
+
+ if ((flags & FTM_RESP_STAT_ASAP_REQ) &&
+ (flags & FTM_RESP_STAT_ASAP_RESP))
+ stats->asap_num++;
+
+ if (flags & FTM_RESP_STAT_NON_ASAP_RESP)
+ stats->non_asap_num++;
+
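+ /* the fw reports the duration in usec */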
+ stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC;
+
+ if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN)
+ stats->unknown_triggers_num++;
+
+ if (flags & FTM_RESP_STAT_DUP)
+ stats->reschedule_requests_num++;
+
+ if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN)
+ stats->out_of_window_triggers_num++;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/fw-api.h b/sys/contrib/dev/iwlwifi/mvm/fw-api.h
new file mode 100644
index 000000000000..083f86fa5017
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw/api/tdls.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/offload.h"
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/api/system.h"
+#include "fw/api/alive.h"
+#include "fw/api/binding.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/coex.h"
+#include "fw/api/commands.h"
+#include "fw/api/d3.h"
+#include "fw/api/filter.h"
+#include "fw/api/led.h"
+#include "fw/api/mac.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/power.h"
+#include "fw/api/rs.h"
+#include "fw/api/rx.h"
+#include "fw/api/scan.h"
+#include "fw/api/sf.h"
+#include "fw/api/sta.h"
+#include "fw/api/stats.h"
+#include "fw/api/location.h"
+#include "fw/api/tx.h"
+#include "fw/api/rfi.h"
+
+#endif /* __fw_api_h__ */
diff --git a/sys/contrib/dev/iwlwifi/mvm/fw.c b/sys/contrib/dev/iwlwifi/mvm/fw.c
new file mode 100644
index 000000000000..d931c6eaf12f
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/fw.c
@@ -0,0 +1,1646 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+#include <linux/dmi.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+#include "fw/pnvm.h"
+#include "fw/uefi.h"
+#include "fw/regulatory.h"
+
+#include "mvm.h"
+#include "fw/dbg.h"
+#include "iwl-phy-db.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+#include "time-sync.h"
+
+#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
+#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
+
+struct iwl_mvm_alive_data {
+ __le32 sku_id[3];
+ bool valid;
+};
+
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+ return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
+ sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+
+ if (mvm->trans->info.num_rxqs == 1)
+ return 0;
+
+ /* Do not direct RSS traffic to Q 0 which is our fallback queue */
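+ /* spread queues 1..num_rxqs-1 across the table round-robin */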
+ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+ cmd.indirection_table[i] =
+ 1 + (i % (mvm->trans->info.num_rxqs - 1));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_dqa_enable_cmd dqa_cmd = {
+ .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
+ else
+ IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
+
+ return ret;
+}
+
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+
+ if (mfu_dump_notif->index_num == 0)
+ IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+ le32_to_cpu(mfu_dump_notif->assert_id));
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_alive_data *alive_data = data;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u16 status;
+ u32 lmac_error_event_table, umac_error_table;
+ u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0);
+ u32 i;
+
+ if (version >= 6) {
+ struct iwl_alive_ntf_v6 *palive;
+
+ if (pkt_len < sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+
+ BUILD_BUG_ON(sizeof(palive->sku_id.data) !=
+ sizeof(alive_data->sku_id));
+ memcpy(alive_data->sku_id, palive->sku_id.data,
+ sizeof(palive->sku_id.data));
+
+		IWL_DEBUG_FW(mvm, "Got sku_id: 0x%x 0x%x 0x%x\n",
+ le32_to_cpu(alive_data->sku_id[0]),
+ le32_to_cpu(alive_data->sku_id[1]),
+ le32_to_cpu(alive_data->sku_id[2]));
+
+ mvm->trans->dbg.imr_data.imr_enable =
+ le32_to_cpu(palive->imr.enabled);
+ mvm->trans->dbg.imr_data.imr_size =
+ le32_to_cpu(palive->imr.size);
+ mvm->trans->dbg.imr_data.imr2sram_remainbyte =
+ mvm->trans->dbg.imr_data.imr_size;
+ mvm->trans->dbg.imr_data.imr_base_addr =
+ palive->imr.base_addr;
+ mvm->trans->dbg.imr_data.imr_curr_addr =
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
+		IWL_DEBUG_FW(mvm, "IMR Enabled: 0x%x size 0x%x Address 0x%016llx\n",
+ mvm->trans->dbg.imr_data.imr_enable,
+ mvm->trans->dbg.imr_data.imr_size,
+ le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
+
+ if (!mvm->trans->dbg.imr_data.imr_enable) {
+ for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
+ struct iwl_ucode_tlv *reg_tlv;
+ struct iwl_fw_ini_region_tlv *reg;
+
+ reg_tlv = mvm->trans->dbg.active_regions[i];
+ if (!reg_tlv)
+ continue;
+
+ reg = (void *)reg_tlv->data;
+ /*
+ * We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ mvm->trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ if (version >= 8) {
+ const struct iwl_alive_ntf *palive_v8 =
+ (void *)pkt->data;
+
+ if (pkt_len < sizeof(*palive_v8))
+ return false;
+
+ IWL_DEBUG_FW(mvm, "platform id: 0x%llx\n",
+ palive_v8->platform_id);
+ }
+ } else if (iwl_rx_packet_payload_len(pkt) ==
+ sizeof(struct iwl_alive_ntf_v3)) {
+ struct iwl_alive_ntf_v3 *palive3;
+
+ if (pkt_len < sizeof(*palive3))
+ return false;
+
+ palive3 = (void *)pkt->data;
+ umac = &palive3->umac_data;
+ lmac1 = &palive3->lmac_data;
+ status = le16_to_cpu(palive3->status);
+ } else {
+ WARN(1, "unsupported alive notification (size %d)\n",
+ iwl_rx_packet_payload_len(pkt));
+ /* get timeout later */
+ return false;
+ }
+
+ lmac_error_event_table =
+ le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
+ iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
+
+ if (lmac2)
+ mvm->trans->dbg.lmac_error_event_table[1] =
+ le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
+
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
+ ~FW_ADDR_CACHE_CONTROL;
+
+ if (umac_error_table) {
+ if (umac_error_table >=
+ mvm->trans->mac_cfg->base->min_umac_error_event_table) {
+ iwl_fw_umac_set_alive_err_table(mvm->trans,
+ umac_error_table);
+ } else {
+ IWL_ERR(mvm,
+				"Invalid error log pointer 0x%08X for %s uCode\n",
+ umac_error_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ }
+ }
+
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+
+ IWL_DEBUG_FW(mvm,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
+
+ if (lmac2)
+ IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
+
+ iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
+
+ return true;
+}
+
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+
+ return true;
+}
+
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_phy_db *phy_db = data;
+
+ if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+ WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+ return true;
+ }
+
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
+
+ return false;
+}
+
+static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
+{
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+ IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
+ struct iwl_trans *trans = mvm->trans;
+ enum iwl_device_family device_family = trans->mac_cfg->device_family;
+
+ if (device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ if (device_family <= IWL_DEVICE_FAMILY_9000)
+ IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
+ else
+ IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
+
+ IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+
+ /* print OPT info */
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
+}
+
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+ enum iwl_ucode_type ucode_type)
+{
+ struct iwl_notification_wait alive_wait;
+ struct iwl_mvm_alive_data alive_data = {};
+ int ret;
+ enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
+ static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
+ bool run_in_rfkill =
+ ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
+ u8 count;
+ struct iwl_pc_data *pc_data;
+
+ if (ucode_type == IWL_UCODE_REGULAR &&
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+ !(fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
+ ucode_type = IWL_UCODE_REGULAR_USNIFFER;
+ iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_data);
+
+ /*
+	 * We want to load the INIT firmware even in RFKILL.
+ * For the unified firmware case, the ucode_type is not
+ * INIT, but we still need to run it.
+ */
+ ret = iwl_trans_start_fw(mvm->trans, mvm->fw, ucode_type,
+ run_in_rfkill);
+ if (ret) {
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the ALIVE notification here.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+
+ if (mvm->trans->mac_cfg->device_family ==
+ IWL_DEVICE_FAMILY_AX210) {
+ /* print these registers regardless of alive fail/success */
+ IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
+ IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
+ IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
+ iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
+ IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
+ iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
+ }
+
+ if (ret) {
+ struct iwl_trans *trans = mvm->trans;
+
+ /* SecBoot info */
+ if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_umac_prph(trans,
+ UMAG_SB_CPU_2_STATUS));
+ } else if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, SB_CPU_1_STATUS),
+ iwl_read_prph(trans, SB_CPU_2_STATUS));
+ }
+
+ iwl_mvm_print_pd_notification(mvm);
+
+ /* LMAC/UMAC PC info */
+ if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_22000) {
+ pc_data = trans->dbg.pc_data;
+ for (count = 0; count < trans->dbg.num_pc;
+ count++, pc_data++)
+ IWL_ERR(mvm, "%s: 0x%x\n",
+ pc_data->pc_name,
+ pc_data->pc_address);
+ } else if (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_9000) {
+ IWL_ERR(mvm, "UMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_UMAC_CURRENT_PC));
+ IWL_ERR(mvm, "LMAC PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC1_CURRENT_PC));
+ if (iwl_mvm_is_cdb_supported(mvm))
+ IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
+ iwl_read_umac_prph(trans,
+ UREG_LMAC2_CURRENT_PC));
+ }
+
+ if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return ret;
+ }
+
+ if (!alive_data.valid) {
+ IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return -EIO;
+ }
+
+	/* if we reached this point, the Alive notification was received */
+ iwl_mei_alive_notif(true);
+
+ iwl_trans_fw_alive(mvm->trans);
+
+ ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
+ mvm->fw, alive_data.sku_id);
+ if (ret) {
+ IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return ret;
+ }
+
+ /*
+ * Note: all the queues are enabled as part of the interface
+ * initialization, but in firmware restart scenarios they
+ * could be stopped, so wake them up. In firmware restart,
+ * mac80211 will have the queues stopped as well until the
+ * reconfiguration completes. During normal startup, they
+ * will be empty.
+ */
+
+ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+ /*
+ * Set a 'fake' TID for the command queue, since we use the
+ * hweight() of the tid_bitmap as a refcount now. Not that
+ * we ever even consider the command queue as one we might
+ * want to reuse, but be safe nevertheless.
+ */
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
+ BIT(IWL_MAX_TID_COUNT + 2);
+
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_fw_set_dbg_rec_on(&mvm->fwrt);
+#endif
+
+ /*
+ * For pre-MLD API (MLD API doesn't use the timestamps):
+	 * All the BSSes in the BSS table include the system GP2
+	 * at the beacon Rx time; this is of course no longer relevant
+	 * since we are resetting the firmware.
+	 * Purge the whole BSS table.
+ */
+ if (!mvm->mld_api_is_used)
+ cfg80211_bss_flush(mvm->hw->wiphy);
+
+ return 0;
+}
+
+static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+ struct iwl_phy_specific_cfg *phy_filters)
+{
+#ifdef CONFIG_ACPI
+ *phy_filters = mvm->fwrt.phy_filters;
+#endif /* CONFIG_ACPI */
+}
+
+static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
+{
+ u8 cmd_ver;
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ MCC_ALLOWED_AP_TYPE_CMD),
+ .flags = 0,
+ .data[0] = &mvm->fwrt.uats_table,
+ .len[0] = sizeof(mvm->fwrt.uats_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
+ return;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver != 1) {
+ IWL_DEBUG_RADIO(mvm,
+ "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
+ cmd_ver);
+ return;
+ }
+
+ iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
+
+ if (!mvm->fwrt.uats_valid)
+ return;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
+ else
+ IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
+}
+
+static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
+{
+ u8 cmd_ver;
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ SAR_OFFSET_MAPPING_TABLE_CMD),
+ .flags = 0,
+ .data[0] = &mvm->fwrt.sgom_table,
+ .len[0] = sizeof(mvm->fwrt.sgom_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mvm->fwrt.sgom_enabled) {
+ IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
+ return 0;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver != 2) {
+ IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
+ cmd_ver);
+ return 0;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
+
+ return ret;
+}
+
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = PHY_CONFIGURATION_CMD;
+ struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
+ enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+ u8 cmd_ver;
+ size_t cmd_size;
+
+ if (iwl_mvm_has_unified_ucode(mvm) &&
+ !mvm->trans->cfg->tx_with_siso_diversity)
+ return 0;
+
+ if (mvm->trans->cfg->tx_with_siso_diversity) {
+ /*
+		 * TODO: currently we don't set the antenna but let the NIC
+		 * decide which antenna to use. This should come from the BIOS.
+ */
+ phy_cfg_cmd.phy_cfg =
+ cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
+ }
+
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+
+	/* set extra PHY configuration flags from the device's cfg */
+ phy_cfg_cmd.phy_cfg |=
+ cpu_to_le32(mvm->trans->mac_cfg->extra_phy_cfg_flags);
+
+ phy_cfg_cmd.calib_control.event_trigger =
+ mvm->fw->default_calib[ucode_type].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ mvm->fw->default_calib[ucode_type].flow_trigger;
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver >= 3)
+ iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg);
+
+ IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+ phy_cfg_cmd.phy_cfg);
+ cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
+ sizeof(struct iwl_phy_cfg_cmd_v1);
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
+}
+
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_nvm_access_complete_cmd nvm_complete = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+ };
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ u32 sb_cfg;
+ int ret;
+
+ if (mvm->trans->cfg->tx_with_siso_diversity)
+ init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->rfkill_safe_init_done = false;
+
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
+ /* if needed, we'll reset this on our way out later */
+ mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
+ if (mvm->fw_product_reset && iwl_mei_pldr_req())
+ return -EBUSY;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_init_complete,
+ NULL);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+
+ /* if we needed reset then fail here, but notify and remove */
+ if (mvm->fw_product_reset) {
+ iwl_mei_alive_notif(false);
+ iwl_trans_pcie_reset(mvm->trans,
+ IWL_RESET_MODE_RESCAN);
+ }
+
+ goto error;
+ }
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+ NULL);
+
+ /* Send init config command to mark that we are sending NVM access
+ * commands
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+ INIT_EXTENDED_CFG_CMD),
+ CMD_SEND_IN_RFKILL,
+ sizeof(init_cfg), &init_cfg);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run init config command: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* Load NVM to NIC if needed */
+ if (mvm->nvm_file_name) {
+ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
+ if (ret)
+ goto error;
+ ret = iwl_mvm_load_nvm_to_nic(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ NVM_ACCESS_COMPLETE),
+ CMD_SEND_IN_RFKILL,
+ sizeof(nvm_complete), &nvm_complete);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+ ret);
+ goto error;
+ }
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run PHY configuration: %d\n",
+ ret);
+ goto error;
+ }
+
+ /* We wait for the INIT complete notification */
+ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
+ MVM_UCODE_ALIVE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (!mvm->nvm_data) {
+ mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
+ mvm->set_tx_ant, mvm->set_rx_ant);
+ if (IS_ERR(mvm->nvm_data)) {
+ ret = PTR_ERR(mvm->nvm_data);
+ mvm->nvm_data = NULL;
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ mvm->rfkill_safe_init_done = true;
+
+ return 0;
+
+error:
+ iwl_remove_notification(&mvm->notif_wait, &init_wait);
+ return ret;
+}
+
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ CALIB_RES_NOTIF_PHY_DB
+ };
+ int ret;
+
+ if (iwl_mvm_has_unified_ucode(mvm))
+ return iwl_run_unified_mvm_ucode(mvm);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->rfkill_safe_init_done = false;
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &calib_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ iwl_wait_phy_db_entry,
+ mvm->phy_db);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
+ /* Will also start the device */
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+ goto remove_notif;
+ }
+
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ if (ret)
+ goto remove_notif;
+ }
+
+ /* Read the NVM only at driver load time, no need to do this twice */
+ if (!mvm->nvm_data) {
+ ret = iwl_nvm_init(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+ goto remove_notif;
+ }
+ }
+
+ /* In case we read the NVM from external file, load it to the NIC */
+ if (mvm->nvm_file_name) {
+ ret = iwl_mvm_load_nvm_to_nic(mvm);
+ if (ret)
+ goto remove_notif;
+ }
+
+ WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
+		  "Too old NVM version (0x%x, required = 0x%x)",
+ mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
+
+ /*
+ * abort after reading the nvm in case RF Kill is on, we will complete
+ * the init seq later when RF kill will switch to off
+ */
+ if (iwl_mvm_is_radio_hw_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm,
+ "jump over all phy activities due to RF kill\n");
+ goto remove_notif;
+ }
+
+ mvm->rfkill_safe_init_done = true;
+
+ /* Send TX valid antennas before triggering calibrations */
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto remove_notif;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ goto remove_notif;
+ }
+
+ /*
+ * Some things may run in the background now, but we
+ * just wait for the calibration complete notification.
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+ MVM_UCODE_CALIB_TIMEOUT);
+ if (!ret)
+ goto out;
+
+ if (iwl_mvm_is_radio_hw_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+ ret = 0;
+ } else {
+ IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+ ret);
+ }
+
+ goto out;
+
+remove_notif:
+ iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+ mvm->rfkill_safe_init_done = false;
+ if (!mvm->nvm_data) {
+		/* we want to debug INIT and we have no NVM - fake one */
+ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+ sizeof(struct ieee80211_channel) +
+ sizeof(struct ieee80211_rate),
+ GFP_KERNEL);
+ if (!mvm->nvm_data)
+ return -ENOMEM;
+ mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+ mvm->nvm_data->bands[0].n_channels = 1;
+ mvm->nvm_data->bands[0].n_bitrates = 1;
+ mvm->nvm_data->bands[0].bitrates =
+ (void *)(mvm->nvm_data->channels + 1);
+ mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
+{
+ struct iwl_ltr_config_cmd cmd = {
+ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+ };
+
+ if (!mvm->trans->ltr_enabled)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+ sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ struct iwl_dev_tx_power_cmd cmd_v9_v10 = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ __le16 *per_chain;
+ int ret;
+ u16 len = 0;
+ u32 n_subbands;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ void *cmd_data = &cmd;
+
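+	/*
+	 * Pick the command layout matching the firmware's command version:
+	 * v9/v10 use the newer iwl_dev_tx_power_cmd layout, while v3, v4,
+	 * v5 and v8 share the legacy v3_v8 layout.
+	 */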
+ if (cmd_ver == 10) {
+ len = sizeof(cmd_v9_v10.v10);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = &cmd_v9_v10.v10.per_chain[0][0][0];
+ cmd_v9_v10.v10.flags =
+ cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 9) {
+ len = sizeof(cmd_v9_v10.v9);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = &cmd_v9_v10.v9.per_chain[0][0];
+ } else if (cmd_ver == 8) {
+ len = sizeof(cmd.v8);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v8.per_chain[0][0];
+ cmd.v8.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ } else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
+ len = sizeof(cmd.v5);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = cmd.v5.per_chain[0][0];
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
+ len = sizeof(cmd.v4);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = cmd.v4.per_chain[0][0];
+ } else {
+ len = sizeof(cmd.v3);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = cmd.v3.per_chain[0][0];
+ }
+
+ /* all structs have the same common part, add its length */
+ len += sizeof(cmd.common);
+
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+ else
+ cmd_data = &cmd_v9_v10;
+
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
+
+ /* return on error or if the profile is disabled (positive number) */
+ if (ret)
+ return ret;
+
+ iwl_mei_set_power_limit(per_chain);
+
+ IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
+}
+
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+{
+ union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ u16 len;
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = { &geo_tx_cmd },
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+	/* the ops field is at the same spot for all versions, so set it in v1 */
+ geo_tx_cmd.v1.ops =
+ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
+
+ if (cmd_ver == 5)
+ len = sizeof(geo_tx_cmd.v5);
+ else if (cmd_ver == 4)
+ len = sizeof(geo_tx_cmd.v4);
+ else if (cmd_ver == 3)
+ len = sizeof(geo_tx_cmd.v3);
+ else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER))
+ len = sizeof(geo_tx_cmd.v2);
+ else
+ len = sizeof(geo_tx_cmd.v1);
+
+ if (!iwl_sar_geo_support(&mvm->fwrt))
+ return -EOPNOTSUPP;
+
+ cmd.len[0] = len;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+ return ret;
+ }
+
+ resp = (void *)cmd.resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
+ ret = -EIO;
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
+ union iwl_geo_tx_power_profiles_cmd cmd;
+ u16 len;
+ u32 n_bands;
+ u32 n_profiles;
+ __le32 sk = cpu_to_le32(0);
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
+
+	/* the ops field is at the same spot for all versions, so set it in v1 */
+ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
+ if (cmd_ver == 5) {
+ len = sizeof(cmd.v5);
+ n_bands = ARRAY_SIZE(cmd.v5.table[0]);
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
+ } else if (cmd_ver == 4) {
+ len = sizeof(cmd.v4);
+ n_bands = ARRAY_SIZE(cmd.v4.table[0]);
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
+ } else if (cmd_ver == 3) {
+ len = sizeof(cmd.v3);
+ n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
+ } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(cmd.v2);
+ n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
+ } else {
+ len = sizeof(cmd.v1);
+ n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ }
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
+	/* the table is at the same position for all versions, so use v1 */
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
+
+ /*
+	 * It is a valid scenario to not support SAR or to miss the WGDS table,
+ * but in that case there is no need to send the command.
+ */
+ if (ret)
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
+}
+
+int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+{
+ union iwl_ppag_table_cmd cmd;
+ int ret, cmd_size;
+
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
+ return 0;
+
+ IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD),
+ 0, cmd_size, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+ ret);
+
+ return ret;
+}
+
+static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
+{
+ /* no need to read the table, done in INIT stage */
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
+ return 0;
+
+ return iwl_mvm_ppag_send_cmd(mvm);
+}
+
+static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
+ int fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ struct iwl_tas_selection_data selection_data = {};
+ struct iwl_tas_config_cmd_v2_v4 cmd_v2_v4 = {};
+ struct iwl_tas_config_cmd cmd_v5 = {};
+ struct iwl_tas_data data = {};
+ void *cmd_data = &cmd_v2_v4;
+ int cmd_size;
+ int ret;
+
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v2_v4.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v5.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+ IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
+ return;
+ }
+
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "TAS table invalid or unavailable. (%d)\n",
+ ret);
+ return;
+ }
+
+ if (ret == 0 && fw_ver < 5)
+ return;
+
+ if (!iwl_is_tas_approved()) {
+ IWL_DEBUG_RADIO(mvm,
+				"System vendor '%s' is not in the approved list, disabling TAS in the US and Canada.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
+ IWL_DEBUG_RADIO(mvm,
+ "Unable to add US/Canada to TAS block list, disabling TAS\n");
+ return;
+ }
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "System vendor '%s' is in the approved list.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ }
+
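+	/*
+	 * Build the command for the firmware's TAS_CONFIG version: v2 sends
+	 * only the common block list, v3 adds the IEC override fields, v4
+	 * adds the UHB-allowed flags, and v5 switches to the new layout
+	 * with a 16-bit block list.
+	 */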
+ if (fw_ver < 5) {
+ selection_data = iwl_parse_tas_selection(data.tas_selection,
+ data.table_revision);
+ cmd_v2_v4.common.block_list_size =
+ cpu_to_le32(data.block_list_size);
+ for (u8 i = 0; i < data.block_list_size; i++)
+ cmd_v2_v4.common.block_list_array[i] =
+ cpu_to_le32(data.block_list_array[i]);
+ }
+
+ if (fw_ver == 5) {
+ cmd_size = sizeof(cmd_v5);
+ cmd_data = &cmd_v5;
+ cmd_v5.block_list_size = cpu_to_le16(data.block_list_size);
+ for (u16 i = 0; i < data.block_list_size; i++)
+ cmd_v5.block_list_array[i] =
+ cpu_to_le16(data.block_list_array[i]);
+ cmd_v5.tas_config_info.table_source = data.table_source;
+ cmd_v5.tas_config_info.table_revision = data.table_revision;
+ cmd_v5.tas_config_info.value = cpu_to_le32(data.tas_selection);
+ } else if (fw_ver == 4) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v4);
+ cmd_v2_v4.v4.override_tas_iec = selection_data.override_tas_iec;
+ cmd_v2_v4.v4.enable_tas_iec = selection_data.enable_tas_iec;
+ cmd_v2_v4.v4.usa_tas_uhb_allowed =
+ selection_data.usa_tas_uhb_allowed;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT) &&
+ selection_data.canada_tas_uhb_allowed)
+ cmd_v2_v4.v4.uhb_allowed_flags = TAS_UHB_ALLOWED_CANADA;
+ } else if (fw_ver == 3) {
+ cmd_size = sizeof(cmd_v2_v4.common) + sizeof(cmd_v2_v4.v3);
+ cmd_v2_v4.v3.override_tas_iec =
+ cpu_to_le16(selection_data.override_tas_iec);
+ cmd_v2_v4.v3.enable_tas_iec =
+ cpu_to_le16(selection_data.enable_tas_iec);
+ } else if (fw_ver == 2) {
+ cmd_size = sizeof(cmd_v2_v4.common);
+ } else {
+ return;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, cmd_data);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
+}
+
+static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
+{
+ struct iwl_lari_config_change_cmd cmd;
+ size_t cmd_size;
+ int ret;
+
+ ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
+ if (!ret) {
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 0, cmd_size, &cmd);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+ ret);
+ }
+}
+
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
+ /* read PPAG table */
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ }
+
+ /* read SAR tables */
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+		/*
+		 * If not available, don't fail and don't bother with EWRD
+		 * and WGDS.
+		 */
+
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
+ /*
+ * If basic SAR is not available, we check for WGDS,
+ * which should *not* be available either. If it is
+ * available, issue an error, because we can't use SAR
+ * Geo without basic SAR.
+ */
+ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
+ }
+
+ } else {
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
+ /* if EWRD is not available, we can still use
+ * WRDS, so don't fail */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* read geo SAR table */
+ if (iwl_sar_geo_support(&mvm->fwrt)) {
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ }
+ }
+
+ iwl_acpi_get_phy_filters(&mvm->fwrt);
+
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
+}
+
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
+}
+
+void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
+{
+ u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
+ u32 status = 0;
+ int ret;
+
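+	/*
+	 * returning false keeps the wait alive; only INIT_COMPLETE_NOTIF
+	 * (handled above) completes it
+	 */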
+ struct iwl_fw_error_recovery_cmd recovery_cmd = {
+ .flags = cpu_to_le32(flags),
+ .buf_size = 0,
+ };
+ struct iwl_host_cmd host_cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
+ .data = {&recovery_cmd, },
+ .len = {sizeof(recovery_cmd), },
+ };
+
+ /* no error log was defined in TLV */
+ if (!error_log_size)
+ return;
+
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ /* no buf was allocated while HW reset */
+ if (!mvm->error_recovery_buf)
+ return;
+
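+		/* attach the saved error log as a second, non-copied fragment */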
+ host_cmd.data[1] = mvm->error_recovery_buf;
+ host_cmd.len[1] = error_log_size;
+ host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+ recovery_cmd.buf_size = cpu_to_le32(error_log_size);
+ }
+
+ ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status);
+ kfree(mvm->error_recovery_buf);
+ mvm->error_recovery_buf = NULL;
+
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
+ return;
+ }
+
+	/* the SKB response is only relevant for ERROR_RECOVERY_UPDATE_DB */
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ if (status) {
+ IWL_ERR(mvm,
+				"Failed to send recovery cmd, blob was invalid: %d\n",
+ status);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
+ }
+}
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_sar_select_profile(mvm, 1, 1);
+}
+
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ if (iwl_mvm_has_unified_ucode(mvm))
+ return iwl_run_unified_mvm_ucode(mvm);
+
+ ret = iwl_run_init_mvm_ucode(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ return ret;
+ }
+
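+	/*
+	 * Non-unified firmware: the INIT image has finished, so stop the
+	 * device and restart the HW before loading the runtime image.
+	 */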
+ iwl_fw_dbg_stop_sync(&mvm->fwrt);
+ iwl_trans_stop_device(mvm->trans);
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ mvm->rfkill_safe_init_done = false;
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
+ mvm->rfkill_safe_init_done = true;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+ NULL);
+
+ return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+ int ret, i;
+ struct ieee80211_supported_band *sband = NULL;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_rt_fw(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ if (ret != -ERFKILL && !mvm->fw_product_reset)
+ iwl_fw_dbg_error_collect(&mvm->fwrt,
+ FW_DBG_TRIGGER_DRIVER);
+ goto error;
+ }
+
+ /* FW loaded successfully */
+ mvm->fw_product_reset = false;
+
+ iwl_fw_disable_dbg_asserts(&mvm->fwrt);
+ iwl_get_shared_mem_conf(&mvm->fwrt);
+
+ ret = iwl_mvm_sf_update(mvm, NULL, false);
+ if (ret)
+ IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
+ if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ /* if we have a destination, assume EARLY START */
+ if (mvm->fw->dbg.dest_tlv)
+ mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+ iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto error;
+
+ if (!iwl_mvm_has_unified_ucode(mvm)) {
+ /* Send phy db control command and then phy db calibration */
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ if (ret)
+ goto error;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
+ ret = iwl_set_soc_latency(&mvm->fwrt);
+ if (ret)
+ goto error;
+ }
+
+ iwl_mvm_lari_cfg(mvm);
+
+ /* Init RSS configuration */
+ ret = iwl_configure_rxq(&mvm->fwrt);
+ if (ret)
+ goto error;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ ret = iwl_send_rss_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
+ }
+
+ mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
+
+ /* reset quota debouncing buffer - 0xff will yield invalid data */
+ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
+ ret = iwl_mvm_send_dqa_cmd(mvm);
+ if (ret)
+ goto error;
+ }
+
+ /*
+ * Add auxiliary station for scanning.
+	 * Newer versions of this command imply that the fw uses an
+	 * internal aux station for all aux activities that don't
+	 * require a dedicated data queue.
+ */
+ if (!iwl_mvm_has_new_station_api(mvm->fw)) {
+ /*
+		 * In old versions the aux station uses a mac id like other
+		 * stations, not an lmac id.
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
+
+ /* Add all the PHY contexts */
+ i = 0;
+ while (!sband && i < NUM_NL80211_BANDS)
+ sband = mvm->hw->wiphy->bands[i++];
+
+ if (WARN_ON_ONCE(!sband)) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+ /* in order to give the responsibility of ct-kill and
+		 * TX backoff to FW we need to send an empty temperature
+		 * reporting cmd at init time
+ */
+ iwl_mvm_send_temp_report_ths_cmd(mvm);
+ } else {
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+ }
+
+#ifdef CONFIG_THERMAL
+ /* TODO: read the budget from BIOS / Platform NVM */
+
+ /*
+ * In case there is no budget from BIOS / Platform NVM the default
+ * budget should be 2000mW (cooling state 0).
+ */
+ if (iwl_mvm_is_ctdp_supported(mvm)) {
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
+#endif
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
+ WARN_ON(iwl_mvm_config_ltr(mvm));
+
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ goto error;
+
+ /*
+ * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+ * anyway, so don't init MCC.
+ */
+ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+ ret = iwl_mvm_init_mcc(mvm);
+ if (ret)
+ goto error;
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
+ mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
+ ret = iwl_mvm_config_scan(mvm);
+ if (ret)
+ goto error;
+ }
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
+
+ if (mvm->time_sync.active)
+ iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr,
+ IWL_TIME_SYNC_PROTOCOL_TM |
+ IWL_TIME_SYNC_PROTOCOL_FTM);
+ }
+
+ if (!mvm->ptp_data.ptp_clock)
+ iwl_mvm_ptp_init(mvm);
+
+ ret = iwl_mvm_ppag_init(mvm);
+ if (ret)
+ goto error;
+
+ ret = iwl_mvm_sar_init(mvm);
+ if (ret == 0)
+ ret = iwl_mvm_sar_geo_init(mvm);
+ if (ret < 0)
+ goto error;
+
+ ret = iwl_mvm_sgom_init(mvm);
+ if (ret)
+ goto error;
+
+ iwl_mvm_tas_init(mvm);
+ iwl_mvm_leds_sync(mvm);
+ iwl_mvm_uats_init(mvm);
+
+ if (iwl_rfi_supported(mvm)) {
+ if (iwl_rfi_is_enabled_in_bios(&mvm->fwrt))
+ iwl_rfi_send_config_cmd(mvm, NULL);
+ }
+
+ iwl_mvm_mei_device_state(mvm, true);
+
+ IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+ return 0;
+ error:
+ iwl_mvm_stop_device(mvm);
+ return ret;
+}
+
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+ int ret, i;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+ if (ret)
+ goto error;
+
+ /* Send phy db control command and then phy db calibration*/
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_phy_cfg_cmd(mvm);
+ if (ret)
+ goto error;
+
+ /* init the fw <-> mac80211 STA mapping */
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
+ }
+
+ if (!iwl_mvm_has_new_station_api(mvm->fw)) {
+ /*
+ * Add auxiliary station for scanning.
+		 * Newer versions of this command imply that the fw uses an
+		 * internal aux station for all aux activities that don't
+		 * require a dedicated data queue.
+		 * In old versions the aux station uses a mac id like other
+		 * stations, not an lmac id.
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+ error:
+ iwl_mvm_stop_device(mvm);
+ return ret;
+}
+
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver),
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration));
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+ IWL_DEBUG_INFO(mvm,
+ "MFUART: image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->image_size));
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/led.c b/sys/contrib/dev/iwlwifi/mvm/led.c
new file mode 100644
index 000000000000..c3cc1ea3ccc9
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/led.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2019, 2025 Intel Corporation
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
+{
+ struct iwl_led_cmd led_cmd = {
+ .status = cpu_to_le32(on),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(LONG_GROUP, LEDS_CMD),
+ .len = { sizeof(led_cmd), },
+ .data = { &led_cmd, },
+ .flags = CMD_ASYNC,
+ };
+ int err;
+
+ if (!iwl_mvm_firmware_running(mvm))
+ return;
+
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (err)
+ IWL_WARN(mvm, "LED command failed: %d\n", err);
+}
+
+static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
+ iwl_mvm_send_led_fw_cmd(mvm, on);
+ return;
+ }
+
+ iwl_write32(mvm->trans, CSR_LED_REG,
+ on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+
+ iwl_mvm_led_set(mvm, brightness > 0);
+}
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_BLINK:
+		IWL_ERR(mvm, "Blink led mode not supported, using default\n");
+ fallthrough;
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mvm, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
+ mvm->led.brightness_set = iwl_led_brightness_set;
+ mvm->led.max_brightness = 1;
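+	/* the LED is binary: any non-zero brightness is treated as "on" */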
+
+ if (mode == IWL_LED_RF_STATE)
+ mvm->led.default_trigger =
+ ieee80211_get_radio_led_name(mvm->hw);
+
+ ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+ if (ret) {
+ kfree(mvm->led.name);
+ IWL_INFO(mvm, "Failed to enable led\n");
+ return ret;
+ }
+
+ mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+ return 0;
+}
+
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+ return;
+
+ /*
+	 * if we control the LED through the register, we're doing it
+ * even when the firmware isn't up, so no need to sync
+ */
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ return;
+
+ iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
+}
+
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+ return;
+
+ led_classdev_unregister(&mvm->led);
+ kfree(mvm->led.name);
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/link.c b/sys/contrib/dev/iwlwifi/mvm/link.c
new file mode 100644
index 000000000000..2269acc55c0e
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/link.c
@@ -0,0 +1,1128 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022 - 2024 Intel Corporation
+ */
+#include "mvm.h"
+#include "time-event.h"
+
+#define HANDLE_ESR_REASONS(HOW) \
+ HOW(BLOCKED_PREVENTION) \
+ HOW(BLOCKED_WOWLAN) \
+ HOW(BLOCKED_TPT) \
+ HOW(BLOCKED_FW) \
+ HOW(BLOCKED_NON_BSS) \
+ HOW(BLOCKED_ROC) \
+ HOW(BLOCKED_TMP_NON_BSS) \
+ HOW(EXIT_MISSED_BEACON) \
+ HOW(EXIT_LOW_RSSI) \
+ HOW(EXIT_COEX) \
+ HOW(EXIT_BANDWIDTH) \
+ HOW(EXIT_CSA) \
+ HOW(EXIT_LINK_USAGE)
+
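+/*
+ * NAME_ENTRY(x) expands to e.g. [ilog2(IWL_MVM_ESR_BLOCKED_TPT)] =
+ * "BLOCKED_TPT", so the array below is indexed by bit position rather
+ * than by mask value.
+ */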
+static const char *const iwl_mvm_esr_states_names[] = {
+#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
+ HANDLE_ESR_REASONS(NAME_ENTRY)
+};
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
+{
+ int offs = ilog2(state);
+
+ if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
+ !iwl_mvm_esr_states_names[offs])
+ return "UNKNOWN";
+
+ return iwl_mvm_esr_states_names[offs];
+}
+
+static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
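+	/*
+	 * each reason expands to one "%s" in the format string and one
+	 * matching argument (the bracketed name, or an empty string)
+	 */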
+ IWL_DEBUG_INFO(mvm,
+ "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_ESR_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
+static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
+ struct iwl_link_config_cmd *cmd,
+ enum iwl_ctxt_action action)
+{
+ int ret;
+
+ cmd->action = cpu_to_le32(action);
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD), 0,
+ sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n",
+ action, ret);
+ return ret;
+}
+
+void iwl_mvm_set_link_fw_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ link_info->fw_link_id = mvmvif->id;
+}
+
+int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+
+ if (WARN_ON_ONCE(!link_info))
+ return -EINVAL;
+
+ iwl_mvm_set_link_fw_id(mvm, vif, link_conf);
+
+ /* Update SF - Disable if needed. if this fails, SF might still be on
+ * while many macs are bound, which is forbidden - so fail the binding.
+ */
+ if (iwl_mvm_sf_update(mvm, vif, false))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+ cmd.mac_id = cpu_to_le32(mvmvif->id);
+ cmd.spec_link_id = link_conf->link_id;
+ WARN_ON_ONCE(link_info->phy_ctxt);
+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+
+ memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
+ memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
+
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+
+ return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
+}
+
+struct iwl_mvm_esr_iter_data {
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ bool lift_block;
+};
+
+static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_esr_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_id];
+ if (vif == data->vif && link_id == data->link_id)
+ continue;
+ if (link_info->active)
+ data->lift_block = false;
+ }
+}
+
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active)
+{
+ /* An active link of a non-station vif blocks EMLSR. Upon activation
+ * block EMLSR on the bss vif. Upon deactivation, check if this link
+	 * was the last active non-station link, and if so unblock the bss vif.
+ */
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_esr_iter_data data = {
+ .vif = vif,
+ .link_id = link_id,
+ .lift_block = true,
+ };
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return 0;
+
+ if (active)
+ return iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_NON_BSS);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_esr_vif_iterator, &data);
+ if (data.lift_block) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ return 0;
+}
+
+int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u32 changes, bool active)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_mvm_phy_ctxt *phyctxt;
+ struct iwl_link_config_cmd cmd = {};
+ u32 ht_flag, flags = 0, flags_mask = 0;
+ int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+
+ if (WARN_ON_ONCE(!link_info ||
+ link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
+ return -EINVAL;
+
+ if (changes & LINK_CONTEXT_MODIFY_ACTIVE) {
+ /* When activating a link, phy context should be valid;
+ * when deactivating a link, it also should be valid since
+ * the link was active before. So, do nothing in this case.
+		 * Since a link is first added with FW_CTXT_INVALID, we can
+		 * get here if it's removed before it was ever activated.
+ */
+ if (!link_info->phy_ctxt)
+ return 0;
+
+ /* Catch early if driver tries to activate or deactivate a link
+ * twice.
+ */
+ WARN_ON_ONCE(active == link_info->active);
+
+ /* When deactivating a link session protection should
+ * be stopped. Also let the firmware know if we can't Tx.
+ */
+ if (!active && vif->type == NL80211_IFTYPE_STATION) {
+ iwl_mvm_stop_session_protection(mvm, vif);
+ if (link_info->csa_block_tx) {
+ cmd.block_tx = 1;
+ link_info->csa_block_tx = false;
+ }
+ }
+ }
+
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+
+ /* The phy_id, link address and listen_lmac can be modified only until
+ * the link becomes active, otherwise they will be ignored.
+ */
+ phyctxt = link_info->phy_ctxt;
+ if (phyctxt)
+ cmd.phy_id = cpu_to_le32(phyctxt->id);
+ else
+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+ cmd.mac_id = cpu_to_le32(mvmvif->id);
+
+ memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
+
+ cmd.active = cpu_to_le32(active);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
+ memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
+
+ iwl_mvm_set_fw_basic_rates(mvm, vif, link_info,
+ &cmd.cck_rates, &cmd.ofdm_rates);
+
+ cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble);
+ cmd.short_slot = cpu_to_le32(link_conf->use_short_slot);
+
+	/* The fw does not distinguish between HT and FAT */
+ ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT;
+ iwl_mvm_set_fw_protection_flags(mvm, vif, link_conf,
+ &cmd.protection_flags,
+ ht_flag, LINK_PROT_FLG_TGG_PROTECT);
+
+ iwl_mvm_set_fw_qos_params(mvm, vif, link_conf, cmd.ac,
+ &cmd.qos_flags);
+
+ cmd.bi = cpu_to_le32(link_conf->beacon_int);
+ cmd.dtim_interval = cpu_to_le32(link_conf->beacon_int *
+ link_conf->dtim_period);
+
+ if (!link_conf->he_support || iwlwifi_mod_params.disable_11ax ||
+ (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) {
+ changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS;
+ goto send_cmd;
+ }
+
+ cmd.htc_trig_based_pkt_ext = link_conf->htc_trig_based_pkt_ext;
+
+ if (link_conf->uora_exists) {
+ cmd.rand_alloc_ecwmin =
+ link_conf->uora_ocw_range & 0x7;
+ cmd.rand_alloc_ecwmax =
+ (link_conf->uora_ocw_range >> 3) & 0x7;
+ }
+
+ /* ap_sta may be NULL if we're disconnecting */
+ if (changes & LINK_CONTEXT_MODIFY_HE_PARAMS && mvmvif->ap_sta) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(mvmvif->ap_sta, link_id);
+
+ if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+ link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+ cmd.ul_mu_data_disable = 1;
+ }
+
+ /* TODO how to set ndp_fdbk_buff_th_exp? */
+
+ if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id],
+ &cmd.trig_based_txf[0])) {
+ flags |= LINK_FLG_MU_EDCA_CW;
+ flags_mask |= LINK_FLG_MU_EDCA_CW;
+ }
+
+ if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
+ if (iwlwifi_mod_params.disable_11be ||
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
+ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+ else
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
+ }
+
+ cmd.bss_color = link_conf->he_bss_color.color;
+
+ if (!link_conf->he_bss_color.enabled) {
+ flags |= LINK_FLG_BSS_COLOR_DIS;
+ flags_mask |= LINK_FLG_BSS_COLOR_DIS;
+ }
+
+ cmd.frame_time_rts_th = cpu_to_le16(link_conf->frame_time_rts_th);
+
+ /* Block 26-tone RU OFDMA transmissions */
+ if (link_info->he_ru_2mhz_block) {
+ flags |= LINK_FLG_RU_2MHZ_BLOCK;
+ flags_mask |= LINK_FLG_RU_2MHZ_BLOCK;
+ }
+
+ if (link_conf->nontransmitted) {
+ ether_addr_copy(cmd.ref_bssid_addr,
+ link_conf->transmitter_bssid);
+ cmd.bssid_index = link_conf->bssid_index;
+ }
+
+send_cmd:
+ cmd.modify_mask = cpu_to_le32(changes);
+ cmd.flags = cpu_to_le32(flags);
+ if (cmd_ver < 6)
+ cmd.flags_mask = cpu_to_le32(flags_mask);
+ cmd.spec_link_id = link_conf->link_id;
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+
+ ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
+ if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
+ link_info->active = active;
+
+ return ret;
+}
+
+int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_link_config_cmd cmd = {};
+ int ret;
+
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+ link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ cmd.spec_link_id = link_conf->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+
+ ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE);
+
+ if (!ret && iwl_mvm_sf_update(mvm, vif, true))
+ IWL_ERR(mvm, "Failed to update SF state\n");
+
+ return ret;
+}
+
+/*
+ * A link should be deactivated before removal, so in most cases we need
+ * to perform these two operations together.
+ */
+int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ int ret;
+
+ ret = iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+ if (ret)
+ return ret;
+
+	return iwl_mvm_remove_link(mvm, vif, link_conf);
+}
+
+struct iwl_mvm_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 177),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1750),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_LB 30
+#define DEFAULT_CHAN_LOAD_HB 15
+#define DEFAULT_CHAN_LOAD_UHB 0
+
+/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
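+
+/*
+ * Illustrative arithmetic (values assumed): a 30% load becomes
+ * 30 * 256 / 100 = 76 on the 0-255 scale, matching the units of the
+ * QBSS Load element's channel utilization field.
+ */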
+
+static unsigned int
+iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels, n_punctured, puncturing_penalty;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width : (%d)\n", mhz))
+ return SCALE_FACTOR;
+
+ /* No puncturing, no penalty */
+ if (mhz < 80)
+ return SCALE_FACTOR;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+ /* how many of these are punctured */
+ n_punctured = hweight16(link_conf->chanreq.oper.punctured);
+
+ puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
+ return SCALE_FACTOR - puncturing_penalty;
+}
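+
+/*
+ * Worked example (values assumed): an 80 MHz channel has 4 subchannels;
+ * with one 20 MHz subchannel punctured, the penalty is 1 * 256 / 4 = 64,
+ * so the returned factor is 256 - 64 = 192, i.e. 75% of the full grade.
+ */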
+
+static unsigned int
+iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct iwl_mvm_vif_link_info *mvm_link =
+ iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
+ const struct element *bss_load_elem;
+ const struct ieee80211_bss_load_elem *bss_load;
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+ const struct cfg80211_bss_ies *ies;
+ unsigned int chan_load;
+ u32 chan_load_by_us;
+
+ rcu_read_lock();
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+ else
+ bss_load_elem = NULL;
+
+	/* If there is no BSS Load element, take the defaults */
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load)) {
+ rcu_read_unlock();
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_LB;
+ break;
+ case NL80211_BAND_5GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_HB;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_UHB;
+ break;
+ default:
+ chan_load = 0;
+ break;
+ }
+ /* The defaults are given in percentage */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+ }
+
+ bss_load = (const void *)bss_load_elem->data;
+ /* Channel util is in range 0-255 */
+ chan_load = bss_load->channel_util;
+ rcu_read_unlock();
+
+ if (!mvm_link || !mvm_link->active)
+ return chan_load;
+
+ if (WARN_ONCE(!mvm_link->phy_ctxt,
+ "Active link (%u) without phy ctxt assigned!\n",
+ link_conf->link_id))
+ return chan_load;
+
+ /* channel load by us is given in percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
+
+	/*
+	 * Only subtract our own load if the firmware-reported value can
+	 * possibly be valid, i.e. does not exceed the total load
+	 */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
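+
+/*
+ * Worked example (values assumed): if the AP reports channel_util = 128
+ * (~50%) and our own load on the PHY context is 10%, then
+ * chan_load_by_us = 10 * 256 / 100 = 25 and the returned load is
+ * 128 - 25 = 103, i.e. the load generated by everyone but us.
+ */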
+
+static unsigned int
+iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
+{
+ return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 in error case */
+VISIBLE_IF_IWLWIFI_KUNIT
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int i, rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ)
+ link_rssi += 4;
+
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ /* Get grade based on RSSI */
+ for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mvm_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* apply the channel load and puncturing factors */
+ grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
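+
+/*
+ * Worked example (values assumed): a 5 GHz link at -70 dBm uses
+ * rssi_idx 1 and maps to grade 2064 (the first line with rssi >= -70).
+ * With the default 5 GHz load of 15% (38/256), the channel load factor
+ * is 256 - 38 = 218, giving 2064 * 218 / 256 = 1757; with no puncturing
+ * the final grade stays 1757.
+ */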
+
+static
+u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /* TODO: don't select links that weren't discovered in the last scan */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
+ data[n_data].signal = link_conf->bss->signal / 100;
+ data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
+
+ if (data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+struct iwl_mvm_bw_to_rssi_threshs {
+ s8 low;
+ s8 high;
+};
+
+#define BW_TO_RSSI_THRESHOLDS(_bw) \
+ [IWL_PHY_CHANNEL_MODE ## _bw] = { \
+ .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
+ .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
+ }
+
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
+ BW_TO_RSSI_THRESHOLDS(20),
+ BW_TO_RSSI_THRESHOLDS(40),
+ BW_TO_RSSI_THRESHOLDS(80),
+ BW_TO_RSSI_THRESHOLDS(160)
+ /* 320 MHz has the same thresholds as 20 MHz */
+ };
+ const struct iwl_mvm_bw_to_rssi_threshs *threshs;
+ u8 chan_width = iwl_mvm_get_channel_width(chandef);
+
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+	/* 320 MHz (6 GHz only) uses the same thresholds as 20 MHz */
+ if (chan_width == IWL_PHY_CHANNEL_MODE320)
+ chan_width = IWL_PHY_CHANNEL_MODE20;
+
+ threshs = &bw_to_rssi_threshs_map[chan_width];
+
+ return low ? threshs->low : threshs->high;
+}
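+
+/*
+ * Usage sketch: for a 160 MHz chandef on 5 GHz this returns
+ * IWL_MVM_LOW_RSSI_THRESH_160MHZ when low is true, and the matching
+ * high threshold otherwise; a 320 MHz chandef falls back to the
+ * 20 MHz pair.
+ */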
+
+static u32
+iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mvm->hw->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mvm_esr_state ret = 0;
+ s8 thresh;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+		return 0;
+
+	/* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
+ primary)))
+ ret |= IWL_MVM_ESR_EXIT_COEX;
+
+ thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
+ false);
+
+ if (link->signal < thresh)
+ ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MVM_ESR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Link %d is not allowed for esr\n",
+ link->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ }
+ return ret;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_mvm_esr_state ret = 0;
+
+ /* Per-link considerations */
+ if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
+ iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
+ return false;
+
+ if (a->chandef->chan->band == b->chandef->chan->band ||
+ a->chandef->width != b->chandef->width)
+ ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ return false;
+ }
+
+	return true;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
+
+/*
+ * Returns the combined eSR grade of two given links.
+ * Returns 0 if eSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mvm_get_chan_load(primary_conf);
+
+ return a->grade +
+ ((b->grade * primary_load) / SCALE_FACTOR);
+}
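+
+/*
+ * Worked example (values assumed): with a->grade = 2064, b->grade = 1548
+ * and a primary channel load of 76/256, the combined grade is
+ * 2064 + (1548 * 76) / 256 = 2064 + 459 = 2523; the secondary link only
+ * contributes in proportion to how busy the primary channel is.
+ */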
+
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mvm_link_sel_data *best_link;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u8 best, primary_link, best_in_pair, n_data;
+ u16 max_esr_grade = 0, new_active_links;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+
+ if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
+ return;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+	/*
+	 * The logic below is a simple version that doesn't suit more than
+	 * 2 links.
+	 */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
+ &best);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ best_link = &data[best];
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+
+ /* eSR is not supported/blocked, or only one usable link */
+ if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
+ mvmvif->esr_disable_reason || n_data == 1)
+ goto set_active;
+
+ for (u8 a = 0; a < n_data; a++)
+ for (u8 b = a + 1; b < n_data; b++) {
+ u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
+ &data[b],
+ &best_in_pair);
+
+ if (esr_grade <= max_esr_grade)
+ continue;
+
+ max_esr_grade = esr_grade;
+ primary_link = best_in_pair;
+ new_active_links = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+
+ /* No valid pair was found, go with the best link */
+ if (hweight16(new_active_links) <= 1)
+ goto set_active;
+
+ /* For equal grade - prefer EMLSR */
+ if (best_link->grade > max_esr_grade) {
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+ }
+set_active:
+ IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
+ new_active_links, primary_link);
+ ieee80211_set_active_links_async(vif, new_active_links);
+ mvmvif->link_selection_res = new_active_links;
+ mvmvif->link_selection_primary = primary_link;
+}
+
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* relevant data is written with both locks held, so read with either */
+ lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) ||
+ lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx));
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (mvmvif->esr_active &&
+ !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links)))
+ return mvmvif->primary_link;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
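+
+/*
+ * Example (bitmap assumed): with active_links = 0x5 (links 0 and 2),
+ * iwl_mvm_get_other_link(vif, 0) returns __ffs(0x5 & ~BIT(0)) = 2.
+ */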
+
+/* Reasons that can cause esr prevention */
+#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
+#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
+#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
+#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
+
+static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ enum iwl_mvm_esr_state reason)
+{
+ bool timeout_expired = time_after(jiffies,
+ mvmvif->last_esr_exit.ts +
+ IWL_MVM_PREVENT_ESR_TIMEOUT);
+ unsigned long delay;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Only handle reasons that can cause prevention */
+ if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
+ return false;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
+ mvmvif->exit_same_reason_count = 1;
+ return false;
+ }
+
+ mvmvif->exit_same_reason_count++;
+ if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
+ mvmvif->exit_same_reason_count > 3))
+ return false;
+
+ mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
+
+ /*
+ * For the second exit, use a short prevention, and for the third one,
+ * use a long prevention.
+ */
+ delay = mvmvif->exit_same_reason_count == 2 ?
+ IWL_MVM_ESR_PREVENT_SHORT :
+ IWL_MVM_ESR_PREVENT_LONG;
+
+ IWL_DEBUG_INFO(mvm,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mvmvif->exit_same_reason_count,
+ iwl_get_esr_state_string(reason), reason);
+
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk, delay);
+ return true;
+}
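+
+/*
+ * Example timeline (values assumed): an exit at t=0 due to missed
+ * beacons sets the count to 1; a second exit for the same reason at
+ * t=100s (within the 400s window) arms a 300s prevention, and a third
+ * one arms a 600s prevention, after which the counter is reset.
+ */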
+
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
+
+/* API to exit eSR mode */
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 new_active_links;
+ bool prevented;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* Nothing to do */
+ if (!mvmvif->esr_active)
+ return;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
+ return;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mvm,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_get_esr_state_string(reason), reason,
+ vif->active_links, new_active_links);
+
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Prevent EMLSR if needed */
+ prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
+
+ /* Remember why and when we exited EMLSR */
+ mvmvif->last_esr_exit.ts = jiffies;
+ mvmvif->last_esr_exit.reason = reason;
+
+ /*
+ * If EMLSR is prevented now - don't try to get back to EMLSR.
+ * If we exited due to a blocking event, we will try to get back to
+ * EMLSR when the corresponding unblocking event will happen.
+ */
+ if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
+ return;
+
+ /* If EMLSR is not blocked - try enabling it again in 30 seconds */
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk,
+ round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
+}
+
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ if (mvmvif->esr_disable_reason & reason)
+ return;
+
+ IWL_DEBUG_INFO(mvm,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+
+ mvmvif->esr_disable_reason |= reason;
+
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
+}
+
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ int primary_link = iwl_mvm_get_primary_link(vif);
+ int ret;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* This should be called only with blocking reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return 0;
+
+ /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ /* only additionally block for consistency and to avoid concurrency */
+ iwl_mvm_block_esr(mvm, vif, reason, primary_link);
+ mutex_unlock(&mvm->mutex);
+
+ return 0;
+}
+
+static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
+ IWL_MVM_TRIGGER_LINK_SEL_TIME);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
+ mvmvif->esr_active)
+ return;
+
+ IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+
+	/*
+	 * If we exited due to an EXIT reason and the exit happened less
+	 * than 30 seconds ago, then an MLO scan was already scheduled.
+	 */
+ if (!need_new_sel &&
+ !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
+ IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
+ return;
+ }
+
+ /*
+ * If EMLSR was blocked for more than 30 seconds, or the last link
+ * selection decided to not enter EMLSR, trigger a new scan.
+ */
+ if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
+ IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
+ /*
+ * If EMLSR was blocked for less than 30 seconds, and the last link
+ * selection decided to use EMLSR, activate EMLSR using the previous
+ * link selection result.
+ */
+ } else {
+ IWL_DEBUG_INFO(mvm,
+ "Use the latest link selection result: 0x%x\n",
+ mvmvif->link_selection_res);
+ ieee80211_set_active_links_async(vif,
+ mvmvif->link_selection_res);
+ }
+}
+
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ /* No Change */
+ if (!(mvmvif->esr_disable_reason & reason))
+ return;
+
+ mvmvif->esr_disable_reason &= ~reason;
+
+ IWL_DEBUG_INFO(mvm,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ if (!mvmvif->esr_disable_reason)
+ iwl_mvm_esr_unblocked(mvm, vif);
+}
+
+void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
+{
+ link->bcast_sta.sta_id = IWL_INVALID_STA;
+ link->mcast_sta.sta_id = IWL_INVALID_STA;
+ link->ap_sta_id = IWL_INVALID_STA;
+
+ for (int r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++)
+ link->smps_requests[r] =
+ IEEE80211_SMPS_AUTOMATIC;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 000000000000..a3ff2f083aef
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+#include "time-event.h"
+#include "iwl-utils.h"
+
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+ IWL_MVM_TX_FIFO_VO,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_BK,
+};
+
+const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
+ IWL_GEN2_EDCA_TX_FIFO_VO,
+ IWL_GEN2_EDCA_TX_FIFO_VI,
+ IWL_GEN2_EDCA_TX_FIFO_BE,
+ IWL_GEN2_EDCA_TX_FIFO_BK,
+ IWL_GEN2_TRIG_TX_FIFO_VO,
+ IWL_GEN2_TRIG_TX_FIFO_VI,
+ IWL_GEN2_TRIG_TX_FIFO_BE,
+ IWL_GEN2_TRIG_TX_FIFO_BK,
+};
+
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
+struct iwl_mvm_mac_iface_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+ unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+ enum iwl_tsf_id preferred_tsf;
+ bool found_vif;
+};
+
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
+
+ /* Skip the interface for which we are trying to assign a tsf_id */
+ if (vif == data->vif)
+ return;
+
+ /*
+ * The TSF is a hardware/firmware resource, there are 4 and
+ * the driver should assign and free them as needed. However,
+ * there are cases where 2 MACs should share the same TSF ID
+ * for the purpose of clock sync, an optimization to avoid
+ * clock drift causing overlapping TBTTs/DTIMs for a GO and
+ * client in the system.
+ *
+ * The firmware will decide according to the MAC type which
+ * will be the leader and follower. Clients that need to sync
+ * with a remote station will be the leader, and an AP or GO
+ * will be the follower.
+ *
+ * Depending on the new interface type it can be following
+ * or become the leader of an existing interface.
+ */
+ switch (data->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /*
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
+ */
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+
+ case NL80211_IFTYPE_AP:
+ /*
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
+ */
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
+ }
+ break;
+ default:
+ /*
+ * For all other interface types there's no need to
+ * take drift into account. Either they're exclusive
+ * like IBSS and monitor, or we don't care much about
+ * their TSF (like P2P Device), but we won't be able
+ * to share the TSF resource.
+ */
+ break;
+ }
+
+ /*
+ * Unless we exited above, we can't share the TSF resource
+ * that the virtual interface we're iterating over is using
+ * with the new one, so clear the available bit and if this
+ * was the preferred one, reset that as well.
+ */
+ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+ if (data->preferred_tsf == mvmvif->tsf_id)
+ data->preferred_tsf = NUM_TSF_IDS;
+}
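+
+/*
+ * Example (beacon intervals assumed): a new client with BI 100 TU and an
+ * existing AP with BI 200 TU give min_bi = 100, and (100 - 200) % 100 == 0,
+ * so the AP's tsf_id becomes the preferred one and the two MACs share it.
+ */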
+
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* Iterator may already find the interface being added -- skip it */
+ if (vif == data->vif) {
+ data->found_vif = true;
+ return;
+ }
+
+ /* Mark MAC IDs as used by clearing the available bit, and
+ * (below) mark TSFs as used if their existing use is not
+ * compatible with the new interface type.
+ * No locking or atomic bit operations are needed since the
+ * data is on the stack of the caller function.
+ */
+ __clear_bit(mvmvif->id, data->available_mac_ids);
+
+ /* find a suitable tsf_id */
+ iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_tsf_id_iter, &data);
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+}
+
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_iface_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+ .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+ /* no preference yet */
+ .preferred_tsf = NUM_TSF_IDS,
+ .found_vif = false,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Allocate a MAC ID and a TSF for this MAC, along with the queues
+ * and other resources.
+ */
+
+ /*
+ * Before the iterator, we start with all MAC IDs and TSFs available.
+ *
+ * During iteration, all MAC IDs are cleared that are in use by other
+ * virtual interfaces, and all TSF IDs are cleared that can't be used
+ * by this new virtual interface because they're used by an interface
+ * that can't share it with the new one.
+ * At the same time, we check if there's a preferred TSF in the case
+ * that we should share it with another interface.
+ */
+
+ /* MAC ID 0 should be used only for the managed/IBSS vif with non-MLO
+ * FW API
+ */
+ if (!mvm->mld_api_is_used) {
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!vif->p2p)
+ break;
+ fallthrough;
+ default:
+ __clear_bit(0, data.available_mac_ids);
+ }
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_iface_iterator, &data);
+
+ /*
+ * In the case we're getting here during resume, it's similar to
+ * firmware restart, and with RESUME_ALL the iterator will find
+ * the vif being added already.
+ * We don't want to reassign any IDs in either case since doing
+ * so would probably assign different IDs (as interfaces aren't
+ * necessarily added in the same order), but the old IDs were
+ * preserved anyway, so skip ID assignment for both resume and
+ * recovery.
+ */
+ if (data.found_vif)
+ return 0;
+
+ /* Therefore, in recovery, we can't get here */
+ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return -EBUSY;
+
+ mvmvif->id = find_first_bit(data.available_mac_ids,
+ NUM_MAC_INDEX_DRIVER);
+ if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ if (data.preferred_tsf != NUM_TSF_IDS)
+ mvmvif->tsf_id = data.preferred_tsf;
+ else
+ mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+ NUM_TSF_IDS);
+ if (mvmvif->tsf_id == NUM_TSF_IDS) {
+ IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+
+ mvmvif->color = 0;
+
+ INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+ mvmvif->time_event_data.id = TE_MAX;
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ iwl_mvm_init_link(&mvmvif->deflink);
+
+ /* No need to allocate data queues to P2P Device MAC */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return 0;
+
+ /* Allocate the CAB queue for softAP and GO interfaces */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * For TVQM this will be overwritten later with the FW assigned
+ * queue value (when queue is enabled).
+ */
+ mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+ }
+
+ return 0;
+
+exit_fail:
+ memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+ return ret;
+}
+
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band,
+ u8 *cck_rates, u8 *ofdm_rates)
+{
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u8 cck = 0;
+ u8 ofdm = 0;
+ int i;
+
+ sband = mvm->hw->wiphy->bands[band];
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+		int hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
+ if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWL_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (IWL_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
+
+void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link_info,
+ __le32 *cck_rates, __le32 *ofdm_rates)
+{
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 cck_ack_rates = 0, ofdm_ack_rates = 0;
+ enum nl80211_band band = NL80211_BAND_2GHZ;
+
+ phy_ctxt = link_info->phy_ctxt;
+ if (phy_ctxt && phy_ctxt->channel)
+ band = phy_ctxt->channel->band;
+
+ iwl_mvm_ack_rates(mvm, vif, band, &cck_ack_rates, &ofdm_ack_rates);
+
+ *cck_rates = cpu_to_le32((u32)cck_ack_rates);
+ *ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+}
+
+void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ __le32 *protection_flags, u32 ht_flag,
+ u32 tgg_flag)
+{
+	/* for both sta and ap, ht_operation_mode holds the protection_mode */
+ u8 protection_mode = link_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ bool ht_enabled = !!(link_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION);
+
+ if (link_conf->use_cts_prot)
+ *protection_flags |= cpu_to_le32(tgg_flag);
+
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ link_conf->use_cts_prot,
+ link_conf->ht_operation_mode);
+
+ if (!ht_enabled)
+ return;
+
+ IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+ /*
+ * See section 9.23.3.1 of IEEE 80211-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ default:
+ IWL_ERR(mvm, "Illegal protection mode %d\n",
+ protection_mode);
+ break;
+ }
+}
+
+void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct iwl_ac_qos *ac, __le32 *qos_flags)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link =
+ mvmvif->link[link_conf->link_id];
+ int i;
+
+ if (!mvm_link)
+ return;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
+ u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i);
+
+ ac[ucode_ac].cw_min =
+ cpu_to_le16(mvm_link->queue_params[i].cw_min);
+ ac[ucode_ac].cw_max =
+ cpu_to_le16(mvm_link->queue_params[i].cw_max);
+ ac[ucode_ac].edca_txop =
+ cpu_to_le16(mvm_link->queue_params[i].txop * 32);
+ ac[ucode_ac].aifsn = mvm_link->queue_params[i].aifs;
+ ac[ucode_ac].fifos_mask = BIT(txf);
+ }
+
+ if (link_conf->qos)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+}
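+
+/*
+ * Unit note with a worked value (assumed): mac80211 reports txop in
+ * units of 32 usec, so a queue_params txop of 94 becomes
+ * 94 * 32 = 3008 usec in the firmware's edca_txop field.
+ */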
+
+int iwl_mvm_get_mac_type(struct ieee80211_vif *vif)
+{
+ u32 mac_type = FW_MAC_TYPE_BSS_STA;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ mac_type = FW_MAC_TYPE_P2P_STA;
+ else
+ mac_type = FW_MAC_TYPE_BSS_STA;
+ break;
+ case NL80211_IFTYPE_AP:
+ mac_type = FW_MAC_TYPE_GO;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ mac_type = FW_MAC_TYPE_LISTENER;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ mac_type = FW_MAC_TYPE_P2P_DEVICE;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ mac_type = FW_MAC_TYPE_IBSS;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return mac_type;
+}
+
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ const u8 *bssid_override,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ const u8 *bssid = bssid_override ?: vif->bss_conf.bssid;
+ u32 ht_flag;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd->action = cpu_to_le32(action);
+ cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif));
+
+ cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+ memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+
+ if (bssid)
+ memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
+ else
+ eth_broadcast_addr(cmd->bssid_addr);
+
+ iwl_mvm_set_fw_basic_rates(mvm, vif, &mvmvif->deflink, &cmd->cck_rates,
+ &cmd->ofdm_rates);
+
+ cmd->cck_short_preamble =
+ cpu_to_le32(vif->bss_conf.use_short_preamble ?
+ MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot =
+ cpu_to_le32(vif->bss_conf.use_short_slot ?
+ MAC_FLG_SHORT_SLOT : 0);
+
+ cmd->filter_flags = 0;
+
+ iwl_mvm_set_fw_qos_params(mvm, vif, &vif->bss_conf, cmd->ac,
+ &cmd->qos_flags);
+
+ /* The fw does not distinguish between ht and fat */
+ ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+ iwl_mvm_set_fw_protection_flags(mvm, vif, &vif->bss_conf,
+ &cmd->protection_flags,
+ ht_flag, MAC_PROT_FLG_TGG_PROTECT);
+}
+
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_mac_ctx_cmd *cmd)
+{
+ int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
+ sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send MAC_CONTEXT_CMD (action:%d): %d\n",
+ le32_to_cpu(cmd->action), ret);
+ return ret;
+}
+
+void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ __le64 *dtim_tsf, __le32 *dtim_time,
+ __le32 *assoc_beacon_arrive_time)
+{
+ u32 dtim_offs;
+
+ /*
+ * The DTIM count counts down, so when it is N that means N
+ * more beacon intervals happen until the DTIM TBTT. Therefore
+ * add this to the current time. If that ends up being in the
+ * future, the firmware will handle it.
+ *
+ * Also note that the system_timestamp (which we get here as
+ * "sync_device_ts") and TSF timestamp aren't at exactly the
+ * same offset in the frame -- the TSF is at the first symbol
+ * of the TSF, the system timestamp is at signal acquisition
+ * time. This means there's an offset between them of at most
+ * a few hundred microseconds (24 * 8 bits + PLCP time gives
+ * 384us in the longest case), this is currently not relevant
+ * as the firmware wakes up around 2ms before the TBTT.
+ */
+ dtim_offs = link_conf->sync_dtim_count *
+ link_conf->beacon_int;
+ /* convert TU to usecs */
+ dtim_offs *= 1024;
+
+ *dtim_tsf =
+ cpu_to_le64(link_conf->sync_tsf + dtim_offs);
+ *dtim_time =
+ cpu_to_le32(link_conf->sync_device_ts + dtim_offs);
+ *assoc_beacon_arrive_time =
+ cpu_to_le32(link_conf->sync_device_ts);
+
+ IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
+ le64_to_cpu(*dtim_tsf),
+ le32_to_cpu(*dtim_time),
+ dtim_offs);
+}
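+
+/*
+ * Worked example (values assumed): with sync_dtim_count = 2 and
+ * beacon_int = 100 TU, dtim_offs = 200 TU = 204800 usec, which is added
+ * to both the synced TSF and the device timestamp to predict the DTIM
+ * TBTT.
+ */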
+
+__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_p2p_noa_attr *noa =
+ &vif->bss_conf.p2p_noa_attr;
+
+ return cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+}
+
+u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 twt_policy = 0;
+
+ if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
+ twt_policy |= TWT_SUPPORTED;
+ if (vif->bss_conf.twt_protected)
+ twt_policy |= PROTECTED_TWT_SUPPORTED;
+ if (vif->bss_conf.twt_broadcast)
+ twt_policy |= BROADCAST_TWT_SUPPORTED;
+
+ return twt_policy;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off,
+ const u8 *bssid_override)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct iwl_mac_data_sta *ctxt_sta;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+
+ /*
+ * We always want to hear MCAST frames, if we're not authorized yet,
+ * we'll drop them.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+
+ if (vif->p2p) {
+ cmd.p2p_sta.ctwin =
+ iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif);
+
+ ctxt_sta = &cmd.p2p_sta.sta;
+ } else {
+ ctxt_sta = &cmd.sta;
+ }
+
+ /* We need the dtim_period to set the MAC as associated */
+ if (vif->cfg.assoc && vif->bss_conf.dtim_period &&
+ !force_assoc_off) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_set_fw_dtim_tbtt(mvm, vif, &vif->bss_conf,
+ &ctxt_sta->dtim_tsf,
+ &ctxt_sta->dtim_time,
+ &ctxt_sta->assoc_beacon_arrive_time);
+
+ ctxt_sta->is_assoc = cpu_to_le32(1);
+
+ if (!mvmvif->authorized &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO))
+ ctxt_sta->data_policy |=
+ cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE);
+ } else {
+ ctxt_sta->is_assoc = cpu_to_le32(0);
+
+ /* Allow beacons to pass through as long as we are not
+ * associated, or we do not have dtim period information.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ }
+
+ ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+
+ ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+ ctxt_sta->assoc_id = cpu_to_le32(vif->cfg.aid);
+
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+ if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+ ctxt_sta->data_policy |=
+ cpu_to_le32(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif));
+ }
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ u32 tfd_queue_msk = 0;
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
+ MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_CRC32 |
+ MAC_FILTER_ACCEPT_GRP);
+ ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
+
+ /*
+ * the queue mask is only relevant for old TX API, and
+ * mvm->snif_queue isn't set here (it's still set to
+ * IWL_MVM_INVALID_QUEUE so the BIT() of it is UB)
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ tfd_queue_msk = BIT(mvm->snif_queue);
+
+ /* Allocate sniffer station */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
+ vif->type, IWL_STA_GENERAL_PURPOSE);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
+ MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_ACCEPT_GRP);
+
+	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+
+ /* TODO: Assumes that the beacon id == mac context id */
+ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+struct iwl_mvm_go_iterator_data {
+ bool go_active;
+};
+
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_go_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ mvmvif->ap_ibss_active)
+ data->go_active = true;
+}
+
+__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_go_iterator_data data = {};
+
+	/*
+	 * This flag should be set to true when the P2P Device is
+	 * discoverable and there is at least one other active P2P GO.
+	 * Setting this flag allows the P2P Device to be discoverable on
+	 * channels other than its listen channel.
+	 * Note that this flag should not be set in other cases, as it
+	 * opens the Rx filters on all MACs and increases the number of
+	 * interrupts.
+	 */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_go_iterator, &data);
+
+ return cpu_to_le32(data.go_active ? 1 : 0);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ cmd.p2p_dev.is_disc_extended =
+ iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
+
+ /* Override the filter flags to accept only probe requests */
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ __le32 *tim_index, __le32 *tim_size,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon. */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ *tim_index = cpu_to_le32(tim_idx);
+ *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
+ } else {
+ IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+ }
+}
+
+u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_supported_band *sband;
+ unsigned long basic = vif->bss_conf.basic_rates;
+ u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ u8 band = info->band;
+ u8 rate;
+ u32 i;
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED && ieee80211_vif_is_mld(vif)) {
+ for (i = 0; i < ARRAY_SIZE(mvmvif->link); i++) {
+ if (!mvmvif->link[i])
+ continue;
+ /* shouldn't do this when >1 link is active */
+ WARN_ON_ONCE(link_id != IEEE80211_LINK_UNSPECIFIED);
+ link_id = i;
+ }
+ }
+
+ if (link_id < IEEE80211_LINK_UNSPECIFIED) {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ basic = link_conf->basic_rates;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
+ }
+ rcu_read_unlock();
+ }
+
+ sband = mvm->hw->wiphy->bands[band];
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ u16 hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ if (lowest_ofdm > hw)
+ lowest_ofdm = hw;
+ } else if (lowest_cck > hw) {
+ lowest_cck = hw;
+ }
+ }
+
+ if (band == NL80211_BAND_2GHZ && !vif->p2p &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
+ if (lowest_cck != IWL_RATE_COUNT)
+ rate = lowest_cck;
+ else if (lowest_ofdm != IWL_RATE_COUNT)
+ rate = lowest_ofdm;
+ else
+ rate = IWL_RATE_1M_INDEX;
+ } else if (lowest_ofdm != IWL_RATE_COUNT) {
+ rate = lowest_ofdm;
+ } else {
+ rate = IWL_RATE_6M_INDEX;
+ }
+
+ return rate;
+}
+
+u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
+{
+ bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
+ u16 flags, cck_flag;
+
+ if (is_new_rate) {
+ flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
+ cck_flag = IWL_MAC_BEACON_CCK;
+ } else {
+ cck_flag = IWL_MAC_BEACON_CCK_V1;
+ flags = iwl_fw_rate_idx_to_plcp(rate_idx);
+ }
+
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ flags |= cck_flag;
+
+ return flags;
+}
+
+u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[info->band];
+ u32 legacy = vif->bss_conf.beacon_tx_rate.control[info->band].legacy;
+
+ /* if beacon rate was configured try using it */
+ if (hweight32(legacy) == 1) {
+ u32 rate = ffs(legacy) - 1;
+
+ return sband->bitrates[rate].hw_value;
+ }
+
+ return iwl_mvm_mac_ctxt_get_lowest_rate(mvm, info, vif);
+}
+
+static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct iwl_tx_cmd_v6_params *tx_params)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_tx_info *info;
+ u8 rate;
+ u32 tx_flags;
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ /* Set up TX command fields */
+ tx_params->len = cpu_to_le16((u16)beacon->len);
+ tx_params->sta_id = mvmvif->deflink.bcast_sta.sta_id;
+ tx_params->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
+ tx_flags |=
+ iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+ tx_params->tx_flags = cpu_to_le32(tx_flags);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
+ tx_params->rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+ }
+
+ rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
+
+ tx_params->rate_n_flags |=
+ cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate));
+ if (rate == IWL_FIRST_CCK_RATE)
+ tx_params->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1);
+}
+
+int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+ struct sk_buff *beacon,
+ void *data, int len)
+{
+ struct iwl_host_cmd cmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ .flags = CMD_ASYNC,
+ };
+
+ cmd.len[0] = len;
+ cmd.data[0] = data;
+ cmd.dataflags[0] = 0;
+ cmd.len[1] = beacon->len;
+ cmd.data[1] = beacon->data;
+ cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
+
+ iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
+
+ iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+ beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ beacon_cmd.csa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ beacon_cmd.ecsa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
+bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (vif->type != NL80211_IFTYPE_AP || IWL_MVM_DISABLE_AP_FILS)
+ return false;
+
+ if (cfg80211_channel_is_psc(ctx->def.chan))
+ return true;
+
+ return (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
+ u8 rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
+ u16 flags;
+ struct ieee80211_chanctx_conf *ctx;
+	int channel;
+
+	flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate);
+
+	/* Enable FILS on PSC channels, or on wide 6 GHz channels */
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
+ WARN_ON(channel == 0);
+ if (iwl_mvm_enable_fils(mvm, vif, ctx)) {
+ flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
+ 0) > 10 ?
+ IWL_MAC_BEACON_FILS :
+ IWL_MAC_BEACON_FILS_V1;
+ beacon_cmd.short_ssid =
+ cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+ vif->cfg.ssid_len));
+ }
+ rcu_read_unlock();
+
+ beacon_cmd.flags = cpu_to_le16(flags);
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+
+ if (WARN_ON(!mvmvif->link[link_conf->link_id]))
+ return -EINVAL;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12)
+ beacon_cmd.link_id =
+ cpu_to_le32(mvmvif->link[link_conf->link_id]->fw_link_id);
+ else
+ beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+ &beacon_cmd.tim_size,
+ beacon->data, beacon->len);
+
+ beacon_cmd.csa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ beacon_cmd.ecsa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
+ beacon_cmd.btwt_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+
+ return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+ sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct ieee80211_bss_conf *link_conf)
+{
+ if (WARN_ON(!beacon))
+ return -EINVAL;
+
+ if (IWL_MVM_NON_TRANSMITTING_AP)
+ return 0;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
+ return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
+ return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon,
+ link_conf);
+
+ return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct sk_buff *beacon;
+ int ret;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
+
+ beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL,
+ link_conf->link_id);
+ if (!beacon)
+ return -ENOMEM;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->beacon_inject_active) {
+ dev_kfree_skb(beacon);
+ return -EBUSY;
+ }
+#endif
+
+ ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon, link_conf);
+ dev_kfree_skb(beacon);
+ return ret;
+}
+
+struct iwl_mvm_mac_ap_iterator_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *vif;
+ u32 beacon_device_ts;
+ u16 beacon_int;
+};
+
+/* Find the beacon_device_ts and beacon_int for a managed interface */
+static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mac_ap_iterator_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return;
+
+	/* A station interface takes priority over a P2P client */
+ if (vif->p2p && data->beacon_device_ts)
+ return;
+
+ data->beacon_device_ts = vif->bss_conf.sync_device_ts;
+ data->beacon_int = vif->bss_conf.beacon_int;
+}
+
+/*
+ * Fill the filter flags for mac context of type AP or P2P GO.
+ */
+void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ __le32 *filter_flags,
+ int accept_probe_req_flag,
+ int accept_beacon_flag)
+{
+ /*
+ * in AP mode, pass probe requests and beacons from other APs
+	 * (needed for HT protection); when there are no associated
+	 * stations, don't ask the FW to pass beacons, to prevent unnecessary
+ * wake-ups.
+ */
+ *filter_flags |= cpu_to_le32(accept_probe_req_flag);
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+ *filter_flags |= cpu_to_le32(accept_beacon_flag);
+ IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+ } else {
+ IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+ }
+}
+
+/*
+ * Fill the specific data for mac context of type AP or P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_ctx_cmd *cmd,
+ struct iwl_mac_data_ap *ctxt_ap,
+ bool add)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_mac_ap_iterator_data data = {
+ .mvm = mvm,
+ .vif = vif,
+ .beacon_device_ts = 0
+ };
+
+ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+ iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif,
+ &cmd->filter_flags,
+ MAC_FILTER_IN_PROBE_REQUEST,
+ MAC_FILTER_IN_BEACON);
+
+ ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+ ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+ vif->bss_conf.dtim_period);
+
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->deflink.cab_queue);
+
+ /*
+	 * Only set the beacon time when the MAC is being added; when we
+ * just modify the MAC then we should keep the time -- the firmware
+ * can otherwise have a "jumping" TBTT.
+ */
+ if (add) {
+ /*
+ * If there is a station/P2P client interface which is
+ * associated, set the AP's TBTT far enough from the station's
+		 * TBTT. Otherwise, set it to the current system time.
+ */
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mvm_mac_ap_iterator, &data);
+
+ if (data.beacon_device_ts) {
+			u32 rand = get_random_u32_inclusive(36, 63);
+
+ mvmvif->ap_beacon_time = data.beacon_device_ts +
+ ieee80211_tu_to_usec(data.beacon_int * rand /
+ 100);
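+			/*
+			 * e.g. with beacon_int = 100 TU and rand = 50, the
+			 * AP's TBTT lands 50 TU (51200 usec) after the
+			 * client's; the 36-63 range keeps the two TBTTs at
+			 * least roughly a third of a beacon interval apart.
+			 */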
+ } else {
+ mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm);
+ }
+ }
+
+ ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+ ctxt_ap->beacon_tsf = 0; /* unused */
+
+ /* TODO: Assume that the beacon id == mac context id */
+ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ /* Fill the data specific for ap mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
+ action == FW_CTXT_ACTION_ADD);
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_ctx_cmd cmd = {};
+ struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+ /* Fill the data specific for GO mode */
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
+ action == FW_CTXT_ACTION_ADD);
+
+ cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+ cmd.go.opp_ps_enabled =
+ cpu_to_le32(!!(noa->oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT));
+
+ return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off,
+ const u8 *bssid_override)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+ force_assoc_off,
+ bssid_override);
+ case NL80211_IFTYPE_AP:
+ if (!vif->p2p)
+ return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+ else
+ return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+ case NL80211_IFTYPE_MONITOR:
+ return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+ true, NULL);
+ if (ret)
+ return ret;
+
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
+ mvmvif->uploaded = true;
+ return 0;
+}
+
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off, const u8 *bssid_override)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+ force_assoc_off, bssid_override);
+}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_ctx_cmd cmd;
+ int ret;
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+ ret = iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ mvmvif->uploaded = false;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
+ iwl_mvm_dealloc_snif_sta(mvm);
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
+ struct ieee80211_vif *csa_vif, u32 gp2,
+ bool tx_success)
+{
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(csa_vif);
+
+	/* Don't start the countdown from a failed beacon */
+ if (!tx_success && !mvmvif->csa_countdown)
+ return;
+
+ mvmvif->csa_countdown = true;
+
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
+
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
+ &csa_vif->bss_conf);
+ if (csa_vif->p2p &&
+ !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
+ tx_success) {
+ u32 rel_time = (c + 1) *
+ csa_vif->bss_conf.beacon_int -
+ IWL_MVM_CHANNEL_SWITCH_TIME_GO;
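+			/* rel_time is in TU; GP2 counts usec (1 TU = 1024 usec) */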
+ u32 apply_time = gp2 + rel_time * 1024;
+
+ iwl_mvm_schedule_csa_period(mvm, csa_vif,
+ IWL_MVM_CHANNEL_SWITCH_TIME_GO -
+ IWL_MVM_CHANNEL_SWITCH_MARGIN,
+ apply_time);
+ }
+ } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
+ /* we don't have CSA NoA scheduled yet, switch now */
+ ieee80211_csa_finish(csa_vif, 0);
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ }
+}
+
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+ struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
+ struct ieee80211_vif *csa_vif;
+ struct ieee80211_vif *tx_blocked_vif;
+ struct agg_tx_status *agg_status;
+ u16 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+
+ if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) {
+ struct iwl_tx_resp *beacon_notify_hdr =
+ &beacon_v5->beacon_notify_hdr;
+
+ if (unlikely(pkt_len < sizeof(*beacon_v5)))
+ return;
+
+ mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
+ agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+ status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
+ IWL_DEBUG_RX(mvm,
+ "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n",
+ status, beacon_notify_hdr->failure_frame,
+ le64_to_cpu(beacon->tsf),
+ mvm->ap_last_beacon_gp2,
+ le32_to_cpu(beacon_notify_hdr->initial_rate));
+ } else {
+ if (unlikely(pkt_len < sizeof(*beacon)))
+ return;
+
+ mvm->ibss_manager = beacon->ibss_mgr_status != 0;
+ status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
+ IWL_DEBUG_RX(mvm,
+ "beacon status %#x tsf:0x%016llX gp2:0x%X\n",
+ status, le64_to_cpu(beacon->tsf),
+ mvm->ap_last_beacon_gp2);
+ }
+
+ csa_vif = rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (unlikely(csa_vif && csa_vif->bss_conf.csa_active))
+ iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
+ (status == TX_STATUS_SUCCESS));
+
+ tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (unlikely(tx_blocked_vif)) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ /*
+ * The channel switch is started and we have blocked the
+ * stations. If this is the first beacon (the timeout wasn't
+		 * set), set the unblock timeout; otherwise count down
+ */
+ if (!mvm->csa_tx_block_bcn_timeout)
+ mvm->csa_tx_block_bcn_timeout =
+ IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
+ else
+ mvm->csa_tx_block_bcn_timeout--;
+
+		/* Check if the timeout has expired, and unblock tx */
+ if (mvm->csa_tx_block_bcn_timeout == 0) {
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ }
+ }
+}
+
+static void
+iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
+ const struct iwl_missed_beacons_notif *mb,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+ struct iwl_fw_dbg_trigger_tlv *trigger;
+ u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+ u32 rx_missed_bcon, rx_missed_bcon_since_rx;
+ struct ieee80211_vif *vif;
+	/* ID can be a MAC or link ID, depending on the notification version */
+ u32 id = le32_to_cpu(mb->link_id);
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+ u32 mac_type;
+ int link_id;
+ u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ MISSED_BEACONS_NOTIFICATION,
+ 0);
+ u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ MISSED_BEACONS_NOTIF, 0);
+ struct ieee80211_bss_conf *bss_conf;
+
+ /* If the firmware uses the new notification (from MAC_CONF_GROUP),
+ * refer to that notification's version.
+ * Note that the new notification from MAC_CONF_GROUP starts from
+ * version 5.
+ */
+ if (new_notif_ver)
+ notif_ver = new_notif_ver;
+
+ IWL_DEBUG_INFO(mvm,
+ "missed bcn %s_id=%u, consecutive=%u (%u)\n",
+ notif_ver < 4 ? "mac" : "link",
+ id,
+ le32_to_cpu(mb->consec_missed_beacons),
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx));
+
+ /*
+	 * starting from version 4 the ID is a link ID, but the driver
+	 * uses link ID == MAC ID, so always treat it as a MAC ID
+ */
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
+ if (!vif)
+ return;
+
+ bss_conf = &vif->bss_conf;
+ link_id = bss_conf->link_id;
+ mac_type = iwl_mvm_get_mac_type(vif);
+
+	IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u\n", mac_type);
+
+ mvm->trans->dbg.dump_file_name_ext_valid = true;
+ snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
+ "MacId_%d_MacType_%d", id, mac_type);
+
+ rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
+ rx_missed_bcon_since_rx =
+ le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
+ /*
+ * TODO: the threshold should be adjusted based on latency conditions,
+ * and/or in case of a CS flow on one of the other AP vifs.
+ */
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+				 "missed beacons exceed the threshold, but still receiving data; staying connected. Expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
+ u32 bss_param_ch_cnt_link_id =
+ bss_conf->bss_param_ch_cnt_link_id;
+ u32 scnd_lnk_bcn_lost = 0;
+
+ if (notif_ver >= 5 &&
+ !IWL_FW_CHECK(mvm,
+ le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
+ "No data for other link id but we are in EMLSR active_links: 0x%x\n",
+ vif->active_links))
+ scnd_lnk_bcn_lost =
+ le32_to_cpu(mb->consec_missed_beacons_other_link);
+
+ /* Exit EMLSR if we lost more than
+		 * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links,
+		 * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link,
+		 * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
+		 * on a link whose bss_param_ch_count has changed.
+ */
+ if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+ scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+ rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH ||
+ (bss_param_ch_cnt_link_id != link_id &&
+ rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED))
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_primary_link(vif));
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+
+ /* try to switch links, no-op if we don't have MLO */
+ iwl_mvm_int_mlo_scan(mvm, vif);
+ }
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
+
+ trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MISSED_BEACONS);
+ if (!trigger)
+ return;
+
+ bcon_trig = (void *)trigger->data;
+ stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+ stop_trig_missed_bcon_since_rx =
+ le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+ /* TODO: implement start trigger */
+
+ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+ rx_missed_bcon >= stop_trig_missed_bcon)
+#if defined(__linux__)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
+#elif defined(__FreeBSD__)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, "");
+#endif
+}
+
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, (const void *)pkt->data, pkt);
+}
+
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_missed_beacons_notif_v4 *mb_v4 =
+ (const void *)pkt->data;
+ struct iwl_missed_beacons_notif mb = {
+ .link_id = mb_v4->link_id,
+ .consec_missed_beacons = mb_v4->consec_missed_beacons,
+ .consec_missed_beacons_since_last_rx =
+ mb_v4->consec_missed_beacons_since_last_rx,
+ .other_link_id = cpu_to_le32(IWL_MVM_FW_LINK_ID_INVALID),
+ };
+
+ iwl_mvm_handle_missed_beacons_notif(mvm, &mb, pkt);
+}
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ u8 *data;
+ u32 size = le32_to_cpu(sb->byte_count);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF),
+ 0);
+
+ if (size == 0)
+ return;
+
+ /* handle per-version differences */
+ if (ver <= 2) {
+ struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data;
+
+ if (pkt_len < struct_size(sb_v2, data, size))
+ return;
+
+ data = sb_v2->data;
+ } else {
+ struct iwl_stored_beacon_notif *sb_v3 = (void *)pkt->data;
+
+ if (pkt_len < struct_size(sb_v3, data, size))
+ return;
+
+ data = sb_v3->data;
+ }
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ /* update rx_status according to the notification's metadata */
+ memset(&rx_status, 0, sizeof(rx_status));
+ rx_status.mactime = le64_to_cpu(sb->tsf);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+ rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+ rx_status.band =
+ (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+ rx_status.band);
+
+ /* copy the data */
+ skb_put_data(skb, data, size);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ /* pass it as regular rx to mac80211 */
+ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
+}
+
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+ struct iwl_probe_resp_data *old_data, *new_data;
+ u32 id = le32_to_cpu(notif->mac_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
+ if (!vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+ if (!new_data)
+ return;
+
+ memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+	/* noa_attr contains 1 reserved byte, need to subtract it */
+ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+ sizeof(new_data->notif.noa_attr) - 1;
+
+ /*
+	 * If it's a one-time NoA, only one descriptor is needed,
+ * adjust the length according to len_low.
+ */
+ if (new_data->notif.noa_attr.len_low ==
+ sizeof(struct ieee80211_p2p_noa_desc) + 2)
+ new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+ old_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
+ lockdep_is_held(&mvmvif->mvm->mutex));
+ rcu_assign_pointer(mvmvif->deflink.probe_resp_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+
+ if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+ notif->csa_counter >= 1)
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
+}
+
+void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct ieee80211_vif *csa_vif, *vif;
+ struct iwl_mvm_vif *mvmvif, *csa_mvmvif;
+ u32 id_n_color, csa_id;
+ /* save mac_id or link_id to use later to cancel csa if needed */
+ u32 id;
+ u32 mac_link_id = 0;
+ u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_START_NOTIF, 0);
+ bool csa_active;
+
+ rcu_read_lock();
+
+ if (notif_ver < 3) {
+ struct iwl_channel_switch_start_notif_v1 *notif = (void *)pkt->data;
+ u32 mac_id;
+
+ id_n_color = le32_to_cpu(notif->id_and_color);
+ mac_id = id_n_color & FW_CTXT_ID_MSK;
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, mac_id, true);
+ if (!vif)
+ goto out_unlock;
+
+ id = mac_id;
+ csa_active = vif->bss_conf.csa_active;
+ } else {
+ struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
+ u32 link_id = le32_to_cpu(notif->link_id);
+
+ /* we use link ID == MAC ID */
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, link_id, true);
+ if (!vif)
+ goto out_unlock;
+
+ id = link_id;
+ mac_link_id = vif->bss_conf.link_id;
+ csa_active = vif->bss_conf.csa_active;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (notif_ver >= 3)
+ id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (WARN_ON(!csa_vif || !csa_vif->bss_conf.csa_active ||
+ csa_vif != vif))
+ goto out_unlock;
+
+ csa_mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+ csa_id = FW_CMD_ID_AND_COLOR(csa_mvmvif->id, csa_mvmvif->color);
+ if (WARN(csa_id != id_n_color,
+ "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+ csa_id, id_n_color))
+ goto out_unlock;
+
+ IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+
+ schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
+ msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+ csa_vif->bss_conf.beacon_int));
+
+ ieee80211_csa_finish(csa_vif, 0);
+
+ rcu_read_unlock();
+
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ return;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * if we don't know about an ongoing channel switch,
+ * make sure FW cancels it
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0) && !csa_active) {
+ IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
+ iwl_mvm_cancel_channel_switch(mvm, vif, id);
+ break;
+ }
+
+ iwl_mvm_csa_client_absent(mvm, vif);
+ cancel_delayed_work(&mvmvif->csa_work);
+ ieee80211_chswitch_done(vif, true, mac_link_id);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
+out_unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+ u32 id = le32_to_cpu(notif->link_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ rcu_read_lock();
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif) {
+ rcu_read_unlock();
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
+ id, csa_err_mask);
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif);
+ rcu_read_unlock();
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mac80211.c
new file mode 100644
index 000000000000..f32398213ab8
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,6626 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/kernel.h>
+#include <linux/fips.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/time.h>
+#if defined(__FreeBSD__)
+#include <linux/math64.h>
+#endif
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+#include <net/tcp.h>
+#if defined(__FreeBSD__)
+#include <linux/udp.h>
+#endif
+
+#include "iwl-drv.h"
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-nvm-utils.h"
+#include "iwl-phy-db.h"
+#ifdef CONFIG_NL80211_TESTMODE
+#include "testmode.h"
+#endif
+#include "fw/error-dump.h"
+#include "iwl-prph.h"
+#include "iwl-nvm-parse.h"
+#include "time-sync.h"
+
+#define IWL_MVM_LIMITS(ap) \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
+static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
+ IWL_MVM_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
+ IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
+};
+
+static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits),
+ },
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap),
+ },
+};
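+/*
+ * i.e. a station plus a P2P client/GO plus a P2P device on up to two
+ * channels, or the same set with an AP allowed as long as everything
+ * stays on a single channel.
+ */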
+
+static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
+ .max_peers = IWL_TOF_MAX_APS,
+ .report_ap_tsf = 1,
+ .randomize_mac_addr = 1,
+
+ .ftm = {
+ .supported = 1,
+ .asap = 1,
+ .non_asap = 1,
+ .request_lci = 1,
+ .request_civicloc = 1,
+ .trigger_based = 1,
+ .non_trigger_based = 1,
+ .max_bursts_exponent = -1, /* all supported */
+ .max_ftms_per_burst = 0, /* no limits */
+ .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ .preambles = BIT(NL80211_PREAMBLE_LEGACY) |
+ BIT(NL80211_PREAMBLE_HT) |
+ BIT(NL80211_PREAMBLE_VHT) |
+ BIT(NL80211_PREAMBLE_HE),
+ },
+};
+
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+
+static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
+{
+ int i;
+
+ memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ mvm->phy_ctxts[i].id = i;
+ mvm->phy_ctxts[i].ref = 0;
+ }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcc_update_resp_v8 *resp;
+ u8 resp_ver;
+
+ IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+ if (IS_ERR_OR_NULL(resp)) {
+ IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+ PTR_ERR_OR_ZERO(resp));
+ resp = NULL;
+ goto out;
+ }
+
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
+ resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+ IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);
+
+ regd = iwl_parse_nvm_mcc_info(mvm->trans,
+ __le32_to_cpu(resp->n_channels),
+ resp->channels,
+ __le16_to_cpu(resp->mcc),
+ __le16_to_cpu(resp->geo_info),
+ le32_to_cpu(resp->cap), resp_ver);
+	/* Store the returned source id */
+ src_id = resp->source_id;
+ if (IS_ERR_OR_NULL(regd)) {
+		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
+ PTR_ERR_OR_ZERO(regd));
+ goto out;
+ }
+
+ IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+ regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+ mvm->lar_regdom_set = true;
+ mvm->mcc_src = src_id;
+
+ iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
+
+out:
+ kfree(resp);
+ return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+ bool changed;
+ struct ieee80211_regdomain *regd;
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+ if (!IS_ERR_OR_NULL(regd)) {
+ /* only update the regulatory core if changed */
+ if (changed)
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+ kfree(regd);
+ }
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed)
+{
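+	/* "ZZ" is the placeholder alpha2 that asks the firmware for its
+	 * current MCC rather than a specific country
+	 */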
+ return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+ iwl_mvm_is_wifi_mcc_supported(mvm) ?
+ MCC_SOURCE_GET_CURRENT :
+ MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync)
+{
+ enum iwl_mcc_source used_src;
+ struct ieee80211_regdomain *regd;
+ int ret;
+ bool changed;
+ const struct ieee80211_regdomain *r =
+ wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd);
+
+ if (!r)
+ return -ENOENT;
+
+ /* save the last source in case we overwrite it below */
+ used_src = mvm->mcc_src;
+ if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+ /* Notify the firmware we support wifi location updates */
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (!IS_ERR_OR_NULL(regd))
+ kfree(regd);
+ }
+
+ /* Now set our last stored MCC and source */
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
+ &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ /* update cfg80211 if the regdomain was changed or the caller explicitly
+ * asked to update regdomain
+ */
+ if (changed || force_regd_sync)
+ ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
+ else
+ ret = 0;
+
+ kfree(regd);
+ return ret;
+}
+
+/* Each capability added here should also be added to tm_if_types_ext_capa_sta */
+static const u8 he_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+};
+
+static const u8 tm_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT |
+ WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+/* Additional interface types for which extended capabilities are
+ * specified separately
+ */
+
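+/* EMLSR supported, advertising a 32 usec padding delay and a 64 usec
+ * transition delay in the EML capabilities field
+ */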
+#define IWL_MVM_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS (FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
+
+static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = he_if_types_ext_capa_sta,
+ .extended_capabilities_mask = he_if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
+ },
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = tm_if_types_ext_capa_sta,
+ .extended_capabilities_mask = tm_if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
+ },
+};
+
+int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ *tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+ *rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ return 0;
+}
+
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
+ u32 rx_ant)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* This has been tested only on these devices */
+ if (mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_AX210)
+ return -EOPNOTSUPP;
+
+ if (!mvm->nvm_data)
+ return -EBUSY;
+
+ /* mac80211 ensures the device is not started,
+ * so the firmware cannot be running
+ */
+
+ mvm->set_tx_ant = tx_ant;
+ mvm->set_rx_ant = rx_ant;
+
+ iwl_reinit_cab(mvm->trans, mvm->nvm_data, tx_ant, rx_ant, mvm->fw);
+
+ return 0;
+}
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ int num_mac, ret, i;
+ static const u32 mvm_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
+#ifdef CONFIG_PM_SLEEP
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+#endif
+ u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
+
+ /* Tell mac80211 our characteristics */
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+
+ /* Set this early since we need to have it for the check below */
+ if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+		/* we already handled this earlier, but need it for MLO */
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ }
+
+	/* With the MLD FW API, the firmware tracks timing by itself;
+ * no need for any timing from the host
+ */
+ if (!mvm->mld_api_is_used)
+ ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+
+ /*
+	 * On older devices, enabling TX A-MSDU occasionally leads to
+	 * something getting messed up: the command read from the FIFO
+	 * gets out of sync and isn't a TX command, so we get an
+	 * assert EDC.
+	 *
+	 * It's not clear where the bug is, but since we didn't support
+	 * A-MSDU until moving to the mac80211 iTXQs, just leave it
+	 * disabled for older devices. We also don't see this issue on
+	 * any newer devices.
+ */
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ }
+
+	/* We want to use mac80211's reorder buffer for 9000 */
+ if (iwl_mvm_has_new_rx_api(mvm) &&
+ mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_9000)
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+ /*
+ * we absolutely need this for the new TX API since that comes
+ * with many more queues than the current code can deal with
+ * for station powersave
+ */
+ return -EINVAL;
+ }
+
+ if (mvm->trans->info.num_rxqs > 1)
+ ieee80211_hw_set(hw, USES_RSS);
+
+ if (mvm->trans->info.max_skb_frags)
+ hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+
+ hw->queues = IEEE80211_NUM_ACS;
+ hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+	/* accuracy of 22 applies to CCK frames; it's better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+
+ if (!iwl_mvm_has_tlc_offload(mvm))
+ hw->rate_control_algorithm = RS_NAME;
+
+ hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ hw->max_tx_fragments = mvm->trans->info.max_skb_frags;
+
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
+ memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+ hw->wiphy->cipher_suites = mvm->ciphers;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP_256;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ if (iwlwifi_mod_params.swcrypto)
+ IWL_ERR(mvm,
+ "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n");
+ if (!iwlwifi_mod_params.bt_coex_active)
+ IWL_ERR(mvm,
+ "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");
+
+ if (!fips_enabled)
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
+ hw->wiphy->n_cipher_suites++;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_256;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+ hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
+ }
+
+ /*
+	 * beacon protection must be handled by the firmware,
+	 * so it cannot be offered when fips_enabled
+ */
+ if (!fips_enabled && sec_key_ver &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION);
+ else if (!fips_enabled &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
+ hw->wiphy->hw_timestamp_max_peers = 1;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ hw->wiphy->features |=
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+
+ hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+ hw->vif_data_size = sizeof(struct iwl_mvm_vif);
+ hw->chanctx_data_size = sizeof(u16);
+ hw->txq_data_size = sizeof(struct iwl_mvm_txq);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+	/* The new Tx API does not allow passing the key or key ID of an MPDU
+	 * to the hw, preventing us from controlling which key (ID) to use per
+	 * MPDU. Until that's fixed, we can't use Extended Key ID on the newer
+	 * cards.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_EXT_KEY_ID);
+ hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;
+
+ hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+ if (iwl_mvm_is_lar_supported(mvm))
+ hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ else
+ hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS;
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT);
+
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+
+ hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwl_mvm_iface_combinations);
+
+ hw->wiphy->max_remain_on_channel_duration = 10000;
+ hw->max_listen_interval = IWL_MVM_CONN_LISTEN_INTERVAL;
+
+ /* Extract MAC address */
+ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
+ hw->wiphy->addresses = mvm->addresses;
+ hw->wiphy->n_addresses = 1;
+
+ /* Extract additional MAC addresses if available */
+ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
+ min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
+
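+	/* derive each extra address from the previous one by bumping
+	 * the last octet, e.g. ...:44:55 -> ...:44:56
+	 */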
+ for (i = 1; i < num_mac; i++) {
+ memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
+ ETH_ALEN);
+ mvm->addresses[i].addr[5]++;
+ hw->wiphy->n_addresses++;
+ }
+
+ iwl_mvm_reset_phy_ctxts(mvm);
+
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+
+ BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+ BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ mvm->max_scans = IWL_MAX_UMAC_SCANS;
+ else
+ mvm->max_scans = IWL_MAX_LMAC_SCANS;
+
+ if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) &&
+ mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels)
+ hw->wiphy->bands[NL80211_BAND_6GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
+
+ hw->wiphy->hw_version = mvm->trans->info.hw_id;
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_sched_scan_reqs = 1;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw);
+	/* we create the 802.11 header and a zero-length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
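+	/* (24 = management frame header, 2 = empty SSID element) */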
+ hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+ hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
+
+ /*
+	 * the firmware uses a u8 for the number of iterations, but 0xff
+	 * is reserved for an infinite loop, so the maximum number of
+	 * iterations is actually 254.
+ */
+ hw->wiphy->max_sched_scan_plan_iterations = 254;
+
+ hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
+
+	/* when the firmware supports RLC/SMPS offload, do not set these
+	 * driver features, since they are no longer supported by the
+	 * driver.
+ */
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS |
+ NL80211_FEATURE_DYNAMIC_SMPS;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_QUIET;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ hw->wiphy->features |=
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN) >= 3)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ }
+
+ if (iwl_mvm_is_oce_supported(mvm)) {
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);
+
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+
+ /* Old firmware also supports probe deferral and suppression */
+ if (scan_ver < 15)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
+ }
+
+ hw->wiphy->iftype_ext_capab = NULL;
+ hw->wiphy->num_iftype_ext_capab = 0;
+
+ if (mvm->nvm_data->sku_cap_11ax_enable &&
+ !iwlwifi_mod_params.disable_11ax) {
+ hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(add_iftypes_ext_capa) - 1;
+
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
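+	/*
+	 * If the firmware supports 802.11v timing measurement, advertise
+	 * the tm_ variant of the STA extended capabilities (the last
+	 * entry above) instead of the plain HE one.
+	 */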
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 1) {
+ IWL_DEBUG_INFO(mvm->trans, "Timing measurement supported\n");
+
+ if (!hw->wiphy->iftype_ext_capab) {
+ hw->wiphy->num_iftype_ext_capab = 1;
+ hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa +
+ ARRAY_SIZE(add_iftypes_ext_capa) - 1;
+ } else {
+ hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + 1;
+ }
+ }
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
+ mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+#ifdef CONFIG_PM_SLEEP
+ if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
+ device_can_wakeup(mvm->trans->dev) && !fips_enabled) {
+ mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT;
+ mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mvm->wowlan.max_nd_match_sets =
+ iwl_umac_scan_get_max_profiles(mvm->fw);
+ hw->wiphy->wowlan = &mvm->wowlan;
+ }
+#endif
+
+ ret = iwl_mvm_leds_init(mvm);
+ if (ret)
+ return ret;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
+ IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
+ hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+ }
+
+ hw->netdev_features |= mvm->trans->mac_cfg->base->features;
+ if (!iwl_mvm_is_csum_supported(mvm))
+ hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
+
+ if (mvm->cfg->vht_mu_mimo_supported)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROTECTED_TWT);
+
+ iwl_mvm_vendor_cmds_register(mvm);
+
+ hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
+ hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
+
+ ret = ieee80211_register_hw(mvm->hw);
+	if (ret)
+		iwl_mvm_leds_exit(mvm);
+
+ return ret;
+}
+
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ if (likely(sta)) {
+ if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+ return;
+ } else {
+ if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+ return;
+ }
+
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
+void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ struct ieee80211_sta *tmp_sta = sta;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
+ goto drop;
+ }
+
+ if (offchannel &&
+ !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
+ !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+ goto drop;
+
+ /*
+	 * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs,
+	 * so we treat the others as broadcast
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ sta = NULL;
+
+ /* this shouldn't even happen: just drop */
+ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
+ !offchannel)
+ goto drop;
+
+ if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED &&
+ !ieee80211_is_probe_resp(hdr->frame_control)) {
+ /* translate MLD addresses to LINK addresses */
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(tmp_sta->link[link_id]);
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(info->control.vif->link_conf[link_id]);
+ struct ieee80211_mgmt *mgmt;
+
+ if (WARN_ON(!link_sta || !link_conf))
+ goto drop;
+
+ /* if sta is NULL, the frame is a management frame */
+ mgmt = (void *)hdr;
+ memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
+ memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
+ memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
+ }
+
+ iwl_mvm_tx_skb(mvm, skb, sta);
+ return;
+ drop:
+ ieee80211_free_txskb(hw, skb);
+}
+
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ struct sk_buff *skb = NULL;
+
+ /*
+	 * No need for threads to wait here; they can leave all the
+	 * work to the first taker.
+ *
+ * mvmtxq->tx_request logic:
+ *
+ * If 0, no one is currently TXing, set to 1 to indicate current thread
+ * will now start TX and other threads should quit.
+ *
+ * If 1, another thread is currently TXing, set to 2 to indicate to
+ * that thread that there was another request. Since that request may
+ * have raced with the check whether the queue is empty, the TXing
+ * thread should check the queue's status one more time before leaving.
+ * This check is done in order to not leave any TX hanging in the queue
+ * until the next TX invocation (which may not even happen).
+ *
+ * If 2, another thread is currently TXing, and it will already double
+ * check the queue, so do nothing.
+ */
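+	/* in short: 0 -> 1 claims the work, 1 -> 2 asks the owner to
+	 * re-check once more, 2 stays 2 (nothing further to record)
+	 */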
+ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
+ return;
+
+ rcu_read_lock();
+ do {
+ while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
+ &mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
+ &mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA,
+ &mvmtxq->state) &&
+ !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
+ skb = ieee80211_tx_dequeue(hw, txq);
+
+ if (!skb) {
+ if (txq->sta)
+ IWL_DEBUG_TX(mvm,
+ "TXQ of sta %pM tid %d is now empty\n",
+ txq->sta->addr,
+ txq->tid);
+ break;
+ }
+
+ iwl_mvm_tx_skb(mvm, skb, txq->sta);
+ }
+ } while (atomic_dec_return(&mvmtxq->tx_request));
+ rcu_read_unlock();
+}
+
+void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ iwl_mvm_mac_itxq_xmit(hw, txq);
+ return;
+ }
+
+ /* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+ * to handle any packets we leave on the txq now
+ */
+
+ spin_lock_bh(&mvm->add_stream_lock);
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (list_empty(&mvmtxq->list) &&
+ /* recheck under lock */
+ !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+ list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+ schedule_work(&mvm->add_stream_wk);
+ }
+ spin_unlock_bh(&mvm->add_stream_lock);
+}
+
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
+ do { \
+ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
+ break; \
+ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
+ } while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+ enum ieee80211_ampdu_mlme_action action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
+ return;
+
+ ba_trig = (void *)trig->data;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_OPERATIONAL: {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+ "TX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, tid_data->ssn);
+ break;
+ }
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+ "TX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+ "RX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, rx_ba_ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+ "RX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ default:
+ break;
+ }
+}
+
+int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u16 buf_size = params->buf_size;
+ bool amsdu = params->amsdu;
+ u16 timeout = params->timeout;
+
+ IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
+ sta->addr, tid, action);
+
+ if (!(mvm->nvm_data->sku_cap_11n_enable))
+ return -EACCES;
+
+ mutex_lock(&mvm->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwl_mvm_vif_from_mac80211(vif)->deflink.ap_sta_id ==
+ iwl_mvm_sta_from_mac80211(sta)->deflink.sta_id) {
+ struct iwl_mvm_vif *mvmvif;
+ u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
+
+ mdata->opened_rx_ba_sessions = true;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
+ }
+ if (!iwl_enable_rx_ampdu()) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (!iwl_enable_tx_ampdu()) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+ buf_size, amsdu);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ u16 rx_ba_ssn = 0;
+
+ if (action == IEEE80211_AMPDU_RX_START)
+ rx_ba_ssn = *ssn;
+
+ iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+ rx_ba_ssn, action);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_probe_resp_data *probe_data;
+ unsigned int link_id;
+
+ mvmvif->uploaded = false;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ mvmvif->bf_enabled = false;
+ mvmvif->ba_enabled = false;
+ mvmvif->ap_sta = NULL;
+
+ mvmvif->esr_active = false;
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ mvmvif->link[link_id]->ap_sta_id = IWL_INVALID_STA;
+ mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->link[link_id]->phy_ctxt = NULL;
+ mvmvif->link[link_id]->active = 0;
+ mvmvif->link[link_id]->igtk = NULL;
+ memset(&mvmvif->link[link_id]->bf_data, 0,
+ sizeof(mvmvif->link[link_id]->bf_data));
+ }
+
+ probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
+ lockdep_is_held(&mvm->mutex));
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+ RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
+}
+
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct ieee80211_vif *vif;
+ int link_id;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvm_sta->vif;
+
+ if (!sta->valid_links)
+ return;
+
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+ /*
+ * We have a link STA but the link is inactive in
+			 * mac80211. This will happen if we failed to
+			 * deactivate the link but mac80211 rolled back the
+			 * deactivation of the link.
+ * Delete the stale data to avoid issues later on.
+ */
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+ link_id);
+ }
+ }
+}
+
+static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+{
+ iwl_mvm_stop_device(mvm);
+
+ mvm->cur_aid = 0;
+
+ mvm->scan_status = 0;
+ mvm->ps_disabled = false;
+ mvm->rfkill_safe_init_done = false;
+
+ /* just in case one was running */
+ iwl_mvm_cleanup_roc_te(mvm);
+ ieee80211_remain_on_channel_expired(mvm->hw);
+
+ iwl_mvm_ftm_restart(mvm);
+
+ /*
+	 * clean up all interfaces, even inactive ones, as some might have
+ * gone down during the HW restart
+ */
+ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+
+	/* clean up stations as links may be gone after restart */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_cleanup_sta_iterator, mvm);
+
+ mvm->p2p_device_vif = NULL;
+
+ iwl_mvm_reset_phy_ctxts(mvm);
+ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+ memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+ memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+ ieee80211_wake_queues(mvm->hw);
+
+ mvm->rx_ba_sessions = 0;
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ mvm->monitor_on = false;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->beacon_inject_active = false;
+#endif
+
+ /* keep statistics ticking */
+ iwl_mvm_accu_radio_stats(mvm);
+}
+
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
+{
+ bool fast_resume = false;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_mei_get_ownership(mvm);
+ if (ret)
+ return ret;
+
+ if (mvm->mei_nvm_data) {
+ /* We got the NIC, we can now free the MEI NVM data */
+ kfree(mvm->mei_nvm_data);
+ mvm->mei_nvm_data = NULL;
+
+ /*
+ * We can't free the nvm_data we allocated based on the SAP
+ * data because we registered to cfg80211 with the channels
+ * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data
+		 * just in order to be able to free it later.
+ * NULLify nvm_data so that we will read the NVM from the
+ * firmware this time.
+ */
+ mvm->temp_nvm_data = mvm->nvm_data;
+ mvm->nvm_data = NULL;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ /* fast_resume will be cleared by iwl_mvm_fast_resume */
+ fast_resume = mvm->fast_resume;
+
+ if (fast_resume) {
+ iwl_mvm_mei_device_state(mvm, true);
+ ret = iwl_mvm_fast_resume(mvm);
+ if (ret) {
+ iwl_mvm_stop_device(mvm);
+ /* iwl_mvm_up() will be called further down */
+ } else {
+ /*
+ * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
+ * mac_down() so that debugfs will stop honoring
+ * requests after we flush all the workers.
+ * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
+ * now that we are back. This is a bit abusing the
+ * flag since the firmware wasn't really ever stopped,
+ * but this still serves the purpose.
+ */
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ }
+ }
+#endif /* CONFIG_PM_SLEEP */
+
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+ /*
+ * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+ * so later code will - from now on - see that we're doing it.
+ */
+ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+ /* Clean up some internal and mac80211 state on restart */
+ iwl_mvm_restart_cleanup(mvm);
+ }
+
+ /* we also want to load the firmware if fast_resume failed */
+ if (!fast_resume || ret)
+ ret = iwl_mvm_up(mvm);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
+ NULL);
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+
+ if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ /* Something went wrong - we need to finish some cleanup
+ * that normally iwl_mvm_mac_restart_complete() below
+ * would do.
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+ int retry, max_retry = 0;
+
+ mutex_lock(&mvm->mutex);
+
+	/* we are starting the mac outside of an error flow, and restart is enabled */
+ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+ iwlwifi_mod_params.fw_restart)
+ max_retry = IWL_MAX_INIT_RETRY;
+
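+	/* only an init time-out (-ETIMEDOUT) is worth retrying; any other
+	 * failure aborts immediately
+	 */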
+ for (retry = 0; retry <= max_retry; retry++) {
+ ret = __iwl_mvm_mac_start(mvm);
+ if (ret != -ETIMEDOUT)
+ break;
+
+ IWL_ERR(mvm, "mac start retry %d\n", retry);
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
+ return ret;
+}
+
+static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ guard(mvm)(mvm);
+
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+ ret);
+
+ iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY);
+
+ /*
+ * If we have TDLS peers, remove them. We don't know the last seqno/PN
+ * of packets the FW sent out, so we must reconnect.
+ */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ IWL_INFO(mvm, "restart completed\n");
+ iwl_trans_finish_sw_reset(mvm->trans);
+
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mvm->add_stream_txqs))
+ schedule_work(&mvm->add_stream_wk);
+}
+
+void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ iwl_mvm_restart_complete(mvm);
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ break;
+ }
+}
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_ftm_initiator_smooth_stop(mvm);
+
+	/* firmware counters are obviously reset now, but we shouldn't
+	 * keep partial data either, so also clear the accumulated
+	 * radio-statistics counters.
+	 */
+ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
+ /* async_handlers_wk is now blocked */
+
+ if (!iwl_mvm_has_new_station_api(mvm->fw))
+ iwl_mvm_rm_aux_sta(mvm);
+
+ if (suspend &&
+ mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ iwl_mvm_fast_suspend(mvm);
+ /* From this point on, we won't touch the device */
+ iwl_mvm_mei_device_state(mvm, false);
+ } else {
+ iwl_mvm_stop_device(mvm);
+ }
+
+ iwl_mvm_async_handlers_purge(mvm);
+ /* async_handlers_list is empty and will stay empty: HW is stopped */
+
+	/*
+	 * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when
+	 * stopping the hw (as restart_complete() won't be called in this
+	 * case) and mac80211 won't execute the restart.
+	 * But make sure to clean up interfaces that went down before or
+	 * while the HW restart was requested.
+	 */
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_cleanup_iterator, mvm);
+
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int i;
+
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
+ }
+ }
+}
+
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Stop internal MLO scan, if running */
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ mutex_unlock(&mvm->mutex);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
+ wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
+ flush_work(&mvm->async_handlers_wk);
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * Lock and clear the firmware running bit here already, so that
+ * new commands coming in elsewhere, e.g. from debugfs, will not
+ * be able to proceed. This is important here because one of those
+ * debugfs files causes the firmware dump to be triggered, and if we
+ * don't stop debugfs accesses before canceling that it could be
+ * retriggered after we flush it but before we've cleared the bit.
+ */
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+ cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_mac_stop(mvm, suspend);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * The worker might have been waiting for the mutex, let it run and
+ * discover that its list is now empty.
+ */
+ cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
+}
+
+struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+{
+ u16 i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < NUM_PHY_CTX; i++)
+ if (!mvm->phy_ctxts[i].ref)
+ return &mvm->phy_ctxts[i];
+
+ IWL_ERR(mvm, "No available PHY context\n");
+ return NULL;
+}
+
+int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
+ struct ieee80211_bss_conf *link_conf,
+ s16 tx_power)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(link_conf->vif);
+ u32 mac_id = mvmvif->id;
+ int len;
+ struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+ .common.link_id = cpu_to_le32(mac_id),
+ };
+ struct iwl_dev_tx_power_cmd cmd_v9_v10;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+ u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power;
+ void *cmd_data = &cmd;
+
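+	/* pwr_restriction is expressed in eighth-dBm steps (hence the
+	 * 8 * tx_power scaling above); IWL_DEFAULT_MAX_TX_POWER selects
+	 * IWL_DEV_MAX_TX_POWER, i.e. no restriction
+	 */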
+ cmd.common.pwr_restriction = cpu_to_le16(u_tx_power);
+
+ if (cmd_ver > 8) {
+ u32 link_id;
+
+ if (WARN_ON(!mvmvif->link[link_conf->link_id]))
+ return -ENODEV;
+
+ link_id = mvmvif->link[link_conf->link_id]->fw_link_id;
+
+ /* Those fields sit on the same place for v9 and v10 */
+ cmd_v9_v10.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK);
+ cmd_v9_v10.common.link_id = cpu_to_le32(link_id);
+ cmd_v9_v10.common.pwr_restriction = cpu_to_le16(u_tx_power);
+ cmd_data = &cmd_v9_v10;
+ }
+
+ if (cmd_ver == 10)
+ len = sizeof(cmd_v9_v10.v10);
+ else if (cmd_ver == 9)
+ len = sizeof(cmd_v9_v10.v9);
+ else if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
+ else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+ len = sizeof(cmd.v5);
+ else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v4);
+ else
+ len = sizeof(cmd.v3);
+
+ /* all structs have the same common part, add its length */
+ len += sizeof(cmd.common);
+
+ if (cmd_ver < 9)
+ len += sizeof(cmd.per_band);
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
+
+}
+
+static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]);
+ }
+}
+
+int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+ unsigned int link_id = link_conf->link_id;
+ u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id;
+
+ mvmvif->csa_bcn_pending = false;
+ mvmvif->csa_blocks_tx = false;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ if (mvm->mld_api_is_used)
+ iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
+ else
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+ } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(vif->txq);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+
+ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(hw, vif->txq);
+ ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw);
+ local_bh_enable();
+
+ mvmvif->csa_blocks_tx = false;
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+
+out_unlock:
+ if (mvmvif->csa_failed)
+ ret = -EIO;
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+
+	/*
+	 * In the new flow the FW is in charge of the timing, so if the
+	 * driver has canceled the channel switch it will receive the
+	 * CHANNEL_SWITCH_START_NOTIF notification from the FW and can
+	 * cancel it then
+	 */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
+ IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
+
+ mutex_lock(&mvm->mutex);
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_remove_csa_period(mvm, vif);
+ else
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
+ mvmvif->csa_failed = true;
+ mutex_unlock(&mvm->mutex);
+
+ /* If we're here, we can't support MLD */
+ iwl_mvm_post_channel_switch(hw, vif, &vif->bss_conf);
+}
+
+void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
+{
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ /* Trigger disconnect (should clear the CSA state) */
+ ieee80211_chswitch_done(vif, false, 0);
+}
+
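+/*
+ * Return the index of the 80 MHz subblock that contains the control
+ * channel within a 160/320 MHz chandef. A worked example (values for
+ * illustration only): a 160 MHz chandef with center_freq1 = 5250 MHz
+ * has data_start = 5170; with the control channel at 5300 MHz,
+ * control_start = 5290 and (5290 - 5170) / 80 = 1, i.e. the second
+ * 80 MHz block.
+ */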
+static u8
+iwl_mvm_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
+{
+ int data_start;
+ int control_start;
+ int bw;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_320)
+ bw = 320;
+ else if (chandef->width == NL80211_CHAN_WIDTH_160)
+ bw = 160;
+ else
+ return 0;
+
+	/* the data part is bw wide, so its start is half the width below
+	 * the center frequency
+	 */
+	data_start = chandef->center_freq1 - bw / 2;
+	/* the control channel is 20 MHz wide */
+	control_start = chandef->chan->center_freq - 10;
+
+ return (control_start - data_start) / 80;
+}
+
+static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+ return ret;
+ }
+
+	/* The only queue for this station is the mcast queue,
+	 * which shouldn't be in the TFD mask anyway
+	 */
+ return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.mcast_sta, 0,
+ vif->type,
+ IWL_STA_MULTICAST);
+}
+
+static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
+}
+
+static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
+ mlo_int_scan_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvmvif->mvm);
+ iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
+}
+
+static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif,
+ unblock_esr_tmp_non_bss_wk.work);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
+ mutex_unlock(&mvm->mutex);
+}
+
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ mvmvif->deflink.average_beacon_energy = 0;
+
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
+
+ wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
+ iwl_mvm_prevent_esr_done_wk);
+
+ wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
+ iwl_mvm_mlo_int_scan_wk);
+
+ wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
+ iwl_mvm_unblock_esr_tpt);
+
+ wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk,
+ iwl_mvm_unblock_esr_tmp_non_bss);
+}
+
+static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+ int i;
+
+ mutex_lock(&mvm->mutex);
+
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
+ mvmvif->mvm = mvm;
+
+ /* the first link always points to the default one */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
+ mvmvif->link[0] = &mvmvif->deflink;
+
+ vif->driver_flags = IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
+ iwl_mvm_set_link_fw_id(mvm, vif, &vif->bss_conf);
+
+ /*
+ * Not much to do here. The stack will not allow interface
+ * types or combinations that we didn't advertise, so we
+ * don't really have to check the types.
+ */
+
+ /* make sure that beacon statistics don't go backwards with FW reset */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ mvmvif->link[i]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[i]->beacon_stats.num_beacons;
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ goto out;
+
+ rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
+
+ /*
+ * The AP binding flow can be done only after the beacon
+ * template is configured (which happens only in the mac80211
+ * start_ap() flow), and adding the broadcast station can happen
+ * only after the binding.
+ * In addition, since modifying the MAC before adding a bcast
+ * station is not allowed by the FW, delay the adding of MAC context to
+ * the point where we can also add the bcast station.
+ * In short: there's not much we can do at this point, other than
+ * allocating resources :)
+ */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ ret = 0;
+ goto out;
+ }
+
+ mvmvif->features |= hw->netdev_features;
+
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out_remove_mac;
+
+ /* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_remove_mac;
+
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+ mvm->bf_allowed_vif = mvmvif;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvm->monitor_on = true;
+ mvm->monitor_p80 =
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
+ }
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !mvm->csme_vif && mvm->mei_registered) {
+ iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
+ iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
+ mvm->csme_vif = vif;
+ }
+
+out:
+ if (!ret && (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC))
+ ret = iwl_mvm_alloc_bcast_mcast_sta(mvm, vif);
+
+ goto out_unlock;
+
+ out_remove_mac:
+ mvmvif->deflink.phy_ctxt = NULL;
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		/*
+		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
+		 * We assume here that all the packets sent to the OFFCHANNEL
+		 * queue are sent in a ROC session.
+		 */
+ flush_work(&mvm->roc_done_wk);
+ }
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tmp_non_bss_wk);
+
+ cancel_delayed_work_sync(&mvmvif->csa_work);
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_probe_resp_data *probe_data;
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
+ if (!(vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC))
+ iwl_mvm_tcm_rm_vif(mvm, vif);
+
+ mutex_lock(&mvm->mutex);
+
+ if (vif == mvm->csme_vif) {
+ iwl_mei_set_netdev(NULL);
+ mvm->csme_vif = NULL;
+ }
+
+ probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
+ lockdep_is_held(&mvm->mutex));
+ RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+
+ if (vif->bss_conf.ftm_responder)
+ memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
+
+ iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
+
+	/*
+	 * For AP/GO interfaces, the teardown of the resources allocated to
+	 * the interface is handled as part of the stop_ap flow.
+	 */
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ goto out;
+
+ iwl_mvm_power_update_mac(mvm);
+
+ /* Before the interface removal, mac80211 would cancel the ROC, and the
+ * ROC worker would be scheduled if needed. The worker would be flushed
+ * in iwl_mvm_prepare_mac_removal() and thus at this point there is no
+ * binding etc. so nothing needs to be done here.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (mvmvif->deflink.phy_ctxt) {
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ mvmvif->deflink.phy_ctxt = NULL;
+ }
+ mvm->p2p_device_vif = NULL;
+ }
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+ RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = false;
+
+out:
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta);
+ iwl_mvm_dealloc_bcast_sta(mvm, vif);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+struct iwl_mvm_mc_iter_data {
+ struct iwl_mvm *mvm;
+ int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_mc_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret, len;
+
+ /* if we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->cfg.assoc)
+ return;
+
+ cmd->port_id = data->port_id++;
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret)
+ IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_mc_iter_data iter_data = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mc_iface_iterator, &iter_data);
+
+ /*
+	 * Send a (synchronous) echo command so that we wait for the
+ * multiple asynchronous MCAST_FILTER_CMD commands sent by
+ * the interface iterator. Otherwise, we might get here over
+ * and over again (by userspace just sending a lot of these)
+ * and the CPU can send them faster than the firmware can
+ * process them.
+ * Note that the CPU is still faster - but with this we'll
+ * actually send fewer commands overall because the CPU will
+ * not schedule the work in mac80211 as frequently if it's
+ * still running when rescheduled (possibly multiple times).
+ */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
+}
+
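+/*
+ * mac80211 treats the return value of prepare_multicast() as an opaque
+ * u64 cookie that is later handed back to configure_filter(); we use it
+ * to carry the pointer to the MCAST_FILTER_CMD allocated here, hence
+ * the pointer/u64 casts below.
+ */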
+u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count;
+ bool pass_all;
+ int len;
+
+ addr_count = netdev_hw_addr_list_count(mc_list);
+ pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
+ IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
+ if (pass_all)
+ addr_count = 0;
+
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+#if defined(__linux__)
+ return (u64)(unsigned long)cmd;
+#elif defined(__FreeBSD__)
+ return (u64)(uintptr_t)cmd;
+#endif
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+#if defined(__linux__)
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+#elif defined(__FreeBSD__)
+ IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n",
+ cmd->count, addr->addr, ":");
+#endif
+ memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr, ETH_ALEN);
+ cmd->count++;
+ }
+
+#if defined(__linux__)
+ return (u64)(unsigned long)cmd;
+#elif defined(__FreeBSD__)
+ return (u64)(uintptr_t)cmd;
+#endif
+}
+
+void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+#if defined(__linux__)
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+#elif defined(__FreeBSD__)
+ struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast;
+#endif
+
+ guard(mvm)(mvm);
+
+ /* replace previous configuration */
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = cmd;
+
+ if (!cmd)
+ goto out;
+
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
+ iwl_mvm_recalc_multicast(mvm);
+out:
+ *total_flags = 0;
+}
+
+static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int filter_flags,
+ unsigned int changed_flags)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We only support filtering of probe requests */
+ if (!(changed_flags & FIF_PROBE_REQ))
+ return;
+
+ /* Supported only for p2p client interfaces */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
+ !vif->p2p)
+ return;
+
+ guard(mvm)(mvm);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+}
+
+int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mu_group_mgmt_cmd cmd = {};
+
+ memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+ WLAN_USER_POSITION_LEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ UPDATE_MU_GROUPS_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.mu_mimo_owner) {
+ struct iwl_mu_group_mgmt_notif *notif = _data;
+
+ /*
+ * MU-MIMO Group Id action frame is little endian. We treat
+ * the data received from firmware as if it came from the
+ * action frame, so no conversion is needed.
+ */
+ ieee80211_update_mu_groups(vif, 0,
+ (u8 *)&notif->membership_status,
+ (u8 *)&notif->user_position);
+ }
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
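+/*
+ * Extract one PPET value (IEEE80211_PPE_THRES_INFO_PPET_SIZE, i.e. 3,
+ * bits wide) starting at the given bit position. When the value
+ * straddles a byte boundary (bit_num > 5) the bits are recombined;
+ * e.g. for ppe_pos_bit = 6 the two low bits come from bits 6-7 of
+ * ppe[byte_num] and the high bit from bit 0 of ppe[byte_num + 1].
+ */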
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+ /*
+ * If bit_num > 5, we have to combine bits with next byte.
+ * Calculate how many bits we need to take from current byte (called
+ * here "residue_bits"), and add them to bits from next byte.
+ */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
+
+static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
+ bool inheritance)
+{
+ int i;
+
+ /*
+ * FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
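+	/*
+	 * ru_index_bitmap is pre-shifted left by one so that the first
+	 * ">>= 1" in the BW loop undoes the shift; each iteration then
+	 * tests the bit of the original bitmap for the current BW index.
+	 */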
+ for (i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+			/*
+			 * According to the 11be spec, if the PPE Thresholds
+			 * aren't present for a specific BW, the thresholds of
+			 * the last BW for which they were present should be
+			 * inherited. In 11ax though, we don't have this
+			 * inheritance - continue in this case
+			 */
+ if (!(ru_index_tmp & 1)) {
+ if (inheritance)
+ goto set_thresholds;
+ else
+ continue;
+ }
+
+ high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+set_thresholds:
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext,
+ bool inheritance)
+{
+ u8 nss = (link_sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
+ inheritance);
+}
+
+static int
+iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ int low_th = -1;
+ int high_th = -1;
+ int i;
+
+ /* all the macros are the same for EHT and HE */
+ switch (nominal_padding) {
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ if (low_th < 0 || high_th < 0)
+ return -EINVAL;
+
+ /* Set the PPE thresholds accordingly */
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ return 0;
+}
+
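+/*
+ * For MCS 12-13 the maximum of the HE PPE threshold and the Common
+ * Nominal Packet Padding must be used: padding above 8 us forces the
+ * unset PPET16 threshold to 4096-QAM, and exactly 8 us forces PPET8
+ * when neither threshold was set at all.
+ */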
+static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ int i;
+
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ u8 bw;
+
+ for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ if (nominal_padding >
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+ else if (nominal_padding ==
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+ }
+ }
+}
+
+/* Set the pkt_ext field according to the PPE Thresholds element */
+int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ u8 nominal_padding;
+ int i, ret = 0;
+
+ if (WARN_ON(!link_sta))
+ return -EINVAL;
+
+ /* Initialize the PPE thresholds to "None" (7), as described in Table
+ * 9-262ac of 80211.ax/D3.0.
+ */
+ memset(pkt_ext, IWL_HE_PKT_EXT_NONE,
+ sizeof(struct iwl_he_pkt_ext_v2));
+
+ if (link_sta->eht_cap.has_eht) {
+ nominal_padding =
+ u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ /* If PPE Thresholds exists, parse them into a FW-familiar
+ * format.
+ */
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
+ IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
+ u8 ru_index_bitmap =
+ u16_get_bits(*ppe,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap,
+ ppe, ppe_pos_bit, true);
+			/* EHT PPE Thresholds don't exist - set the API
+			 * according to the HE PPE Thresholds
+			 */
+ } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+			/* Even though the HE Capabilities IE doesn't contain
+			 * PPE Thresholds for BW 320 MHz, thresholds for this
+			 * BW will be filled in with the same values as for
+			 * 160 MHz, due to the inheritance, as required.
+			 */
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext,
+ true);
+
+ /* According to the requirements, for MCSs 12-13 the
+ * maximum value between HE PPE Threshold and Common
+ * Nominal Packet Padding needs to be taken
+ */
+ iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+		/* if PPE Thresholds are present in neither the EHT IE nor
+		 * the HE IE - take the thresholds from the Common Nominal
+		 * Packet Padding field
+		 */
+ } else {
+ ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ } else if (link_sta->he_cap.has_he) {
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext,
+ false);
+		/* PPE Thresholds don't exist - set the API PPE values
+		 * according to the Common Nominal Packet Padding field.
+		 */
+ } else {
+ nominal_padding =
+ u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ }
+
+ for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ int bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th =
+ &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ IWL_DEBUG_HT(mvm,
+ "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
+ i, bw, qam_th[0], qam_th[1]);
+ }
+ }
+ return ret;
+}
+
+/*
+ * This function sets the MU EDCA parameters and returns whether MU EDCA
+ * is enabled or not
+ */
+bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm,
+ const struct iwl_mvm_vif_link_info *link_info,
+ struct iwl_he_backoff_conf *trig_based_txf)
+{
+ int i;
+	/* Assume MU EDCA is enabled, unless it is missing on some AC */
+ bool mu_edca_enabled = true;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &link_info->queue_params[i].mu_edca_param_rec;
+ u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i);
+
+ if (!link_info->queue_params[i].mu_edca) {
+ mu_edca_enabled = false;
+ break;
+ }
+
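+		/*
+		 * ecw_min_max packs ECWmin in the low nibble and ECWmax in
+		 * the high nibble; e.g. 0x94 yields ECWmin = 4 (CWmin 15)
+		 * and ECWmax = 9 (CWmax 511). The exponent form is what the
+		 * firmware seems to expect, so it is passed through as-is.
+		 */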
+ trig_based_txf[ac].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ trig_based_txf[ac].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ trig_based_txf[ac].aifsn =
+ cpu_to_le16(mu_edca->aifsn & 0xf);
+ trig_based_txf[ac].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+
+ return mu_edca_enabled;
+}
+
+bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *own_he_cap = NULL;
+
+ /* This capability is the same for all bands,
+ * so take it from one of them.
+ */
+ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ return (own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN));
+}
+
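+/*
+ * Translate the peer's HE MAC capability bits into the HTC flag word
+ * the firmware consumes (HTC support, link adaptation mode, BSR, OMI
+ * and BQR support).
+ */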
+__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta)
+{
+ u8 *mac_cap_info =
+ &link_sta->he_cap.he_cap_elem.mac_cap_info[0];
+ __le32 htc_flags = 0;
+
+ if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+ if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ htc_flags |=
+ cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+ else if (link_adap == 3)
+ htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+ }
+ if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+ if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+ if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+ return htc_flags;
+}
+
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 sta_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = {
+ .sta_id = sta_id,
+ .tid_limit = IWL_MAX_TID_COUNT,
+ .bss_color = vif->bss_conf.he_bss_color.color,
+ .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+ .frame_time_rts_th =
+ cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+ };
+ struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {};
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD);
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2);
+ int size;
+ struct ieee80211_sta *sta;
+ u32 flags;
+ int i;
+ void *cmd;
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE))
+ ver = 1;
+
+ switch (ver) {
+ case 1:
+ /* same layout as v2 except some data at the end */
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v1);
+ break;
+ case 2:
+ cmd = &sta_ctxt_cmd_v2;
+ size = sizeof(struct iwl_he_sta_context_cmd_v2);
+ break;
+ case 3:
+ cmd = &sta_ctxt_cmd;
+ size = sizeof(struct iwl_he_sta_context_cmd_v3);
+ break;
+ default:
+ IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver);
+ return;
+ }
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+ if (IS_ERR_OR_NULL(sta)) {
+ rcu_read_unlock();
+ WARN(1, "Can't find STA to configure HE\n");
+ return;
+ }
+
+ if (!sta->deflink.he_cap.has_he) {
+ rcu_read_unlock();
+ return;
+ }
+
+ flags = 0;
+
+ /* Block 26-tone RU OFDMA transmissions */
+ if (mvmvif->deflink.he_ru_2mhz_block)
+ flags |= STA_CTXT_HE_RU_2MHZ_BLOCK;
+
+ /* HTC flags */
+ sta_ctxt_cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, &sta->deflink);
+
+ /* PPE Thresholds */
+ if (!iwl_mvm_set_sta_pkt_ext(mvm, &sta->deflink, &sta_ctxt_cmd.pkt_ext))
+ flags |= STA_CTXT_HE_PACKET_EXT;
+
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP)
+ flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN)
+ flags |= STA_CTXT_HE_ACK_ENABLED;
+
+ rcu_read_unlock();
+
+ if (iwl_mvm_set_fw_mu_edca_params(mvm, &mvmvif->deflink,
+ &sta_ctxt_cmd.trig_based_txf[0]))
+ flags |= STA_CTXT_HE_MU_EDCA_CW;
+
+ if (vif->bss_conf.uora_exists) {
+ flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
+ sta_ctxt_cmd.rand_alloc_ecwmin =
+ vif->bss_conf.uora_ocw_range & 0x7;
+ sta_ctxt_cmd.rand_alloc_ecwmax =
+ (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+ }
+
+ if (!iwl_mvm_is_nic_ack_enabled(mvm, vif))
+ flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED;
+
+ if (vif->bss_conf.nontransmitted) {
+ flags |= STA_CTXT_HE_REF_BSSID_VALID;
+ ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
+ vif->bss_conf.transmitter_bssid);
+ sta_ctxt_cmd.max_bssid_indicator =
+ vif->bss_conf.bssid_indicator;
+ sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index;
+ sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap;
+ sta_ctxt_cmd.profile_periodicity =
+ vif->bss_conf.profile_periodicity;
+ }
+
+ sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+ if (ver < 3) {
+ /* fields before pkt_ext */
+ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) !=
+ offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd,
+ offsetof(typeof(sta_ctxt_cmd), pkt_ext));
+
+ /* pkt_ext */
+ for (i = 0;
+ i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th);
+ i++) {
+ u8 bw;
+
+ for (bw = 0;
+ bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]);
+ bw++) {
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) !=
+ sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw]));
+
+ memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw],
+ &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw],
+ sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]));
+ }
+ }
+
+ /* fields after pkt_ext */
+ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext) !=
+ sizeof(sta_ctxt_cmd_v2) -
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext));
+ memcpy((u8 *)&sta_ctxt_cmd_v2 +
+ offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext),
+ (u8 *)&sta_ctxt_cmd +
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext),
+ sizeof(sta_ctxt_cmd) -
+ offsetofend(typeof(sta_ctxt_cmd), pkt_ext));
+ sta_ctxt_cmd_v2.reserved3 = 0;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd))
+		IWL_ERR(mvm, "Failed to configure FW to work in HE!\n");
+}
+
+void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 duration_override, unsigned int link_id)
+{
+ u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+ u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
+
+ if (duration_override > duration)
+ duration = duration_override;
+
+	/* Try really hard to protect the session and hear a beacon.
+ * The new session protection command allows us to protect the
+ * session for a much longer time since the firmware will internally
+ * create two events: a 300TU one with a very high priority that
+ * won't be fragmented which should be enough for 99% of the cases,
+ * and another one (which we configure here to be 900TU long) which
+ * will have a slightly lower priority, but more importantly, can be
+ * fragmented so that it'll allow other activities to run.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+ min_duration, false,
+ link_id);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
+}
+
+/* Handle association common part to MLD and non-MLD modes */
+void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+ int link_id;
+
+ /* The firmware tracks the MU-MIMO group on its own.
+ * However, on HW restart we should restore this data.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) {
+ ret = iwl_mvm_update_mu_groups(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update VHT MU_MIMO groups\n");
+ }
+
+ iwl_mvm_recalc_multicast(mvm);
+
+ /* reset rssi values */
+ for_each_mvm_vif_valid_link(mvmvif, link_id)
+ mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+ IEEE80211_SMPS_AUTOMATIC);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+}
+
+/* Execute the common part for MLD and non-MLD modes */
+void
+iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (changes & BSS_CHANGED_BEACON_INFO) {
+ /* We received a beacon from the associated AP so
+ * remove the session protection.
+ */
+ iwl_mvm_stop_session_protection(mvm, vif);
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+ }
+
+ if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+ /* Send power command on every beacon change,
+			* because we may not have enabled beacon abort yet.
+ */
+ BSS_CHANGED_BEACON_INFO)) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+
+ if (changes & BSS_CHANGED_CQM) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n");
+ if (link_info)
+ link_info->bf_data.last_cqm_event = 0;
+
+ if (mvmvif->bf_enabled) {
+ /* FIXME: need to update per link when FW API will
+ * support it
+ */
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update CQM thresholds\n");
+ }
+ }
+
+ if (changes & BSS_CHANGED_BANDWIDTH)
+ iwl_mvm_update_link_smps(vif, link_conf);
+
+ if (changes & BSS_CHANGED_TPE) {
+ IWL_DEBUG_CALIB(mvm, "Changing TPE\n");
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+ }
+}
+
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+ int i;
+
+ /*
+ * Re-calculate the tsf id, as the leader-follower relations depend
+ * on the beacon interval, which was not known when the station
+ * interface was added.
+ */
+ if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
+ if ((vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
+
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ }
+
+ /* Update MU EDCA params */
+ if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
+ vif->cfg.assoc &&
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
+ iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
+
+ /*
+ * If we're not associated yet, take the (new) BSSID before associating
+ * so the firmware knows. If we're already associated, then use the old
+ * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
+ * branch for disassociation below.
+ */
+ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
+ memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->deflink.bssid);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ /* after sending it once, adopt mac80211 data */
+ memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN);
+ mvmvif->associated = vif->cfg.assoc;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ /* add quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, true, NULL);
+ if (ret) {
+ IWL_ERR(mvm, "failed to update quotas\n");
+ return;
+ }
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ /*
+ * If we're restarting then the firmware will
+ * obviously have lost synchronisation with
+ * the AP. It will attempt to synchronise by
+ * itself, but we can make it more reliable by
+ * scheduling a session protection time event.
+ *
+ * The firmware needs to receive a beacon to
+ * catch up with synchronisation, use 110% of
+ * the beacon interval.
+ *
+ * Set a large maximum delay to allow for more
+ * than a single interface.
+ *
+ * For new firmware versions, rely on the
+ * firmware. This is relevant for DCM scenarios
+ * only anyway.
+ */
+ u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+ iwl_mvm_protect_session(mvm, vif, dur, dur,
+ 5 * dur, false);
+ } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status) &&
+ !vif->bss_conf.dtim_period) {
+ /*
+ * If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0, 0);
+ }
+
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, 0);
+ }
+ } else if (mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
+ iwl_mvm_mei_host_disassociated(mvm);
+ /*
+ * If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
+ */
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* remove quota for this interface */
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to update quotas\n");
+
+ /* this will take the cleared BSSID from bss_conf */
+ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update MAC %pM (clear after unassoc)\n",
+ vif->addr);
+ }
+
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ }
+
+ iwl_mvm_bss_info_changed_station_common(mvm, vif, &vif->bss_conf,
+ changes);
+}
+
+bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int *ret)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->ap_assoc_sta_count = 0;
+
+ /* must be set before quota calculations */
+ mvmvif->ap_ibss_active = true;
+
+ /* send all the early keys to the device now */
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i];
+
+ if (!key)
+ continue;
+
+ mvmvif->ap_early_keys[i] = NULL;
+
+ *ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ if (*ret)
+ return true;
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+ iwl_mvm_vif_set_low_latency(mvmvif, true,
+ LOW_LATENCY_VIF_TYPE);
+ iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
+ }
+
+	/* power update needs to be done before quotas */
+ iwl_mvm_power_update_mac(mvm);
+
+ return false;
+}
+
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /*
+ * Re-calculate the tsf id, as the leader-follower relations depend on
+ * the beacon interval, which was not known when the AP interface
+ * was added.
+ */
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
+	/* For older devices we need to send the beacon template before
+	 * adding the MAC context. On newer ones the beacon is a resource
+	 * that belongs to a MAC, so the template must be sent after the
+	 * MAC has been added.
+ */
+ if (mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_22000) {
+ /* Add the mac context */
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
+ if (ret)
+ goto out_unlock;
+ } else {
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
+ if (ret)
+ goto out_unlock;
+
+ /* Add the mac context */
+ ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+ if (ret)
+ goto out_unlock;
+ }
+
+ /* Perform the binding */
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out_remove;
+
+ /*
+ * This is not very nice, but the simplest:
+ * For older FWs adding the mcast sta before the bcast station may
+ * cause assert 0x2b00.
+	 * This is fixed in later FW, so make the order of addition depend on
+	 * the TLV.
+ */
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ } else {
+ /*
+ * Send the bcast station. At this stage the TBTT and DTIM time
+ * events are added and applied to the scheduler
+ */
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+ if (ret)
+ goto out_unbind;
+ ret = iwl_mvm_add_mcast_sta(mvm, vif);
+ if (ret) {
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ goto out_unbind;
+ }
+ }
+
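+	/*
+	 * The common start path programs any early AP keys and updates
+	 * power; it returns true on failure, with *ret already set.
+	 */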
+ if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret))
+ goto out_failed;
+
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ goto out_failed;
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ /* we don't support TDLS during DCM */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ iwl_mvm_ftm_restart_responder(mvm, vif, &vif->bss_conf);
+
+ goto out_unlock;
+
+out_failed:
+ iwl_mvm_power_update_mac(mvm);
+ mvmvif->ap_ibss_active = false;
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+out_unbind:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static int iwl_mvm_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, link_conf);
+}
+
+static int iwl_mvm_start_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+/* Common part for MLD and non-MLD ops */
+void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
+ /* Handle AP stop while in CSA */
+ if (rcu_access_pointer(mvm->csa_vif) == vif) {
+ iwl_mvm_remove_time_event(mvm, mvmvif,
+ &mvmvif->time_event_data);
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+ mvmvif->csa_countdown = false;
+ }
+
+ if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ mvm->csa_tx_block_bcn_timeout = 0;
+ }
+
+ mvmvif->ap_ibss_active = false;
+ mvm->ap_last_beacon_gp2 = 0;
+
+ if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+ iwl_mvm_vif_set_low_latency(mvmvif, false,
+ LOW_LATENCY_VIF_TYPE);
+ iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
+ }
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+}
+
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ iwl_mvm_stop_ap_ibss_common(mvm, vif);
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+ iwl_mvm_update_quotas(mvm, false, NULL);
+
+ iwl_mvm_ftm_responder_clear(mvm, vif);
+
+ /*
+ * This is not very nice, but the simplest:
+	 * For older FWs removing the bcast station before the mcast sta may
+	 * cause assert 0x2b00 (the reverse of the order used when adding).
+	 * This is fixed in later FW (which will stop beaconing when removing
+	 * the bcast station), so make the order of removal depend on the
+	 * TLV.
+ */
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ iwl_mvm_send_rm_bcast_sta(mvm, vif);
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_rm_mcast_sta(mvm, vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+
+ iwl_mvm_power_update_mac(mvm);
+
+ iwl_mvm_mac_ctxt_remove(mvm, vif);
+}
+
+static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, link_conf);
+}
+
+static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ /* Need to send a new beacon template to the FW */
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, &vif->bss_conf))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
+
+ if (changes & BSS_CHANGED_FTM_RESPONDER) {
+ int ret = iwl_mvm_ftm_start_responder(mvm, vif, &vif->bss_conf);
+
+ if (ret)
+ IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n",
+ ret);
+ }
+}
+
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u64 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, bss_conf, bss_conf->txpower);
+ }
+}
+
+int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (hw_req->req.n_channels == 0 ||
+ hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
+ return -EINVAL;
+
+ guard(mvm)(mvm);
+ return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
+}
+
+void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a hw_scan when it's already stopped. This can
+ * happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_scan_completed() and the userspace called
+ * cancel scan before ieee80211_scan_work() could run.
+ * To handle that, simply return if the scan is not running.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+}
+
+void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
+}
+
+static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long txqs = 0, tids = 0;
+ int tid;
+
+ /*
+	 * With TVQM the queue numbers can exceed the size of the txqs
+	 * bitmap below - luckily we should never get here with such
+	 * hardware because its firmware supports buffered-station offload.
+ */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ __set_bit(tid_data->txq_id, &txqs);
+
+ if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
+ continue;
+
+ __set_bit(tid, &tids);
+ }
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
+ ieee80211_sta_set_buffered(sta, tid, true);
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
+ /*
+ * The fw updates the STA to be asleep. Tx packets on the Tx
+ * queues to this station will not be transmitted. The fw will
+ * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+ */
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (WARN_ON(mvmsta->deflink.sta_id == IWL_INVALID_STA))
+ break;
+
+ if (txqs)
+ iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
+ iwl_mvm_sta_modify_ps_wake(mvm, sta);
+ break;
+ default:
+ break;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+}
+
+void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ __iwl_mvm_mac_sta_notify(hw, cmd, sta);
+}
+
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
+
+ if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations))
+ return;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+ if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvmsta->vif ||
+ mvmsta->vif->type != NL80211_IFTYPE_AP) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (mvmsta->sleeping != sleeping) {
+ mvmsta->sleeping = sleeping;
+ __iwl_mvm_mac_sta_notify(mvm->hw,
+ sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
+ sta);
+ ieee80211_sta_ps_transition(sta, sleeping);
+ }
+
+ if (sleeping) {
+ switch (notif->type) {
+ case IWL_MVM_PM_EVENT_AWAKE:
+ case IWL_MVM_PM_EVENT_ASLEEP:
+ break;
+ case IWL_MVM_PM_EVENT_UAPSD:
+ ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
+ break;
+ case IWL_MVM_PM_EVENT_PS_POLL:
+ ieee80211_sta_pspoll(sta);
+ break;
+ default:
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int link_id;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+
+ /*
+ * This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ * Since there's mvm->mutex here, no need to have RCU lock for
+ * mvm_sta->link access.
+ */
+ guard(mvm)(mvm);
+ for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
+ struct iwl_mvm_link_sta *link_sta;
+ u32 sta_id;
+
+ if (!mvm_sta->link[link_id])
+ continue;
+
+ link_sta = rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ sta_id = link_sta->sta_id;
+ if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) {
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id],
+ ERR_PTR(-ENOENT));
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
+ }
+ }
+}
+
+static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const u8 *bssid)
+{
+ int i;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_tcm_mac *mdata;
+
+ mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ mdata->opened_rx_ba_sessions = false;
+ }
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
+ return;
+
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
+ if (!vif->p2p &&
+ (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
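+	/*
+	 * Keep U-APSD disabled for BSSIDs on the no-aggregation list;
+	 * these are APs where U-APSD apparently interacts badly with
+	 * aggregation.
+	 */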
+ for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
+ if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+static void
+iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *peer_addr,
+ enum nl80211_tdls_operation action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_tdls *tdls_trig;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_TDLS);
+ if (!trig)
+ return;
+
+ tdls_trig = (void *)trig->data;
+
+ if (!(tdls_trig->action_bitmap & BIT(action)))
+ return;
+
+ if (tdls_trig->peer_mode &&
+ memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "TDLS event occurred, peer %pM, action %d",
+ peer_addr, action);
+}
+
+struct iwl_mvm_he_obss_narrow_bw_ru_data {
+ bool tolerated;
+};
+
+static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+	/* the tolerance bit is in octet 10, so at least 11 octets are needed */
+	if (!elem || elem->datalen < 11 ||
+	    !(elem->data[10] &
+	      WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
+ data->tolerated = false;
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = {
+ .tolerated = true,
+ };
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
+ !mvmvif->link[link_id]))
+ return;
+
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
+ mvmvif->link[link_id]->he_ru_2mhz_block = false;
+ return;
+ }
+
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
+ iwl_mvm_check_he_obss_narrow_bw_ru_iter,
+ &iter_data);
+
+ /*
+	 * Block 26-tone RU UL OFDMA transmissions in HE TB PPDUs if there
+	 * is at least one AP on the radar channel that cannot tolerate them.
+ */
+ mvmvif->link[link_id]->he_ru_2mhz_block = !iter_data.tolerated;
+}
+
+static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!mvm->cca_40mhz_workaround)
+ return;
+
+ /* decrement and check that we reached zero */
+ mvm->cca_40mhz_workaround--;
+ if (mvm->cca_40mhz_workaround)
+ return;
+
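+	/* all affected interfaces are gone - restore the 40 MHz support
+	 * that the workaround had cleared
+	 */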
+ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ he->he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+}
+
+static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvm_sta)
+{
+#if IS_ENABLED(CONFIG_IWLMEI)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mei_conn_info conn_info = {
+ .ssid_len = vif->cfg.ssid_len,
+ };
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ if (!mvm->mei_registered)
+ return;
+
+ /* FIXME: MEI needs to be updated for MLO */
+ if (!vif->bss_conf.chanreq.oper.chan)
+ return;
+
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
+
+ switch (mvm_sta->pairwise_cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256;
+ break;
+ case 0:
+ /* open profile */
+ break;
+ default:
+ /* cipher not supported, don't send anything to iwlmei */
+ return;
+ }
+
+ switch (mvmvif->rekey_data.akm) {
+ case WLAN_AKM_SUITE_SAE & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE;
+ break;
+ case WLAN_AKM_SUITE_PSK & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK;
+ break;
+ case WLAN_AKM_SUITE_8021X & 0xff:
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA;
+ break;
+ case 0:
+ /* open profile */
+ conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN;
+ break;
+ default:
+ /* auth method / AKM not supported */
+		/* TODO: All the FT versions of these? */
+ return;
+ }
+
+ memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ /* TODO: add support for collocated AP data */
+ iwl_mei_host_associated(&conn_info, NULL);
+#endif
+}
+
+static int iwl_mvm_mac_ctxt_changed_wrapper(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force_assoc_off)
+{
+ return iwl_mvm_mac_ctxt_changed(mvm, vif, force_assoc_off, NULL);
+}
+
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ static const struct iwl_mvm_sta_state_ops callbacks = {
+ .add_sta = iwl_mvm_add_sta,
+ .update_sta = iwl_mvm_update_sta,
+ .rm_sta = iwl_mvm_rm_sta,
+ .mac_ctxt_changed = iwl_mvm_mac_ctxt_changed_wrapper,
+ };
+
+ return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state,
+ &callbacks);
+}
+
+/* FIXME: temporarily making two assumptions in all sta handling functions:
+ * (1) when setting sta state, the link exists and is protected
+ * (2) if a link is valid in sta then it's valid in vif (so the same
+ *     index can be used in the link array)
+ */
+static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int link_id;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct ieee80211_bss_conf *conf =
+ link_conf_dereference_check(vif, link_id);
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(sta, link_id);
+
+ if (!conf || !link_sta || !mvmvif->link[link_id]->phy_ctxt)
+ continue;
+
+ iwl_mvm_rs_rate_init(mvm, vif, sta, conf, link_sta,
+ mvmvif->link[link_id]->phy_ctxt->channel->band);
+ }
+}
+
+static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ /* Beacon interval check - firmware will crash if the beacon
+	 * interval is less than 16. We can't prevent the connection
+	 * attempt itself, so refuse the station state change; this will
+	 * cause mac80211
+ * to abandon attempts to connect to this AP, and eventually
+ * wpa_s will blocklist the AP...
+ */
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (!link_conf)
+ continue;
+
+ if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) {
+ IWL_ERR(mvm,
+ "Beacon interval %d for AP %pM is too small\n",
+ link_conf->beacon_int, link_sta->addr);
+ return false;
+ }
+
+ link_conf->he_support = link_sta->he_cap.has_he;
+ }
+
+ return true;
+}
+
+static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool is_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (!link_conf || !mvmvif->link[link_id])
+ continue;
+
+ link_conf->he_support = link_sta->he_cap.has_he;
+
+ if (is_sta) {
+ mvmvif->link[link_id]->he_ru_2mhz_block = false;
+ if (link_sta->he_cap.has_he)
+ iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif,
+ link_id,
+ link_conf);
+ }
+ }
+}
+
+static int
+iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct iwl_mvm_sta_state_ops *callbacks)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int i;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !iwl_mvm_vif_conf_from_sta(mvm, vif, sta))
+ return -EINVAL;
+
+ if (sta->tdls &&
+ (vif->p2p ||
+ iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_TDLS_STA_COUNT ||
+ iwl_mvm_phy_ctx_count(mvm) > 1)) {
+ IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
+ return -EBUSY;
+ }
+
+ ret = callbacks->add_sta(mvm, vif, sta);
+ if (sta->tdls && ret == 0) {
+ iwl_mvm_recalc_tdls_state(mvm, vif, true);
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_SETUP);
+ }
+
+ if (ret)
+ return ret;
+
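+	/* start with A-MSDU effectively disabled (length 1) until rate
+	 * scaling raises the limit
+	 */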
+ for_each_sta_active_link(vif, sta, link_sta, i)
+ link_sta->agg.max_rc_amsdu_len = 1;
+
+ ieee80211_sta_recalc_aggregates(sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mvmvif->ap_sta = sta;
+
+ /*
+	 * Initialize the rates here already - this really only tells
+	 * the firmware what the supported legacy rates may be, since
+	 * it's initialized from what the AP advertised in the
+	 * beacon/probe response. This allows the firmware to send
+	 * auth/assoc frames with one of the supported rates already,
+	 * rather than having to fall back to a mandatory rate.
+	 * If we're the AP, we'll just assume mandatory rates at this
+	 * point; we know nothing about the STA yet anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ return 0;
+}
+
+static int
+iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
+ struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct iwl_mvm_sta_state_ops *callbacks)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_vif_set_he_support(hw, vif, sta, false);
+ mvmvif->ap_assoc_sta_count++;
+ callbacks->mac_ctxt_changed(mvm, vif, false);
+
+ /* since the below is not for MLD API, it's ok to use
+ * the default bss_conf
+ */
+ if (!mvm->mld_api_is_used &&
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
+ iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ iwl_mvm_vif_set_he_support(hw, vif, sta, true);
+
+ callbacks->mac_ctxt_changed(mvm, vif, false);
+
+ if (!mvm->mld_api_is_used)
+ goto out;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON(!link_conf))
+ return -EINVAL;
+ if (!mvmvif->link[link_id])
+ continue;
+
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ALL &
+ ~LINK_CONTEXT_MODIFY_ACTIVE,
+ true);
+ }
+ }
+
+out:
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ return callbacks->update_sta(mvm, vif, sta);
+}
+
+static int
+iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct iwl_mvm_sta_state_ops *callbacks)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* we don't support TDLS during DCM */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ if (sta->tdls) {
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_ENABLE_LINK);
+ } else {
+ /* enable beacon filtering */
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+
+ mvmvif->authorized = 1;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ mvmvif->link_selection_res = vif->active_links;
+ mvmvif->link_selection_primary =
+ vif->active_links ? __ffs(vif->active_links) : 0;
+ }
+
+ callbacks->mac_ctxt_changed(mvm, vif, false);
+ iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+
+ memset(&mvmvif->last_esr_exit, 0,
+ sizeof(mvmvif->last_esr_exit));
+
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
+
+		/* Block eSR until the FW notification arrives */
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
+
+ /* when client is authorized (AP station marked as such),
+ * try to enable the best link(s).
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_select_links(mvm, vif);
+ }
+
+ mvm_sta->authorized = true;
+
+ /* MFP is set by default before the station is authorized.
+ * Clear it here in case it's not used.
+ */
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ return 0;
+}
+
+static int
+iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct iwl_mvm_sta_state_ops *callbacks)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmsta->authorized = false;
+
+ /* once we move into assoc state, need to update rate scale to
+ * disable using wide bandwidth
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ if (!sta->tdls) {
+ /* Set this but don't call iwl_mvm_mac_ctxt_changed()
+ * yet to avoid sending high prio again for a little
+ * time.
+ */
+ mvmvif->authorized = 0;
+ mvmvif->link_selection_res = 0;
+
+ /* disable beacon filtering */
+ iwl_mvm_disable_beacon_filter(mvm, vif);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tmp_non_bss_wk);
+ }
+
+ return 0;
+}
+
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwl_mvm_has_rlc_offload(mvm) ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ return;
+
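+	/*
+	 * With RLC offload but an old MAC_PM_POWER_TABLE version, mirror
+	 * the mac80211 PS setting into ps_disabled (SMPS workaround).
+	 */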
+ mvmvif->ps_disabled = !vif->cfg.ps;
+
+ if (update)
+ iwl_mvm_power_update_mac(mvm);
+}
+
+/* Common part for MLD and non-MLD modes */
+int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state,
+ const struct iwl_mvm_sta_state_ops *callbacks)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ /*
+ * If we are in a STA removal flow and in DQA mode:
+ *
+ * This is after the sync_rcu part, so the queues have already been
+ * flushed. No more TXs on their way in mac80211's path, and no more in
+ * the queues.
+ * Also, we won't be getting any new TX frames for this station.
+ * What we might have are deferred TX frames that need to be taken care
+ * of.
+ *
+ * Drop any still-queued deferred-frame before removing the STA, and
+ * make sure the worker is no longer handling frames for this STA.
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * No need to make sure deferred TX indication is off since the
+ * worker will already remove it if it was on
+ */
+
+ /*
+ * Additionally, reset the 40 MHz capability if we disconnected
+ * from the AP now.
+ */
+ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
+
+ /* Also free dup data just in case any assertions below fail */
+ kfree(mvm_sta->dup_data);
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ /* this would be a mac80211 bug ... but don't crash, unless we had a
+ * firmware crash while we were activating a link, in which case it is
+	 * legit to have phy_ctxt = NULL. We don't bother suppressing the
+	 * WARN in the recovery flow since we print tons of error messages
+	 * anyway.
+ */
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON_ONCE(!mvmvif->link[link_id] ||
+ !mvmvif->link[link_id]->phy_ctxt)) {
+ mutex_unlock(&mvm->mutex);
+ return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status) ? 0 : -EINVAL;
+ }
+ }
+
+ /* track whether or not the station is associated */
+ mvm_sta->sta_state = new_state;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = iwl_mvm_sta_state_notexist_to_none(mvm, vif, sta,
+ callbacks);
+ if (ret < 0)
+ goto out_unlock;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ /*
+ * EBS may be disabled due to previous failures reported by FW.
+ * Reset EBS status here assuming environment has been changed.
+ */
+ mvm->last_ebs_successful = true;
+ iwl_mvm_check_uapsd(mvm, vif, sta->addr);
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mvm_sta_state_auth_to_assoc(hw, mvm, vif, sta,
+ callbacks);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta,
+ callbacks);
+ iwl_mvm_smps_workaround(mvm, vif, true);
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta,
+ callbacks);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ mvmvif->ap_assoc_sta_count--;
+ callbacks->mac_ctxt_changed(mvm, vif, false);
+ } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ iwl_mvm_stop_session_protection(mvm, vif);
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ ret = 0;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ iwl_mvm_stop_session_protection(mvm, vif);
+ mvmvif->ap_sta = NULL;
+ }
+ ret = callbacks->rm_sta(mvm, vif, sta);
+ if (sta->tdls) {
+ iwl_mvm_recalc_tdls_state(mvm, vif, false);
+ iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+ NL80211_TDLS_DISABLE_LINK);
+ }
+
+ if (unlikely(ret &&
+ test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status)))
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ if (sta->tdls && ret == 0) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mvm->rts_threshold = value;
+
+ return 0;
+}
+
+void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta, u32 changed)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED))
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ changed & IEEE80211_RC_NSS_CHANGED)
+ iwl_mvm_sf_update(mvm, vif, false);
+}
+
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->deflink.queue_params[ac] = *params;
+
+ /*
+	 * No need to update right away, we'll get BSS_CHANGED_QOS.
+	 * The exception is the P2P_DEVICE interface, which needs an
+	 * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ guard(mvm)(mvm);
+ return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
+ return 0;
+}
+
+void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (info->was_assoc && !mvmvif->session_prot_connection_loss)
+ return;
+
+ guard(mvm)(mvm);
+ iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id);
+}
+
+void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* for successful cases (auth/assoc), don't cancel session protection */
+ if (info->success)
+ return;
+
+ guard(mvm)(mvm);
+ iwl_mvm_stop_session_protection(mvm, vif);
+}
+
+int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ if (!vif->cfg.idle)
+ return -EBUSY;
+
+ return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+}
+
+int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a sched_scan when it's already stopped. This
+ * can happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_sched_scan_stopped() and the userspace called
+ * stop sched scan before ieee80211_sched_scan_stopped_work()
+ * could run. To handle this, simply return if the scan is
+ * not running.
+ */
+ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
+ mutex_unlock(&mvm->mutex);
+ return 0;
+ }
+
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_wait_for_async_handlers(mvm);
+
+ return ret;
+}
+
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta = NULL;
+ struct iwl_mvm_key_pn *ptk_pn = NULL;
+ int keyidx = key->keyidx;
+ u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
+ int ret, i;
+ u8 key_offset;
+
+ if (sta)
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!mvm->trans->mac_cfg->gen2) {
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+ } else {
+ IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -EOPNOTSUPP;
+ /* support HW crypto on TX */
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
+ key);
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta) {
+ /*
+ * GTK on AP interface is a TX-only key, return 0;
+ * on IBSS they're per-station and because we're lazy
+ * we don't support them for RX, so do the same.
+ * CMAC/GMAC in AP/IBSS modes must be done in software
+ * on older NICs.
+ *
+ * Except, of course, beacon protection - it must be
+ * offloaded since we just set a beacon template, and
+ * then we must also offload the IGTK (not just BIGTK)
+ * for firmware reasons.
+ *
+ * So just check for beacon protection - if we don't
+ * have it we cannot get here with keyidx >= 6, and
+ * if we do have it we need to send the key to FW in
+ * all cases (CMAC/GMAC).
+ */
+ if (!wiphy_ext_feature_isset(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION) &&
+ (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
+ key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
+ !iwl_mvm_has_new_tx_api(mvm)) {
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ ret = 0;
+ break;
+ }
+
+ if (!mvmvif->ap_ibss_active) {
+ for (i = 0;
+ i < ARRAY_SIZE(mvmvif->ap_early_keys);
+ i++) {
+ if (!mvmvif->ap_early_keys[i]) {
+ mvmvif->ap_early_keys[i] = key;
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
+ ret = -ENOSPC;
+ else
+ ret = 0;
+
+ break;
+ }
+ }
+
+ /* During FW restart, in order to restore the state as it was,
+ * don't try to reprogram keys we previously failed for.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ IWL_DEBUG_MAC80211(mvm,
+ "skip invalid idx key programming during restart\n");
+ ret = 0;
+ break;
+ }
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ struct ieee80211_key_seq seq;
+ int tid, q;
+
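+			/*
+			 * With multi-queue RX the driver checks PNs per
+			 * queue, so seed each queue's PN from mac80211's
+			 * current RX sequence.
+			 */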
+ WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
+ ptk_pn = kzalloc(struct_size(ptk_pn, q,
+ mvm->trans->info.num_rxqs),
+ GFP_KERNEL);
+ if (!ptk_pn) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
+ memcpy(ptk_pn->q[q].pn[tid],
+ seq.ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+
+ rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
+ }
+
+ /* in HW restart reuse the index, otherwise request a new one */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ key_offset = key->hw_key_idx;
+ else
+ key_offset = STA_KEY_IDX_INVALID;
+
+ if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ mvmsta->pairwise_cipher = key->cipher;
+
+ IWL_DEBUG_MAC80211(mvm, "set hwcrypto key (sta:%pM, id:%d)\n",
+ sta ? sta->addr : NULL, key->keyidx);
+
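+		/* a non-zero version means the firmware has the new
+		 * SEC_KEY_CMD API
+		 */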
+ if (sec_key_ver)
+ ret = iwl_mvm_sec_key_add(mvm, vif, sta, key);
+ else
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
+
+ if (ret) {
+ IWL_WARN(mvm, "set key failed\n");
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ if (ptk_pn) {
+ RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
+ kfree(ptk_pn);
+ }
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0,
+ * unless we have new TX API where we cannot
+ * put key material into the TX_CMD
+ */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
+ }
+
+ break;
+ case DISABLE_KEY:
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6],
+ NULL);
+
+ ret = -ENOENT;
+ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
+ if (mvmvif->ap_early_keys[i] == key) {
+ mvmvif->ap_early_keys[i] = NULL;
+ ret = 0;
+ }
+ }
+
+ /* found in pending list - don't do anything else */
+ if (ret == 0)
+ break;
+
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ ret = 0;
+ break;
+ }
+
+ if (mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ ptk_pn = rcu_dereference_protected(
+ mvmsta->ptk_pn[keyidx],
+ lockdep_is_held(&mvm->mutex));
+ RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
+ if (ptk_pn)
+ kfree_rcu(ptk_pn, rcu_head);
+ }
+
+ IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+ if (sec_key_ver)
+ ret = iwl_mvm_sec_key_del(mvm, vif, sta, key);
+ else
+ ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* When resuming from wowlan, FW already knows about the newest keys */
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
+ return 0;
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
+}
+
+void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_hs20_roc_res *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mvm_time_event_data *te_data = data;
+
+ if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ IWL_DEBUG_TE(mvm,
+ "Aux ROC: Received response from ucode: status=%d uid=%d\n",
+ resp->status, resp->event_unique_id);
+
+ te_data->uid = le32_to_cpu(resp->event_unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return true;
+}
+
+static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration)
+{
+ int res;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
+ static const u16 time_event_response[] = { HOT_SPOT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ u32 req_dur, delay;
+ struct iwl_hs20_roc_req aux_roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
+ .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+ struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm,
+ &aux_roc_req.channel_info);
+ u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);
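+	/* the subtraction above trims channel-info bytes that this
+	 * firmware's command format doesn't use
+	 */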
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20,
+ 0);
+
+ /* Set the time and duration */
+ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &req_dur, &delay);
+ tail->duration = cpu_to_le32(req_dur);
+ tail->apply_time_max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "ROC: Requesting to remain on channel %u for %ums\n",
+ channel->hw_value, req_dur);
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+
+ /* Set the node address */
+ memcpy(tail->node_addr, vif->addr, ETH_ALEN);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+
+ if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+
+ te_data->vif = vif;
+ te_data->duration = duration;
+ te_data->id = HOT_SPOT_CMD;
+
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already be processed unsuccessfully.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_rx_aux_roc, te_data);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
+ &aux_roc_req);
+
+ if (res) {
+ IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(res);
+
+ if (res) {
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ return res;
+}
+
+static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
+ IWL_ERR(mvm, "hotspot not supported\n");
+ return -EINVAL;
+ }
+
+ if (iwl_mvm_has_new_station_api(mvm->fw)) {
+ ret = iwl_mvm_add_aux_sta(mvm, lmac_id);
+ WARN(ret, "Failed to allocate aux station");
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+ return ret;
+
+ /* The station and queue allocation must be done only after the binding
+ * is done, as otherwise the FW might incorrectly configure its state.
+ */
+ return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+}
+
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
+ .link = iwl_mvm_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+}
+
+static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration)
+{
+ int ret;
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+ u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
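+	/*
+	 * Without any ROC_CMD version, use the legacy aux-ROC time event
+	 * via HOT_SPOT_CMD; version 3+ of ROC_CMD handles hotspot ROC
+	 * directly.
+	 */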
+ if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
+ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
+ } else if (fw_ver >= 3) {
+ ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
+ ROC_ACTIVITY_HOTSPOT);
+ } else {
+ ret = -EOPNOTSUPP;
+ IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ enum iwl_roc_activity activity;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_mld_add_aux_sta(mvm,
+ iwl_mvm_get_lmac_id(mvm, channel->band));
+ if (ret)
+ return ret;
+
+ return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity);
+}
+
+static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct cfg80211_chan_def chandef;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmvif->deflink.phy_ctxt &&
+ channel == mvmvif->deflink.phy_ctxt->channel)
+ return 0;
+
+ /* Try using a PHY context that is already in use */
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[i];
+
+ if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
+ continue;
+
+ if (channel == phy_ctxt->channel) {
+ if (mvmvif->deflink.phy_ctxt)
+ iwl_mvm_phy_ctxt_unref(mvm,
+ mvmvif->deflink.phy_ctxt);
+
+ mvmvif->deflink.phy_ctxt = phy_ctxt;
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+ return 0;
+ }
+ }
+
+ /* We already have a phy_ctxt, but it's not on the right channel */
+ if (mvmvif->deflink.phy_ctxt)
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+
+ mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!mvmvif->deflink.phy_ctxt)
+ return -ENOSPC;
+
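+	/* a plain 20 MHz non-HT channel definition is enough for ROC */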
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+ return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
+ &chandef, NULL, 1, 1);
+}
+
+/* Execute the common part for MLD and non-MLD modes */
+int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type,
+ const struct iwl_mvm_roc_ops *ops)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ u32 lmac_id;
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+ duration, type);
+
+ /*
+ * Flush the done work, just in case it's still pending, so that
+ * the work it does can complete and we can accept new frames.
+ */
+ flush_work(&mvm->roc_done_wk);
+
+ if (!IS_ERR_OR_NULL(bss_vif)) {
+ ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_ROC);
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ lmac_id = iwl_mvm_get_lmac_id(mvm, channel->band);
+
+ /* Use aux roc framework (HS20) */
+ ret = ops->add_aux_sta_for_hs20(mvm, lmac_id);
+ if (!ret)
+ ret = iwl_mvm_roc_station(mvm, channel, vif, duration);
+ return ret;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ /* handle below */
+ break;
+ default:
+ IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
+ return -EINVAL;
+ }
+
+ if (iwl_mvm_has_p2p_over_aux(mvm)) {
+ ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type);
+ return ret;
+ }
+
+ ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel);
+ if (ret)
+ return ret;
+
+ ret = ops->link(mvm, vif);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+}
+
+int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+ iwl_mvm_stop_roc(mvm, vif);
+
+ IWL_DEBUG_MAC80211(mvm, "leave\n");
+ return 0;
+}
+
+struct iwl_mvm_chanctx_usage_data {
+ struct iwl_mvm *mvm;
+ struct ieee80211_chanctx_conf *ctx;
+ bool use_def;
+};
+
+static void iwl_mvm_chanctx_usage_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_chanctx_usage_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
+ continue;
+
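+		/*
+		 * FILS discovery and the FTM responder require the full
+		 * channel definition rather than the minimized one.
+		 */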
+ if (iwl_mvm_enable_fils(data->mvm, vif, data->ctx))
+ data->use_def = true;
+
+ if (vif->type == NL80211_IFTYPE_AP && link_conf->ftmr_params)
+ data->use_def = true;
+ }
+}
+
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm_chanctx_usage_data data = {
+ .mvm = mvm,
+ .ctx = ctx,
+ .use_def = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_chanctx_usage_iter,
+ &data);
+
+ return data.use_def ? &ctx->def : &ctx->min_def;
+}
+
+static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
+
+ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!phy_ctxt) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to add PHY context\n");
+ goto out;
+ }
+
+ *phy_ctxt_id = phy_ctxt->id;
+out:
+ return ret;
+}
+
+int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_add_chanctx(mvm, ctx);
+}
+
+static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
+}
+
+void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ __iwl_mvm_remove_chanctx(mvm, ctx);
+}
+
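+/*
+ * Only width, RX chains, radar and min_def updates may be applied while
+ * the PHY context is referenced by more than one vif (see the WARN
+ * below), presumably since other changes, e.g. the channel itself,
+ * can't be applied transparently to all users.
+ */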
+void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx, u32 changed)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
+
+ if (WARN_ONCE((phy_ctxt->ref > 1) &&
+ (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
+ IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_MIN_DEF)),
+ "Cannot change PHY. Ref=%d, changed=0x%X\n",
+ phy_ctxt->ref, changed))
+ return;
+
+ guard(mvm)(mvm);
+
+	/* we are only changing the min chandef, which may be a no-op */
+ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_DEF) {
+ if (phy_ctxt->width == def->width)
+ return;
+
+ /* we are just toggling between 20_NOHT and 20 */
+ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
+ def->width <= NL80211_CHAN_WIDTH_20)
+ return;
+ }
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
+}
+
+/*
+ * This function executes the common part for MLD and non-MLD modes.
+ *
+ * Returns true if we're done assigning the chanctx
+ * (either on failure or success)
+ */
+static bool
+__iwl_mvm_assign_vif_chanctx_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx, int *ret)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->deflink.phy_ctxt = phy_ctxt;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /* only needed if we're switching chanctx (i.e. during CSA) */
+ if (switching_chanctx) {
+ mvmvif->ap_ibss_active = true;
+ break;
+ }
+ fallthrough;
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * The AP binding flow is handled as part of the start_ap flow
+ * (in bss_info_changed), similarly for IBSS.
+ */
+ *ret = 0;
+ return true;
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ /* always disable PS when a monitor interface is active */
+ mvmvif->ps_disabled = true;
+ break;
+ default:
+ *ret = -EINVAL;
+ return true;
+ }
+ return false;
+}
+
+static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ON(!link_conf))
+ return -EINVAL;
+
+ if (__iwl_mvm_assign_vif_chanctx_common(mvm, vif, ctx,
+ switching_chanctx, &ret))
+ goto out;
+
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (ret)
+ goto out;
+
+ /*
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ */
+ iwl_mvm_power_update_mac(mvm);
+
+ /* Setting the quota at this stage is only required for monitor
+ * interfaces. For the other types, the bss_info changed flow
+ * will handle quota settings.
+ */
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvmvif->monitor_active = true;
+ ret = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (ret)
+ goto out_remove_binding;
+
+ ret = iwl_mvm_add_snif_sta(mvm, vif);
+ if (ret)
+ goto out_remove_binding;
+	}
+
+ /* Handle binding during CSA */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (!switching_chanctx) {
+ mvmvif->csa_bcn_pending = false;
+ goto out;
+ }
+
+ mvmvif->csa_bcn_pending = true;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+ u32 duration = 5 * vif->bss_conf.beacon_int;
+
+ /* Protect the session to make sure we hear the first
+ * beacon on the new channel.
+ */
+ iwl_mvm_protect_session(mvm, vif, duration, duration,
+ vif->bss_conf.beacon_int / 2,
+ true);
+ }
+
+ iwl_mvm_update_quotas(mvm, false, NULL);
+
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+ }
+
+ goto out;
+
+out_remove_binding:
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ iwl_mvm_power_update_mac(mvm);
+out:
+ if (ret)
+ mvmvif->deflink.phy_ctxt = NULL;
+ return ret;
+}
+
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
+}
+
+/*
+ * This function executes the common part for MLD and non-MLD modes.
+ *
+ * Returns true if the chanctx unassignment is done
+ * (either on failure or success)
+ */
+static bool __iwl_mvm_unassign_vif_chanctx_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool switching_chanctx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_remove_time_event(mvm, mvmvif,
+ &mvmvif->time_event_data);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_ADHOC:
+ return true;
+ case NL80211_IFTYPE_MONITOR:
+ mvmvif->monitor_active = false;
+ mvmvif->ps_disabled = false;
+ break;
+ case NL80211_IFTYPE_AP:
+ /* This part is triggered only during CSA */
+ if (!switching_chanctx || !mvmvif->ap_ibss_active)
+ return true;
+
+ mvmvif->csa_countdown = false;
+
+ /* Set CS bit on all the stations */
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
+
+ /* Save blocked iface, the timeout is set on the next beacon */
+ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
+
+ mvmvif->ap_ibss_active = false;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_vif *disabled_vif = NULL;
+
+ if (__iwl_mvm_unassign_vif_chanctx_common(mvm, vif, switching_chanctx))
+ goto out;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ iwl_mvm_rm_snif_sta(mvm, vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION && switching_chanctx) {
+ disabled_vif = vif;
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
+ }
+
+ iwl_mvm_update_quotas(mvm, false, disabled_vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+
+out:
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+ switching_chanctx)
+ return;
+ mvmvif->deflink.phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
+}
+
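+/*
+ * Swap-mode switch: unassign the vif from the old context, remove the
+ * old context, add the new one and assign the vif to it. On failure,
+ * the steps are rolled back in reverse order; if even the rollback
+ * fails, force an NMI to restart the firmware.
+ */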
+static int
+iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ const struct iwl_mvm_switch_vif_chanctx_ops *ops)
+{
+ int ret;
+
+ guard(mvm)(mvm);
+ ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx, true);
+ __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
+
+ ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx, true);
+ if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_remove;
+ }
+
+	/* we don't support TDLS during DCM, which can be caused by a channel switch */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ return 0;
+
+out_remove:
+ __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
+
+out_reassign:
+ if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
+ IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
+ goto out_restart;
+ }
+
+ if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx, true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ return ret;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_force_nmi(mvm->trans);
+ return ret;
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ const struct iwl_mvm_switch_vif_chanctx_ops *ops)
+{
+ int ret;
+
+ guard(mvm)(mvm);
+ ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx, true);
+
+ ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx, true);
+ if (ret) {
+ IWL_ERR(mvm,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ return 0;
+
+out_reassign:
+ if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx, true)) {
+ IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+ goto out_restart;
+ }
+
+ return ret;
+
+out_restart:
+ /* things keep failing, better restart the hw */
+ iwl_force_nmi(mvm->trans);
+ return ret;
+}
+
+/* Execute the common part for both MLD and non-MLD modes */
+int
+iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode,
+ const struct iwl_mvm_switch_vif_chanctx_ops *ops)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+	/* we only support a single vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs, ops);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs, ops);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ static const struct iwl_mvm_switch_vif_chanctx_ops ops = {
+ .__assign_vif_chanctx = __iwl_mvm_assign_vif_chanctx,
+ .__unassign_vif_chanctx = __iwl_mvm_unassign_vif_chanctx,
+ };
+
+ return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops);
+}
+
+int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ return mvm->ibss_manager;
+}
+
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (!mvm_sta || !mvm_sta->vif) {
+ IWL_ERR(mvm, "Station is not associated to a vif\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif,
+ &mvm_sta->vif->bss_conf);
+}
+
+void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (which is when the absence time event starts).
+ */
+
+ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
+ "dummy channel switch op\n");
+}
+
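+/*
+ * With newer firmware the device times the client-side switch itself;
+ * we only describe it (TSF, countdown, quiet mode) in a
+ * CHANNEL_SWITCH_TIME_EVENT_CMD.
+ */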
+static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .tsf = cpu_to_le32(chsw->timestamp),
+ .cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (chsw->delay)
+ cmd.cs_delayed_bcn_count =
+ DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 apply_time;
+
+	/* Schedule the time event to a bit before beacon 1,
+	 * to make sure we're in the new channel when the
+	 * GO/AP arrives. If count <= 1, schedule the TE
+	 * immediately (this might result in some packet or
+	 * connection loss).
+	 */
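+	/* For example, with beacon_int = 100 TU and count = 3 the TE
+	 * starts (100 * 2 - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) TU
+	 * after device_timestamp; the multiplication by 1024 converts
+	 * TU (1024 usec each) into the device clock's microseconds.
+	 */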
+ if (chsw->count <= 1)
+ apply_time = 0;
+ else
+ apply_time = chsw->device_timestamp +
+ ((vif->bss_conf.beacon_int * (chsw->count - 1) -
+ IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
+
+ if (chsw->block_tx)
+ iwl_mvm_csa_client_absent(mvm, vif);
+
+ if (mvmvif->bf_enabled) {
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+ apply_time);
+
+ return 0;
+}
+
+static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ }
+}
+
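+/*
+ * Rough upper bound on how long TX may stay blocked around a CSA: it is
+ * compared against beacon-interval based estimates below and also used,
+ * via msecs_to_jiffies(), to arm the disconnect worker.
+ */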
+#define IWL_MAX_CSA_BLOCK_TX 1500
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_txq *mvmtxq;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->csa_failed = false;
+ mvmvif->csa_blocks_tx = false;
+
+ IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_CHANNEL_SWITCH);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ csa_vif =
+ rcu_dereference_protected(mvm->csa_vif,
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
+ "Another CSA is already in progress"))
+ return -EBUSY;
+
+		/* we still didn't unblock TX, so prevent a new CS meanwhile */
+ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex)))
+ return -EBUSY;
+
+ rcu_assign_pointer(mvm->csa_vif, vif);
+
+ if (WARN_ONCE(mvmvif->csa_countdown,
+ "Previous CSA countdown didn't complete"))
+ return -EBUSY;
+
+ mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+
+ if (!chsw->block_tx)
+ break;
+		/* no need to block in the driver otherwise - mac80211 will do it */
+ if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA))
+ break;
+
+ mvmvif->csa_blocks_tx = true;
+ mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq);
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_csa_block_txqs,
+ NULL);
+ break;
+ case NL80211_IFTYPE_STATION:
+ mvmvif->csa_blocks_tx = chsw->block_tx;
+
+ /*
+ * In the new flow FW is in charge of timing the switch so there
+ * is no need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF,
+ 0))
+ break;
+
+ /*
+ * We haven't configured the firmware to be associated yet since
+ * we don't know the dtim period. In this case, the firmware can't
+ * track the beacons.
+ */
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period)
+ return -EBUSY;
+
+ if (chsw->delay > IWL_MAX_CSA_BLOCK_TX &&
+ hweight16(vif->valid_links) <= 1)
+ schedule_delayed_work(&mvmvif->csa_work, 0);
+
+ if (chsw->block_tx) {
+			/*
+			 * If the switch takes an undetermined or overly long
+			 * time while TX is blocked, schedule the worker to
+			 * gracefully disconnect
+			 */
+ if (!chsw->count ||
+ chsw->count * vif->bss_conf.beacon_int >
+ IWL_MAX_CSA_BLOCK_TX)
+ schedule_delayed_work(&mvmvif->csa_work,
+ msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
+ }
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
+ ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
+ if (ret)
+ return ret;
+ } else {
+ iwl_mvm_schedule_client_csa(mvm, vif, chsw);
+ }
+
+ mvmvif->csa_count = chsw->count;
+ mvmvif->csa_misbehave = false;
+ break;
+ default:
+ break;
+ }
+
+ mvmvif->ps_disabled = true;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+ if (ret)
+ return ret;
+
+ /* we won't be on this channel any longer */
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ return ret;
+}
+
+static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+}
+
+void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .tsf = cpu_to_le32(chsw->timestamp),
+ .cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
+ };
+
+ /*
+ * In the new flow FW is in charge of timing the switch so there is no
+ * need for all of this
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ CHANNEL_SWITCH_ERROR_NOTIF, 0))
+ return;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
+ return;
+
+ IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n",
+ mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx);
+
+ if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
+ if (mvmvif->csa_misbehave) {
+ struct ieee80211_bss_conf *link_conf;
+
+			/* Second time, give up on this AP */
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[chsw->link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+
+ iwl_mvm_abort_channel_switch(hw, vif, link_conf);
+ ieee80211_chswitch_done(vif, false, 0);
+ mvmvif->csa_misbehave = false;
+ return;
+ }
+ mvmvif->csa_misbehave = true;
+ }
+ mvmvif->csa_count = chsw->count;
+
+ guard(mvm)(mvm);
+ if (mvmvif->csa_failed)
+ return;
+
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
+}
+
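+/*
+ * Flush without a vif: with drop=true actively flush the flushable TX
+ * queues (or every station's TIDs on new-TX-API devices); with
+ * drop=false just wait for the queues to drain.
+ */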
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+ int i;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* we can't ask the firmware anything if it is dead */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ return;
+ if (drop) {
+ guard(mvm)(mvm);
+ iwl_mvm_flush_tx_path(mvm,
+ iwl_mvm_flushable_queues(mvm) & queues);
+ } else {
+ iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
+ }
+ return;
+ }
+
+ guard(mvm)(mvm);
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ if (drop)
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF);
+ else
+ iwl_mvm_wait_sta_queues_empty(mvm,
+ iwl_mvm_sta_from_mac80211(sta));
+ }
+}
+
+void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_sta *sta;
+ bool ap_sta_done = false;
+ int i;
+ u32 msk = 0;
+
+ if (!vif) {
+ iwl_mvm_flush_no_vif(mvm, queues, drop);
+ return;
+ }
+
+ if (!drop && hweight16(vif->active_links) <= 1) {
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+ if (link_conf->csa_active && mvmvif->csa_blocks_tx)
+ drop = true;
+ }
+
+ /* Make sure we're done with the deferred traffic before flushing */
+ flush_work(&mvm->add_stream_wk);
+
+ mutex_lock(&mvm->mutex);
+
+ /* flush the AP-station and all TDLS peers */
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+
+ if (sta == mvmvif->ap_sta) {
+ if (ap_sta_done)
+ continue;
+ ap_sta_done = true;
+ }
+
+ if (drop) {
+ if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
+ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ } else {
+ if (iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
+ else /* only used for !iwl_mvm_has_new_tx_api() below */
+ msk |= mvmsta->tfd_queue_msk;
+ }
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+	/* This can take a while, and we may need/want other operations
+	 * to succeed while doing this, so do it without the mutex held.
+	 * If the firmware is dead, this can't work...
+	 */
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
+}
+
+void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ guard(mvm)(mvm);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!mvm_link_sta)
+ continue;
+
+ if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
+ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ }
+}
+
+static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx,
+ struct survey_info *survey)
+{
+ int chan_idx;
+ enum nl80211_band band;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Find and return the next entry that has a non-zero active time */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct iwl_mvm_acs_survey_channel *info =
+ &mvm->acs_survey->bands[band][chan_idx];
+
+ if (!info->time)
+ continue;
+
+ /* Found (the next) channel to report */
+ survey->channel = &sband->channels[chan_idx];
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time = info->time;
+ survey->time_busy = info->time_busy;
+ survey->time_rx = info->time_rx;
+ survey->time_tx = info->time_tx;
+ survey->noise = info->noise;
+ if (survey->noise < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ /* Clear time so that channel is only reported once */
+ info->time = 0;
+
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = -ENOENT;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
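+/*
+ * The radio statistics are accumulated in microseconds while cfg80211
+ * reports survey times in milliseconds, hence the
+ * do_div(..., USEC_PER_MSEC) conversions below.
+ */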
+int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memset(survey, 0, sizeof(*survey));
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ return -ENOENT;
+
+ /*
+ * Return the beacon stats at index zero and pass on following indices
+ * to the function returning the full survey, most likely for ACS
+ * (Automatic Channel Selection).
+ */
+ if (idx > 0)
+ return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey);
+
+ guard(mvm)(mvm);
+
+ if (iwl_mvm_firmware_running(mvm)) {
+ int ret = iwl_mvm_request_statistics(mvm, false);
+
+ if (ret)
+ return ret;
+ }
+
+ survey->filled = SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+
+ survey->time_rx = mvm->accu_radio_stats.rx_time +
+ mvm->radio_stats.rx_time;
+ do_div(survey->time_rx, USEC_PER_MSEC);
+
+ survey->time_tx = mvm->accu_radio_stats.tx_time +
+ mvm->radio_stats.tx_time;
+ do_div(survey->time_tx, USEC_PER_MSEC);
+
+ /* the new fw api doesn't support the following fields */
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return 0;
+
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_SCAN;
+ survey->time = mvm->accu_radio_stats.on_time_rf +
+ mvm->radio_stats.on_time_rf;
+ do_div(survey->time, USEC_PER_MSEC);
+
+ survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+ mvm->radio_stats.on_time_scan;
+ do_div(survey->time_scan, USEC_PER_MSEC);
+
+ return 0;
+}
+
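+/*
+ * Translate the firmware rate_n_flags encoding into a cfg80211
+ * struct rate_info: bandwidth first, then either a legacy CCK/OFDM
+ * rate (in 100 kbps units) or NSS/MCS plus the HT/VHT/HE/EHT flags.
+ */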
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
+
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rinfo->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ if (format == RATE_MCS_MOD_TYPE_CCK ||
+ format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) {
+ int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
+
+ /* add the offset needed to get to the legacy ofdm indices */
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
+ rate += IWL_FIRST_OFDM_RATE;
+
+ switch (rate) {
+ case IWL_RATE_1M_INDEX:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_INDEX:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_INDEX:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_INDEX:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_INDEX:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_INDEX:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_INDEX:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_INDEX:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_INDEX:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_INDEX:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_INDEX:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_INDEX:
+ rinfo->legacy = 540;
+ }
+ return;
+ }
+
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1;
+ rinfo->mcs = format == RATE_MCS_MOD_TYPE_HT ?
+ RATE_HT_MCS_INDEX(rate_n_flags) :
+ u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_EHT:
+ /* TODO: GI/LTF/RU. How does the firmware encode them? */
+ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (gi_ltf == 3)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ }
+}
+
+void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
+
+ if (mvmsta->deflink.avg_energy) {
+ sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+ struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->deflink.lq_sta.rs_fw;
+
+ iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+	/* if beacon filtering isn't on, mac80211 does it anyway */
+ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+ return;
+
+ if (!vif->cfg.assoc)
+ return;
+
+ guard(mvm)(mvm);
+
+ if (sta != mvmvif->ap_sta)
+ return;
+
+ if (iwl_mvm_request_statistics(mvm, false))
+ return;
+
+ sinfo->rx_beacon = 0;
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ sinfo->rx_beacon += mvmvif->link[i]->beacon_stats.num_beacons +
+ mvmvif->link[i]->beacon_stats.accu_num_beacons;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
+ if (mvmvif->deflink.beacon_stats.avg_signal) {
+ /* firmware only reports a value after RXing a few beacons */
+ sinfo->rx_beacon_signal_avg =
+ mvmvif->deflink.beacon_stats.avg_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ }
+}
+
+static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_mlme_event *mlme)
+{
+ if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) &&
+ (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) {
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
+ NULL);
+ return;
+ }
+
+ if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_DEASSOC,
+ NULL);
+ return;
+ }
+}
+
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \
+ do { \
+ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \
+ break; \
+ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \
+ } while (0)
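+	/* the macro above decrements the per-event occurrence counter
+	 * and only collects the debug data once it reaches zero; a
+	 * counter that is already zero always collects
+	 */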
+
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+ if (iwl_trans_dbg_ini_valid(mvm->trans)) {
+ iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme);
+ return;
+ }
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
+ return;
+
+ trig_mlme = (void *)trig->data;
+
+ if (event->u.mlme.data == ASSOC_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(stop_assoc_denied,
+ "DENIED ASSOC: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(stop_assoc_timeout,
+ "ASSOC TIMEOUT");
+ } else if (event->u.mlme.data == AUTH_EVENT) {
+ if (event->u.mlme.status == MLME_DENIED)
+ CHECK_MLME_TRIGGER(stop_auth_denied,
+ "DENIED AUTH: reason %d",
+ event->u.mlme.reason);
+ else if (event->u.mlme.status == MLME_TIMEOUT)
+ CHECK_MLME_TRIGGER(stop_auth_timeout,
+ "AUTH TIMEOUT");
+ } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
+ CHECK_MLME_TRIGGER(stop_rx_deauth,
+ "DEAUTH RX %d", event->u.mlme.reason);
+ } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
+ CHECK_MLME_TRIGGER(stop_tx_deauth,
+ "DEAUTH TX %d", event->u.mlme.reason);
+ }
+#undef CHECK_MLME_TRIGGER
+}
+
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
+ return;
+
+ ba_trig = (void *)trig->data;
+
+ if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "BAR received from %pM, tid %d, ssn %d",
+ event->u.ba.sta->addr, event->u.ba.tid,
+ event->u.ba.ssn);
+}
+
+void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (event->type) {
+ case MLME_EVENT:
+ iwl_mvm_event_mlme_callback(mvm, vif, event);
+ break;
+ case BAR_RX_EVENT:
+ iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+ break;
+ case BA_FRAME_TIMEOUT:
+ iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
+ event->u.ba.tid);
+ break;
+ default:
+ break;
+ }
+}
+
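+/*
+ * RX queue sync: the notification is sent back on all RX queues (see
+ * rxq_mask below); each queue's handler is expected to clear its bit
+ * in queue_sync_state, and a synchronous caller waits for the whole
+ * mask to clear.
+ */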
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ enum iwl_mvm_rxq_notif_type type,
+ bool sync,
+ const void *data, u32 size)
+{
+ DEFINE_RAW_FLEX(struct iwl_rxq_sync_cmd, cmd, payload,
+ sizeof(struct iwl_mvm_internal_rxq_notif));
+ struct iwl_mvm_internal_rxq_notif *notif =
+ (struct iwl_mvm_internal_rxq_notif *)cmd->payload;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
+ .data[0] = cmd,
+ .len[0] = __struct_size(cmd),
+ .data[1] = data,
+ .len[1] = size,
+ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
+ };
+ int ret;
+
+ cmd->rxq_mask = cpu_to_le32(BIT(mvm->trans->info.num_rxqs) - 1);
+ cmd->count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
+ size);
+ notif->type = type;
+ notif->sync = sync;
+
+ /* size must be a multiple of DWORD */
+ if (WARN_ON(cmd->count & cpu_to_le32(3)))
+ return;
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ if (sync) {
+ notif->cookie = mvm->queue_sync_cookie;
+ mvm->queue_sync_state = (1 << mvm->trans->info.num_rxqs) - 1;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ if (sync) {
+ lockdep_assert_held(&mvm->mutex);
+ ret = wait_event_timeout(mvm->rx_sync_waitq,
+ READ_ONCE(mvm->queue_sync_state) == 0,
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
+ }
+
+out:
+ if (sync) {
+ mvm->queue_sync_state = 0;
+ mvm->queue_sync_cookie++;
+ }
+}
+
+void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
+}
+
+int
+iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *stats)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+ !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ *stats = mvm->ftm_resp_stats;
+ mutex_unlock(&mvm->mutex);
+
+ stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
+ BIT(NL80211_FTM_STATS_PARTIAL_NUM) |
+ BIT(NL80211_FTM_STATS_FAILED_NUM) |
+ BIT(NL80211_FTM_STATS_ASAP_NUM) |
+ BIT(NL80211_FTM_STATS_NON_ASAP_NUM) |
+ BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) |
+ BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) |
+ BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) |
+ BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM);
+
+ return 0;
+}
+
+int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_ftm_start(mvm, vif, request);
+}
+
+void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ iwl_mvm_ftm_abort(mvm, request);
+}
+
+static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
+{
+ u8 protocol = ip_hdr(skb)->protocol;
+
+ if (!IS_ENABLED(CONFIG_INET))
+ return false;
+
+ return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
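+/*
+ * Frames may only share an A-MSDU if the hardware treats their
+ * checksums the same way, so when HW checksum is supported the head
+ * and the new skb must agree on whether they can be offloaded.
+ */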
+static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* For now don't aggregate IPv6 in AMSDU */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ if (!iwl_mvm_is_csum_supported(mvm))
+ return true;
+
+ return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
+}
+
+int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_set_hw_timestamp *hwts)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 protocols = 0;
+
+ /* HW timestamping is only supported for a specific station */
+ if (!hwts->macaddr)
+ return -EOPNOTSUPP;
+
+ if (hwts->enable)
+ protocols =
+ IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
+
+ guard(mvm)(mvm);
+ return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
+}
+
+const struct ieee80211_ops iwl_mvm_hw_ops = {
+ .tx = iwl_mvm_mac_tx,
+ .wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
+ .ampdu_action = iwl_mvm_mac_ampdu_action,
+ .get_antenna = iwl_mvm_op_get_antenna,
+ .set_antenna = iwl_mvm_op_set_antenna,
+ .start = iwl_mvm_mac_start,
+ .reconfig_complete = iwl_mvm_mac_reconfig_complete,
+ .stop = iwl_mvm_mac_stop,
+ .add_interface = iwl_mvm_mac_add_interface,
+ .remove_interface = iwl_mvm_mac_remove_interface,
+ .config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
+ .configure_filter = iwl_mvm_configure_filter,
+ .config_iface_filter = iwl_mvm_config_iface_filter,
+ .bss_info_changed = iwl_mvm_bss_info_changed,
+ .hw_scan = iwl_mvm_mac_hw_scan,
+ .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
+ .sta_state = iwl_mvm_mac_sta_state,
+ .sta_notify = iwl_mvm_mac_sta_notify,
+ .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
+ .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .link_sta_rc_update = iwl_mvm_sta_rc_update,
+ .conf_tx = iwl_mvm_mac_conf_tx,
+ .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx,
+ .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
+ .flush = iwl_mvm_mac_flush,
+ .flush_sta = iwl_mvm_mac_flush_sta,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
+ .set_key = iwl_mvm_mac_set_key,
+ .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+ .remain_on_channel = iwl_mvm_roc,
+ .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+ .add_chanctx = iwl_mvm_add_chanctx,
+ .remove_chanctx = iwl_mvm_remove_chanctx,
+ .change_chanctx = iwl_mvm_change_chanctx,
+ .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+ .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
+
+ .start_ap = iwl_mvm_start_ap,
+ .stop_ap = iwl_mvm_stop_ap,
+ .join_ibss = iwl_mvm_start_ibss,
+ .leave_ibss = iwl_mvm_stop_ibss,
+
+ .tx_last_beacon = iwl_mvm_tx_last_beacon,
+
+ .set_tim = iwl_mvm_set_tim,
+
+ .channel_switch = iwl_mvm_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
+ .post_channel_switch = iwl_mvm_post_channel_switch,
+ .abort_channel_switch = iwl_mvm_abort_channel_switch,
+ .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
+
+ .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+ .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+ .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
+
+ .event_callback = iwl_mvm_mac_event_callback,
+
+ .sync_rx_queues = iwl_mvm_sync_rx_queues,
+
+#ifdef CONFIG_PM_SLEEP
+ /* look at d3.c */
+ .suspend = iwl_mvm_suspend,
+ .resume = iwl_mvm_resume,
+ .set_wakeup = iwl_mvm_set_wakeup,
+ .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+ .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+ .get_survey = iwl_mvm_mac_get_survey,
+ .sta_statistics = iwl_mvm_mac_sta_statistics,
+ .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
+ .start_pmsr = iwl_mvm_start_pmsr,
+ .abort_pmsr = iwl_mvm_abort_pmsr,
+
+ .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mvm_vif_add_debugfs,
+ .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs,
+#endif
+ .set_hw_timestamp = iwl_mvm_set_hw_timestamp,
+};
diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-key.c b/sys/contrib/dev/iwlwifi/mvm/mld-key.c
new file mode 100644
index 000000000000..ef0be44207e1
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mld-key.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022 - 2024 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <net/mac80211.h>
+#include "mvm.h"
+#include "fw/api/context.h"
+#include "fw/api/datapath.h"
+
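+/*
+ * Compute which firmware station(s) a key belongs on, as a bitmask of
+ * station IDs: AP-mode group keys go on the link's mcast/bcast
+ * stations, client-mode group keys on the AP station, and pairwise
+ * keys on the peer station itself.
+ */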
+static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info = &mvmvif->deflink;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (keyconf->link_id >= 0) {
+ link_info = mvmvif->link[keyconf->link_id];
+ if (!link_info)
+ return 0;
+ }
+
+ /* AP group keys are per link and should be on the mcast/bcast STA */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* IGTK/BIGTK to bcast STA */
+ if (keyconf->keyidx >= 4)
+ return BIT(link_info->bcast_sta.sta_id);
+ /* GTK for data to mcast STA */
+ return BIT(link_info->mcast_sta.sta_id);
+ }
+
+ /* for client mode use the AP STA also for group keys */
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mvmvif->ap_sta;
+
+ /* During remove the STA was removed and the group keys come later
+ * (which sounds like a bad sequence, but remember that to mac80211 the
+ * group keys have no sta pointer), so we don't have a STA now.
+ * Since this happens for group keys only, just use the link_info as
+ * the group keys are per link; make sure that is the case by checking
+ * we do have a link_id or are not doing MLO.
+ * Of course the same can be done during add as well, but we must do
+ * it during remove, since we don't have the mvmvif->ap_sta pointer.
+ */
+ if (!sta && (keyconf->link_id >= 0 || !ieee80211_vif_is_mld(vif)))
+ return BIT(link_info->ap_sta_id);
+
+ /* STA should be non-NULL now, but iwl_mvm_sta_fw_id_mask() checks */
+
+	/* pass the link_id so we filter by it, unless it's -1 (GTK on client) */
+ return iwl_mvm_sta_fw_id_mask(mvm, sta, keyconf->link_id);
+}
+
+u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
+ u32 flags = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!pairwise)
+ flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP104:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_WEP40:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_WEP;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_CCMP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
+ break;
+ }
+
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mvmvif->ap_sta;
+
+	/*
+	 * If we are installing an iGTK (in AP or STA mode), we need to tell
+	 * the firmware this key will en/decrypt MGMT frames.
+	 * Same goes if we are installing a pairwise key for an MFP station.
+	 * A group key that is not an iGTK, however, will not be used for
+	 * MGMT frames.
+	 */
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
+ return flags;
+}
+
+struct iwl_mvm_sta_key_update_data {
+ struct ieee80211_sta *sta;
+ u32 old_sta_mask;
+ u32 new_sta_mask;
+ int err;
+};
+
+static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ struct iwl_mvm_sta_key_update_data *data = _data;
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
+ .u.modify.key_id = cpu_to_le32(key->keyidx),
+ .u.modify.key_flags =
+ cpu_to_le32(iwl_mvm_get_sec_flags(mvm, vif, sta, key)),
+ };
+ int err;
+
+ /* only need to do this for pairwise keys (link_id == -1) */
+ if (sta != data->sta || key->link_id >= 0)
+ return;
+
+ err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+
+ if (err)
+ data->err = err;
+}
+
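+/*
+ * When the firmware stations behind a station change (e.g. on a link
+ * switch), walk all of its pairwise keys and retarget them from the
+ * old station mask to the new one with a SEC_KEY_CMD modify action.
+ */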
+int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_mvm_sta_key_update_data data = {
+ .sta = sta,
+ .old_sta_mask = old_sta_mask,
+ .new_sta_mask = new_sta_mask,
+ };
+
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key,
+ &data);
+ return data.err;
+}
+
+static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask,
+ u32 key_flags, u32 keyidx, u32 flags)
+{
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .u.remove.sta_mask = cpu_to_le32(sta_mask),
+ .u.remove.key_id = cpu_to_le32(keyidx),
+ .u.remove.key_flags = cpu_to_le32(key_flags),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_id, flags, sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .u.add.sta_mask = cpu_to_le32(sta_mask),
+ .u.add.key_id = cpu_to_le32(keyconf->keyidx),
+ .u.add.key_flags = cpu_to_le32(key_flags),
+ .u.add.tx_seq = cpu_to_le64(atomic64_read(&keyconf->tx_pn)),
+ };
+ int max_key_len = sizeof(cmd.u.add.key);
+ int ret;
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ max_key_len -= IWL_SEC_WEP_KEY_OFFSET;
+
+ if (WARN_ON(keyconf->keylen > max_key_len))
+ return -EINVAL;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ memcpy(cmd.u.add.key + IWL_SEC_WEP_KEY_OFFSET, keyconf->key,
+ keyconf->keylen);
+ else
+ memcpy(cmd.u.add.key, keyconf->key, keyconf->keylen);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ memcpy(cmd.u.add.tkip_mic_rx_key,
+ keyconf->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ 8);
+ memcpy(cmd.u.add.tkip_mic_tx_key,
+ keyconf->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ 8);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+	/*
+	 * For WEP, the same key is used for multicast and unicast, so we need
+	 * to upload it again. If this fails, remove the original as well.
+	 */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ cmd.u.add.key_flags ^= cpu_to_le32(IWL_SEC_KEY_FLAG_MCAST_KEY);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ if (ret)
+ __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags,
+ keyconf->keyidx, 0);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_sec_key_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf);
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link = NULL;
+ int ret;
+
+ if (keyconf->keyidx == 4 || keyconf->keyidx == 5) {
+ unsigned int link_id = 0;
+
+		/* keyconf->link_id is -1 for non-MLO, so default to link 0 */
+ if (keyconf->link_id >= 0)
+ link_id = keyconf->link_id;
+
+ mvm_link = mvmvif->link[link_id];
+ if (WARN_ON(!mvm_link))
+ return -EINVAL;
+
+ if (mvm_link->igtk) {
+ IWL_DEBUG_MAC80211(mvm, "remove old IGTK %d\n",
+ mvm_link->igtk->keyidx);
+ ret = iwl_mvm_sec_key_del(mvm, vif, sta,
+ mvm_link->igtk);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to remove old IGTK (ret=%d)\n",
+ ret);
+ }
+
+ WARN_ON(mvm_link->igtk);
+ }
+
+ ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
+ if (ret)
+ return ret;
+
+ if (mvm_link)
+ mvm_link->igtk = keyconf;
+
+	/* We don't really need this, but it must not be invalid:
+	 * after switching links multiple times it might otherwise
+	 * end up invalid by the time the key is removed.
+	 */
+ keyconf->hw_key_idx = 0;
+
+ return 0;
+}
+
+static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u32 flags)
+{
+ u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf);
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ if (keyconf->keyidx == 4 || keyconf->keyidx == 5) {
+ struct iwl_mvm_vif_link_info *mvm_link;
+ unsigned int link_id = 0;
+
+		/* keyconf->link_id is -1 for non-MLO, so default to link 0 */
+ if (keyconf->link_id >= 0)
+ link_id = keyconf->link_id;
+
+ mvm_link = mvmvif->link[link_id];
+ if (WARN_ON(!mvm_link))
+ return -EINVAL;
+
+ if (mvm_link->igtk == keyconf) {
+ /* no longer in HW - mark for later */
+ mvm_link->igtk->hw_key_idx = STA_KEY_IDX_INVALID;
+ mvm_link->igtk = NULL;
+ }
+ }
+
+ ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ flags);
+ if (ret)
+ return ret;
+
+ /* For WEP, delete the key again as unicast */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ key_flags ^= IWL_SEC_KEY_FLAG_MCAST_KEY;
+ ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags,
+ keyconf->keyidx, flags);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
+int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ return _iwl_mvm_sec_key_del(mvm, vif, sta, keyconf, 0);
+}
+
+static void iwl_mvm_sec_key_remove_ap_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ unsigned int link_id = (uintptr_t)data;
+
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ if (sta)
+ return;
+
+ if (key->link_id >= 0 && key->link_id != link_id)
+ return;
+
+ _iwl_mvm_sec_key_del(mvm, vif, NULL, key, CMD_ASYNC);
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link,
+ unsigned int link_id)
+{
+ u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
+ u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0);
+
+ if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION ||
+ link->ap_sta_id == IWL_INVALID_STA))
+ return;
+
+ if (!sec_key_ver)
+ return;
+
+ ieee80211_iter_keys(mvm->hw, vif,
+ iwl_mvm_sec_key_remove_ap_iter,
+ (void *)(uintptr_t)link_id);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-mac.c b/sys/contrib/dev/iwlwifi/mvm/mld-mac.c
new file mode 100644
index 000000000000..2d116a41913c
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mld-mac.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022 - 2025 Intel Corporation
+ */
+#include "mvm.h"
+
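+/*
+ * MAC_CONFIG_CMD version 2 carries the HE/EHT support flags as
+ * little-endian fields in wifi_gen_v2, while version 3 uses plain
+ * one-byte fields in wifi_gen; the helper below sets the right one.
+ */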
+static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ int cmd_ver)
+{
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_ap_support = 1;
+ } else {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_support = 1;
+ }
+}
+
+static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ int cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ MAC_CONFIG_CMD), 1);
+
+ if (WARN_ON(cmd_ver > 3))
+ return;
+
+ cmd->id_and_color = cpu_to_le32(mvmvif->id);
+ cmd->action = cpu_to_le32(action);
+
+ cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif));
+
+ memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
+
+ cmd->wifi_gen_v2.he_support = 0;
+ cmd->wifi_gen_v2.eht_support = 0;
+
+ /* should be set by specific context type handler */
+ cmd->filter_flags = 0;
+
+ cmd->nic_not_ack_enabled =
+ cpu_to_le32(!iwl_mvm_is_nic_ack_enabled(mvm, vif));
+
+ if (iwlwifi_mod_params.disable_11ax)
+ return;
+
+ /* If we have MLO enabled, then the firmware needs to enable
+ * address translation for the station(s) we add. That depends
+ * on having EHT enabled in firmware, which in turn depends on
+ * mac80211 in the code below.
+ * However, mac80211 doesn't enable HE/EHT until it has parsed
+ * the association response successfully, so just skip all that
+ * and enable both when we have MLO.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
+ return;
+ }
+
+ rcu_read_lock();
+ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) {
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (!link_conf)
+ continue;
+
+ if (link_conf->he_support)
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
+
+		/* It's not reasonable to have EHT without HE, and the FW API
+		 * doesn't support it. Ignore EHT in this case.
+		 */
+ if (!link_conf->he_support && link_conf->eht_support)
+ continue;
+
+ if (link_conf->eht_support) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+
+static int iwl_mvm_mld_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_mac_config_cmd *cmd)
+{
+ int ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
+ 0, sizeof(*cmd), cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send MAC_CONFIG_CMD (action:%d): %d\n",
+ le32_to_cpu(cmd->action), ret);
+ return ret;
+}
+
+static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off)
+{
+ struct iwl_mac_config_cmd cmd = {};
+ u16 esr_transition_timeout;
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+	/*
+	 * We always want to hear MCAST frames; if we're not authorized yet,
+	 * we'll drop them.
+	 */
+ cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP);
+
+ if (vif->p2p)
+ cmd.client.ctwin =
+ iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif);
+
+ if (vif->cfg.assoc && !force_assoc_off) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ cmd.client.is_assoc = 1;
+
+ if (!mvmvif->authorized &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO))
+ cmd.client.data_policy |=
+ cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE);
+
+ } else {
+ cmd.client.is_assoc = 0;
+
+		/* Allow beacons to pass through as long as we are not
+		 * associated or do not have the DTIM period information.
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+ }
+
+ cmd.client.assoc_id = cpu_to_le16(vif->cfg.aid);
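+	/* for an MLD connection, pass the EML capabilities to the FW:
+	 * the eSR transition timeout (capped at the 128 TU encoding)
+	 * and the medium synchronization delay
+	 */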
+ if (ieee80211_vif_is_mld(vif)) {
+ esr_transition_timeout =
+ u16_get_bits(vif->cfg.eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ cmd.client.esr_transition_timeout =
+ min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
+ esr_transition_timeout);
+ cmd.client.medium_sync_delay =
+ cpu_to_le16(vif->cfg.eml_med_sync_delay);
+ }
+
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
+ cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+ if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax)
+ cmd.client.data_policy |=
+ cpu_to_le16(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif));
+
+ return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_config_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+
+ return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mld_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_config_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+
+ return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_config_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ cmd.p2p_dev.is_disc_extended =
+ iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
+
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+	 * P2P service discovery using action frames.
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
+
+ return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mld_mac_ctxt_cmd_ap_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_config_cmd cmd = {};
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+ /* Fill the common data for all mac context types */
+ iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+ iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif,
+ &cmd.filter_flags,
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ,
+ MAC_CFG_FILTER_ACCEPT_BEACON);
+
+ return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mld_mac_ctx_send(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 action, bool force_assoc_off)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ return iwl_mvm_mld_mac_ctxt_cmd_sta(mvm, vif, action,
+ force_assoc_off);
+ case NL80211_IFTYPE_AP:
+ return iwl_mvm_mld_mac_ctxt_cmd_ap_go(mvm, vif, action);
+ case NL80211_IFTYPE_MONITOR:
+ return iwl_mvm_mld_mac_ctxt_cmd_listener(mvm, vif, action);
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return iwl_mvm_mld_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+ case NL80211_IFTYPE_ADHOC:
+ return iwl_mvm_mld_mac_ctxt_cmd_ibss(mvm, vif, action);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+ true);
+ if (ret)
+ return ret;
+
+ /* will only do anything at resume from D3 time */
+ iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
+ mvmvif->uploaded = true;
+ return 0;
+}
+
+int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force_assoc_off)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ return iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+ force_assoc_off);
+}
+
+int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_config_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .id_and_color = cpu_to_le32(mvmvif->id),
+ };
+ int ret;
+
+ if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+ vif->addr, ieee80211_vif_type_p2p(vif)))
+ return -EIO;
+
+ ret = iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ mvmvif->uploaded = false;
+
+ return 0;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c
new file mode 100644
index 000000000000..bf24f8cb673e
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c
@@ -0,0 +1,1422 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022-2025 Intel Corporation
+ */
+#include "mvm.h"
+
+static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+ int i;
+
+ guard(mvm)(mvm);
+
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
+ mvmvif->mvm = mvm;
+
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+
+ /* Not much to do here. The stack will not allow interface
+ * types or combinations that we didn't advertise, so we
+ * don't really have to check the types.
+ */
+
+ /* make sure that beacon statistics don't go backwards with FW reset */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ mvmvif->link[i]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[i]->beacon_stats.num_beacons;
+
+ /* Allocate resources for the MAC context, and add it to the fw */
+ ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+ if (ret)
+ return ret;
+
+ rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
+
+ mvmvif->features |= hw->netdev_features;
+
+ /* reset deflink MLO parameters */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
+
+ ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
+ if (ret)
+ return ret;
+
+ /* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_remove_mac;
+
+ if (!mvm->bf_allowed_vif &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+ mvm->bf_allowed_vif = mvmvif;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+	/* We want link[0] to point to the default link, unless we have
+	 * MLO, in which case this will be modified later by
+	 * .change_vif_links(). If we are in the restart flow with an MLD
+	 * connection, we will wait for .change_vif_links() to set up the
+	 * links.
+	 */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ !ieee80211_vif_is_mld(vif)) {
+ mvmvif->link[0] = &mvmvif->deflink;
+
+ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out_free_bf;
+ }
+
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mvm->p2p_device_vif = vif;
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ goto out_free_bf;
+
+ iwl_mvm_tcm_add_vif(mvm, vif);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvm->monitor_on = true;
+ ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
+ }
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !mvm->csme_vif && mvm->mei_registered) {
+ iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
+ iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
+ mvm->csme_vif = vif;
+ }
+
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ return 0;
+
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+ out_remove_mac:
+ mvmvif->link[0] = NULL;
+ iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
+ return ret;
+}
+
+static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_probe_resp_data *probe_data;
+
+ iwl_mvm_prepare_mac_removal(mvm, vif);
+
+ if (!(vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC))
+ iwl_mvm_tcm_rm_vif(mvm, vif);
+
+ guard(mvm)(mvm);
+
+ if (vif == mvm->csme_vif) {
+ iwl_mei_set_netdev(NULL);
+ mvm->csme_vif = NULL;
+ }
+
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
+
+ if (vif->bss_conf.ftm_responder)
+ memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
+
+ iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
+
+ iwl_mvm_power_update_mac(mvm);
+
+ /* Before the interface removal, mac80211 would cancel the ROC, and the
+ * ROC worker would be scheduled if needed. The worker would be flushed
+	 * in iwl_mvm_prepare_mac_removal(), and thus at this point the link is
+	 * not active. So we only need to remove the link.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (mvmvif->deflink.phy_ctxt) {
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ mvmvif->deflink.phy_ctxt = NULL;
+ }
+ mvm->p2p_device_vif = NULL;
+ iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
+ } else {
+ iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+ }
+
+ iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
+
+ RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
+
+ probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
+ lockdep_is_held(&mvm->mutex));
+ RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mvm->monitor_on = false;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
+ }
+}
+
+static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
+{
+ unsigned int n_active = 0;
+ int i;
+
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
+ n_active++;
+ }
+
+ return n_active;
+}
+
+static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id, ret = 0;
+
+ mvmvif->esr_active = true;
+
+ /* Indicate to mac80211 that EML is enabled */
+ vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+
+ iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
+ IEEE80211_SMPS_OFF);
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
+
+ if (!link->phy_ctxt)
+ continue;
+
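+		/* while in EMLSR, drive the RLC with 2 static / 2 dynamic
+		 * chains on each link's PHY; iwl_mvm_esr_mode_inactive()
+		 * restores the chanctx values
+		 */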
+ ret = iwl_mvm_phy_send_rlc(mvm, link->phy_ctxt, 2, 2);
+ if (ret)
+ break;
+
+ link->phy_ctxt->rlc_disabled = true;
+ }
+
+ if (vif->active_links == mvmvif->link_selection_res &&
+ !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary))))
+ mvmvif->primary_link = mvmvif->link_selection_primary;
+ else
+ mvmvif->primary_link = __ffs(vif->active_links);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+
+ return ret;
+}
+
+static int
+__iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+{
+ u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
+ unsigned int link_id = link_conf->link_id;
+ int ret;
+
+ if (WARN_ON_ONCE(!mvmvif->link[link_id]))
+ return -EINVAL;
+
+ /* if the assigned one was not counted yet, count it now */
+ if (!mvmvif->link[link_id]->phy_ctxt)
+ n_active++;
+
+	/* MAC parameters such as HE support can change at this stage.
+	 * For a station, we need to first configure the correct state
+	 * from drv_sta_state, and only after that update the MAC config.
+	 */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
+ if (ret) {
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+ return -EINVAL;
+ }
+ }
+
+ mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
+
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
+ mvmvif->link[link_id]->listen_lmac = true;
+ ret = iwl_mvm_esr_mode_active(mvm, vif);
+ if (ret) {
+ IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
+ goto out;
+ }
+ }
+
+ if (switching_chanctx) {
+ /* reactivate if we turned this off during channel switch */
+ if (vif->type == NL80211_IFTYPE_AP)
+ mvmvif->ap_ibss_active = true;
+ }
+
+ /* send it first with phy context ID */
+ ret = iwl_mvm_link_changed(mvm, vif, link_conf, 0, false);
+ if (ret)
+ goto out;
+
+ /*
+	 * If we're link switching (the link is not active yet), we'll
+	 * activate it in the firmware later, on a link-info change that
+	 * mac80211 guarantees to send after the stations are set up.
+ */
+ if (ieee80211_vif_link_active(vif, link_conf->link_id)) {
+ ret = iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_RATES_INFO,
+ true);
+ if (ret)
+ goto out;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+
+ /*
+ * Power state must be updated before quotas,
+ * otherwise fw will complain.
+ */
+ iwl_mvm_power_update_mac(mvm);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = iwl_mvm_mld_add_snif_sta(mvm, vif, link_conf);
+ if (ret)
+ goto deactivate;
+ }
+
+ return 0;
+
+deactivate:
+ iwl_mvm_link_changed(mvm, vif, link_conf, LINK_CONTEXT_MODIFY_ACTIVE,
+ false);
+out:
+ mvmvif->link[link_id]->phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
+ return ret;
+}
+
+static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ int ret;
+
+ ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
+ true);
+ /*
+		 * Don't activate this link if we failed to exit EMLSR on
+		 * the BSS interface.
+ */
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
+}
+
+static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ int link_id, ret = 0;
+
+ mvmvif->esr_active = false;
+
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
+ IEEE80211_SMPS_AUTOMATIC);
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 static_chains, dynamic_chains;
+
+ mvmvif->link[link_id]->listen_lmac = false;
+
+ rcu_read_lock();
+
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+
+ if (!chanctx_conf || !phy_ctxt) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ phy_ctxt->rlc_disabled = false;
+ static_chains = chanctx_conf->rx_chains_static;
+ dynamic_chains = chanctx_conf->rx_chains_dynamic;
+
+ rcu_read_unlock();
+
+ ret = iwl_mvm_phy_send_rlc(mvm, phy_ctxt, static_chains,
+ dynamic_chains);
+ if (ret)
+ break;
+ }
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_ESR_LINK_DOWN,
+ NULL);
+
+ return ret;
+}
+
+static void
+__iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx)
+
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
+ unsigned int link_id = link_conf->link_id;
+
+ /* shouldn't happen, but verify link_id is valid before accessing */
+ if (WARN_ON_ONCE(!mvmvif->link[link_id]))
+ return;
+
+ if (vif->type == NL80211_IFTYPE_AP && switching_chanctx) {
+ mvmvif->csa_countdown = false;
+
+ /* Set CS bit on all the stations */
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
+
+ /* Save blocked iface, the timeout is set on the next beacon */
+ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
+
+ mvmvif->ap_ibss_active = false;
+ }
+
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
+ int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
+
+ if (ret)
+ IWL_ERR(mvm, "failed to deactivate ESR mode (%d)\n",
+ ret);
+ }
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ iwl_mvm_mld_rm_snif_sta(mvm, vif);
+
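+	/* when only switching the chanctx, the link keeps its PHY context
+	 * pointer for the upcoming re-assignment
+	 */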
+ if (switching_chanctx)
+ return;
+ mvmvif->link[link_id]->phy_ctxt = NULL;
+ iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ mutex_lock(&mvm->mutex);
+ __iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
+ /* in the non-MLD case, remove/re-add the link to clean up FW state */
+ if (!ieee80211_vif_is_mld(vif) && !mvmvif->ap_sta &&
+ !WARN_ON_ONCE(vif->cfg.assoc)) {
+ iwl_mvm_remove_link(mvm, vif, link_conf);
+ iwl_mvm_add_link(mvm, vif, link_conf);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
+}
+
+static void
+iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *bss_info)
+{
+ u8 i;
+
+ /*
+	 * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT;
+	 * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE.
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.psd_local[0].power));
+
+	/* if not valid, mac80211 puts the default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i],
+ bss_info->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i],
+ bss_info->tpe.max_reg_client[0].power[i]);
+}
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[bss_conf->link_id];
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD);
+ u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return;
+
+ if (!link_info->active ||
+ link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ return;
+
+ if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ return;
+
+ cmd.link_id = cpu_to_le16(link_info->fw_link_id);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ if (is_ap) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
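+		/* mac80211's power_type starts at IEEE80211_REG_UNSET_AP = 0
+		 * (already handled above), so shifting it down by one is
+		 * assumed to match the FW's AP type enumeration
+		 */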
+ cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1);
+ iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
+}
+
+static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ guard(mvm)(mvm);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf, true);
+
+ /* Send the beacon template */
+ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
+ if (ret)
+ return ret;
+
+	/* the link should already be activated when assigning the chan context */
+ ret = iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ALL &
+ ~LINK_CONTEXT_MODIFY_ACTIVE,
+ true);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf);
+ if (ret)
+ return ret;
+
+ /* Send the bcast station. At this stage the TBTT and DTIM time
+	 * events are added and applied to the scheduler.
+ */
+ ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, link_conf);
+ if (ret)
+ goto out_rm_mcast;
+
+ if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret))
+ goto out_failed;
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ /* we don't support TDLS during DCM */
+ if (iwl_mvm_phy_ctx_count(mvm) > 1)
+ iwl_mvm_teardown_tdls_peers(mvm);
+
+ iwl_mvm_ftm_restart_responder(mvm, vif, link_conf);
+
+ return 0;
+
+out_failed:
+ iwl_mvm_power_update_mac(mvm);
+ mvmvif->ap_ibss_active = false;
+ iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf);
+out_rm_mcast:
+ iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
+ return ret;
+}
+
+static int iwl_mvm_mld_start_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return iwl_mvm_mld_start_ap_ibss(hw, vif, link_conf);
+}
+
+static int iwl_mvm_mld_start_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mvm_mld_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ iwl_mvm_stop_ap_ibss_common(mvm, vif);
+
+ /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+ if (vif->p2p && mvm->p2p_device_vif)
+ iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false);
+
+ iwl_mvm_ftm_responder_clear(mvm, vif);
+
+ iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf);
+ iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
+
+ iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ iwl_mvm_mld_stop_ap_ibss(hw, vif, link_conf);
+}
+
+static void iwl_mvm_mld_stop_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ iwl_mvm_mld_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ static const struct iwl_mvm_sta_state_ops callbacks = {
+ .add_sta = iwl_mvm_mld_add_sta,
+ .update_sta = iwl_mvm_mld_update_sta,
+ .rm_sta = iwl_mvm_mld_rm_sta,
+ .mac_ctxt_changed = iwl_mvm_mld_mac_ctxt_changed,
+ };
+
+ return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state,
+ &callbacks);
+}
+
+static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_bss_conf *other_link;
+ int link_id;
+
+ /* Exit EMLSR if links don't have equal bandwidths */
+ for_each_vif_active_link(vif, other_link, link_id) {
+ if (link_id == link_conf->link_id)
+ continue;
+ if (link_conf->chanreq.oper.width ==
+ other_link->chanreq.oper.width)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool has_he, has_eht;
+ u32 link_changes = 0;
+ int ret;
+
+ if (WARN_ON_ONCE(!mvmvif->link[link_conf->link_id]))
+ return;
+
+	/* not yet marked active in the vif means we're in a link switch */
+ if (!ieee80211_vif_link_active(vif, link_conf->link_id) &&
+ vif->cfg.assoc && mvmvif->link[link_conf->link_id]->phy_ctxt)
+ link_changes |= LINK_CONTEXT_MODIFY_ACTIVE;
+
+ has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax;
+ has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be;
+
+ /* Update EDCA params */
+ if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos)
+ link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ if (changes & BSS_CHANGED_ERP_SLOT)
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (vif->cfg.assoc && (has_he || has_eht)) {
+ IWL_DEBUG_MAC80211(mvm, "Associated in HE mode\n");
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+ }
+
+ if ((changes & BSS_CHANGED_BANDWIDTH) &&
+ ieee80211_vif_link_active(vif, link_conf->link_id) &&
+ mvmvif->esr_active &&
+ !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_BANDWIDTH,
+ iwl_mvm_get_primary_link(vif));
+
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
+ link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+ if (link_changes) {
+ ret = iwl_mvm_link_changed(mvm, vif, link_conf, link_changes,
+ true);
+ if (ret)
+ IWL_ERR(mvm, "failed to update link\n");
+ }
+
+ ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid,
+ ETH_ALEN);
+
+ iwl_mvm_bss_info_changed_station_common(mvm, vif, link_conf, changes);
+}
+
+static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif)
+{
+ int i;
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ if (mvmvif->link[i]->ap_sta_id != IWL_INVALID_STA)
+ return true;
+ }
+
+ return false;
+}
+
+static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ bool protect = false;
+ unsigned int i;
+ int ret;
+
+ /* This might get called without active links during the
+ * chanctx switch, but we don't care about it anyway.
+ */
+ if (changes == BSS_CHANGED_IDLE)
+ return;
+
+ ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
+ if (ret)
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ mvmvif->associated = vif->cfg.assoc;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
+ /*
+			 * Clear statistics to get a clean beacon counter, and ask for
+ * periodic statistics, as they are needed for link
+ * selection and RX OMI decisions.
+ */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+				/* We are in assoc, so only one link is
+				 * active: the association link.
+ */
+ unsigned int link_id =
+ ffs(vif->active_links) - 1;
+
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0, link_id);
+ }
+
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
+
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
+			/* If the update fails, SF might be running in associated
+			 * mode while disassociated, which is forbidden.
+ */
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+ }
+
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ }
+
+ if (changes & BSS_CHANGED_PS) {
+ iwl_mvm_smps_workaround(mvm, vif, false);
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+
+ if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
+ ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
+}
+
+static void
+iwl_mvm_mld_link_info_changed_ap_ibss(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 link_changes = LINK_CONTEXT_MODIFY_PROTECT_FLAGS |
+ LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ /* Changes will be applied when the AP/IBSS is started */
+ if (!mvmvif->ap_ibss_active)
+ return;
+
+ if (link_conf->he_support)
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+
+ if (changes & BSS_CHANGED_ERP_SLOT)
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_SLOT |
+ BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS |
+ BSS_CHANGED_HE_BSS_COLOR) &&
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ link_changes, true))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+ /* Need to send a new beacon template to the FW */
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
+
+ /* FIXME: need to decide if we need FTM responder per link */
+ if (changes & BSS_CHANGED_FTM_RESPONDER) {
+ int ret = iwl_mvm_ftm_start_responder(mvm, vif, link_conf);
+
+ if (ret)
+ IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n",
+ ret);
+ }
+}
+
+static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mvm_mld_link_info_changed_station(mvm, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mvm_mld_link_info_changed_ap_ibss(mvm, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mvm_update_mu_groups(mvm, vif);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
+ link_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, link_conf, link_conf->txpower);
+ }
+}
+
+static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changes)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+
+ if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes);
+}
+
+static int
+iwl_mvm_mld_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ static const struct iwl_mvm_switch_vif_chanctx_ops ops = {
+ .__assign_vif_chanctx = __iwl_mvm_mld_assign_vif_chanctx,
+ .__unassign_vif_chanctx = __iwl_mvm_mld_unassign_vif_chanctx,
+ };
+
+ return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops);
+}
+
+static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int filter_flags,
+ unsigned int changed_flags)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We only support filtering of probe requests */
+ if (!(changed_flags & FIF_PROBE_REQ))
+ return;
+
+ /* Supported only for p2p client interfaces */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc ||
+ !vif->p2p)
+ return;
+
+ guard(mvm)(mvm);
+ iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
+}
+
+static int
+iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+
+ if (!mvm_link)
+ return -EINVAL;
+
+ mvm_link->queue_params[ac] = *params;
+
+	/* No need to update right away, we'll get BSS_CHANGED_QOS.
+	 * The exception is the P2P_DEVICE interface, which needs an
+	 * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ guard(mvm)(mvm);
+ return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS,
+ true);
+ }
+ return 0;
+}
+
+static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* The PHY context ID might have changed, so we need to set it */
+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
+ if (WARN(ret, "Failed to set PHY context ID\n"))
+ return ret;
+
+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_RATES_INFO,
+ true);
+
+ if (WARN(ret, "Failed linking P2P_DEVICE\n"))
+ return ret;
+
+ /* The station and queue allocation must be done only after the linking
+ * is done, as otherwise the FW might incorrectly configure its state.
+ */
+ return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
+}
+
+static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type)
+{
+ static const struct iwl_mvm_roc_ops ops = {
+ .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
+ .link = iwl_mvm_mld_roc_link,
+ };
+
+ return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
+}
+
+static int
+iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u16 removed = old_links & ~new_links;
+ u16 added = new_links & ~old_links;
+ int err, i;
+
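+	/* allocate the new per-link data before taking the mutex, so an
+	 * allocation failure can bail out without touching any driver or
+	 * firmware state
+	 */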
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ break;
+
+ if (!(added & BIT(i)))
+ continue;
+ new_link[i] = kzalloc(sizeof(*new_link[i]), GFP_KERNEL);
+ if (!new_link[i]) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ iwl_mvm_init_link(new_link[i]);
+ }
+
+ mutex_lock(&mvm->mutex);
+
+ /* If we're in RESTART flow, the default link wasn't added in
+ * drv_add_interface(), and link[0] doesn't point to it.
+ */
+ if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status)) {
+ err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
+ if (err)
+ goto out_err;
+ mvmvif->link[0] = NULL;
+ }
+
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (removed & BIT(i)) {
+ struct ieee80211_bss_conf *link_conf = old[i];
+
+ err = iwl_mvm_disable_link(mvm, vif, link_conf);
+ if (err)
+ goto out_err;
+ kfree(mvmvif->link[i]);
+ mvmvif->link[i] = NULL;
+ } else if (added & BIT(i)) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status))
+ mvmvif->link[i] = new_link[i];
+ new_link[i] = NULL;
+ err = iwl_mvm_add_link(mvm, vif, link_conf);
+ if (err)
+ goto out_err;
+ }
+ }
+
+ err = 0;
+ if (new_links == 0) {
+ mvmvif->link[0] = &mvmvif->deflink;
+ err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (err == 0)
+ mvmvif->primary_link = 0;
+ } else if (!(new_links & BIT(mvmvif->primary_link))) {
+ /*
+		 * Ensure we always have a valid primary_link; the real
+		 * decision happens later when the PHY is activated.
+ */
+ mvmvif->primary_link = __ffs(new_links);
+ }
+
+out_err:
+ /* we really don't have a good way to roll back here ... */
+ mutex_unlock(&mvm->mutex);
+
+free:
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
+ kfree(new_link[i]);
+ return err;
+}
+
+static int
+iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
+}
+
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ const struct wiphy_iftype_ext_capab *ext_capa;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
+ hweight16(ieee80211_vif_usable_links(vif)) == 1)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
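+	/* the local interface type must advertise EMLSR support as well */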
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ return (ext_capa &&
+ (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP));
+}
+
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+
+ if (n_links <= 1)
+ return true;
+
+ guard(mvm)(mvm);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif))
+ return false;
+
+ /* If it is an eSR device, check that we can enter eSR */
+ return iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
+ iwl_mvm_vif_has_esr_cap(mvm, vif);
+}
+
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+	/* Verify all TIDs are mapped to the same set of links */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
+static int
+iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ if (mvmvif->esr_active) {
+ u8 primary = iwl_mvm_get_primary_link(vif);
+ int selected;
+
+ /* prefer primary unless quiet CSA on it */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mvm_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+ /*
+		 * Remember to tell the firmware that this link can't tx.
+		 * Note that this logic seems to be unrelated to esr, but it
+		 * really is needed only when esr is active. When we have a
+		 * single link, the firmware will handle all this on its own.
+		 * In multi-link scenarios, we can learn about the CSA from
+		 * another link, and this logic is too complex for the firmware
+		 * to track.
+		 * Since we want to de-activate the link that got a CSA, we
+		 * need to tell the firmware not to send any frame on that link,
+		 * as the firmware may not be aware that the link is under a CSA
+ * with mode=1 (no Tx allowed).
+ */
+ if (chsw->block_tx && mvmvif->link[chsw->link_id])
+ mvmvif->link[chsw->link_id]->csa_block_tx = true;
+
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * If we've not kept the link active that's doing the CSA
+ * then we don't need to do anything else, just return.
+ */
+ if (selected != chsw->link_id)
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ }
+
+ ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+#define IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT (5 * HZ)
+
+static void iwl_mvm_mld_prep_add_interface(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_vif *mvmvif;
+ int ret;
+
+ IWL_DEBUG_MAC80211(mvm, "prep_add_interface: type=%u\n",
+ type);
+
+ if (IS_ERR_OR_NULL(bss_vif) ||
+ !(type == NL80211_IFTYPE_AP ||
+ type == NL80211_IFTYPE_P2P_GO ||
+ type == NL80211_IFTYPE_P2P_CLIENT))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
+ ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
+ if (ret)
+ return;
+
+ wiphy_delayed_work_queue(mvmvif->mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tmp_non_bss_wk,
+ IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT);
+}
+
+const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
+ .tx = iwl_mvm_mac_tx,
+ .wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
+ .ampdu_action = iwl_mvm_mac_ampdu_action,
+ .get_antenna = iwl_mvm_op_get_antenna,
+ .set_antenna = iwl_mvm_op_set_antenna,
+ .start = iwl_mvm_mac_start,
+ .reconfig_complete = iwl_mvm_mac_reconfig_complete,
+ .stop = iwl_mvm_mac_stop,
+ .add_interface = iwl_mvm_mld_mac_add_interface,
+ .remove_interface = iwl_mvm_mld_mac_remove_interface,
+ .config = iwl_mvm_mac_config,
+ .prepare_multicast = iwl_mvm_prepare_multicast,
+ .configure_filter = iwl_mvm_configure_filter,
+ .config_iface_filter = iwl_mvm_mld_config_iface_filter,
+ .link_info_changed = iwl_mvm_mld_link_info_changed,
+ .vif_cfg_changed = iwl_mvm_mld_vif_cfg_changed,
+ .hw_scan = iwl_mvm_mac_hw_scan,
+ .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+ .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
+ .sta_state = iwl_mvm_mld_mac_sta_state,
+ .sta_notify = iwl_mvm_mac_sta_notify,
+ .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
+ .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+ .link_sta_rc_update = iwl_mvm_sta_rc_update,
+ .conf_tx = iwl_mvm_mld_mac_conf_tx,
+ .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx,
+ .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
+ .flush = iwl_mvm_mac_flush,
+ .flush_sta = iwl_mvm_mac_flush_sta,
+ .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+ .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
+ .set_key = iwl_mvm_mac_set_key,
+ .update_tkip_key = iwl_mvm_mac_update_tkip_key,
+ .remain_on_channel = iwl_mvm_mld_roc,
+ .cancel_remain_on_channel = iwl_mvm_cancel_roc,
+ .add_chanctx = iwl_mvm_add_chanctx,
+ .remove_chanctx = iwl_mvm_remove_chanctx,
+ .change_chanctx = iwl_mvm_change_chanctx,
+ .assign_vif_chanctx = iwl_mvm_mld_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mvm_mld_unassign_vif_chanctx,
+ .switch_vif_chanctx = iwl_mvm_mld_switch_vif_chanctx,
+
+ .start_ap = iwl_mvm_mld_start_ap,
+ .stop_ap = iwl_mvm_mld_stop_ap,
+ .join_ibss = iwl_mvm_mld_start_ibss,
+ .leave_ibss = iwl_mvm_mld_stop_ibss,
+
+ .tx_last_beacon = iwl_mvm_tx_last_beacon,
+
+ .channel_switch = iwl_mvm_channel_switch,
+ .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
+ .post_channel_switch = iwl_mvm_post_channel_switch,
+ .abort_channel_switch = iwl_mvm_abort_channel_switch,
+ .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
+
+ .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+ .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+ .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
+
+ .event_callback = iwl_mvm_mac_event_callback,
+
+ .sync_rx_queues = iwl_mvm_sync_rx_queues,
+
+#ifdef CONFIG_PM_SLEEP
+ /* look at d3.c */
+ .suspend = iwl_mvm_suspend,
+ .resume = iwl_mvm_resume,
+ .set_wakeup = iwl_mvm_set_wakeup,
+ .set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+ .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+ .get_survey = iwl_mvm_mac_get_survey,
+ .sta_statistics = iwl_mvm_mac_sta_statistics,
+ .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
+ .start_pmsr = iwl_mvm_start_pmsr,
+ .abort_pmsr = iwl_mvm_abort_pmsr,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mvm_vif_add_debugfs,
+ .link_add_debugfs = iwl_mvm_link_add_debugfs,
+ .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs,
+#endif
+ .set_hw_timestamp = iwl_mvm_set_hw_timestamp,
+
+ .change_vif_links = iwl_mvm_mld_change_vif_links,
+ .change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
+ .prep_add_interface = iwl_mvm_mld_prep_add_interface,
+};
diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-sta.c b/sys/contrib/dev/iwlwifi/mvm/mld-sta.c
new file mode 100644
index 000000000000..e1010521c3ea
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mld-sta.c
@@ -0,0 +1,1228 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022-2025 Intel Corporation
+ */
+#include "mvm.h"
+#include "time-sync.h"
+#include "sta.h"
+
+u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int filter_link_id)
+{
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ u32 result = 0;
+
+ if (!sta)
+ return 0;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+
+ /* it's easy when the STA is not an MLD */
+ if (!sta->valid_links)
+ return BIT(mvmsta->deflink.sta_id);
+
+ /* but if it is an MLD, get the mask of all the FW STAs it has ... */
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ /* unless we have a specific link in mind */
+ if (filter_link_id >= 0 && link_id != filter_link_id)
+ continue;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!mvm_link_sta)
+ continue;
+
+ result |= BIT(mvm_link_sta->sta_id);
+ }
+
+ return result;
+}
+
+static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
+ struct iwl_sta_cfg_cmd *cmd)
+{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
+ int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ?
+ sizeof(*cmd) :
+ sizeof(struct iwl_sta_cfg_cmd_v1);
+ int ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ 0, cmd_len, cmd);
+ if (ret)
+ IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
+ return ret;
+}
+
+/*
+ * Add an internal station to the FW table
+ */
+static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr, int link_id)
+{
+ struct iwl_sta_cfg_cmd cmd;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sta_id = cpu_to_le32((u8)sta->sta_id);
+
+ cmd.link_id = cpu_to_le32(link_id);
+
+ cmd.station_type = cpu_to_le32(sta->type);
+
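+	/* mark broadcast/management stations as MFP when the FW supports
+	 * it (likely needed for beacon protection)
+	 */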
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) &&
+ sta->type == STATION_TYPE_BCAST_MGMT)
+ cmd.mfp = cpu_to_le32(1);
+
+ if (addr) {
+ memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
+ memcpy(cmd.peer_link_address, addr, ETH_ALEN);
+ }
+
+ return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station, validate that the station is indeed known to the driver
+ * (sanity check only).
+ */
+static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id)
+{
+ struct iwl_remove_sta_cmd rm_sta_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ };
+ int ret;
+
+ /* Note: internal stations are marked as error values */
+ if (!rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) {
+ IWL_ERR(mvm, "Invalid station id %d\n", sta_id);
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
+ 0, sizeof(rm_sta_cmd), &rm_sta_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ u32 lmac_id)
+{
+ int ret;
+
+ struct iwl_aux_sta_cmd cmd = {
+ .sta_id = cpu_to_le32(sta->sta_id),
+ .lmac_id = cpu_to_le32(lmac_id),
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send AUX_STA_CMD\n");
+ return ret;
+}
+
+/*
+ * Add an internal sta to the FW table and enable its queue
+ */
+int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr, int link_id,
+ u16 *queue, u8 tid,
+ unsigned int *_wdg_timeout)
+{
+ int ret, txq;
+ unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout :
+ mvm->trans->mac_cfg->base->wd_timeout;
+
+ if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
+ return -ENOSPC;
+
+ if (sta->type == STATION_TYPE_AUX)
+ ret = iwl_mvm_add_aux_sta_to_fw(mvm, sta, link_id);
+ else
+ ret = iwl_mvm_mld_add_int_sta_to_fw(mvm, sta, addr, link_id);
+ if (ret)
+ return ret;
+
+ /*
+	 * For 22000 firmware and onward we cannot add a queue to a station
+	 * unknown to the firmware, so enable the queue here, after the
+	 * station was added.
+ */
+ txq = iwl_mvm_tvqm_enable_txq(mvm, NULL, sta->sta_id, tid,
+ wdg_timeout);
+ if (txq < 0) {
+ iwl_mvm_mld_rm_sta_from_fw(mvm, sta->sta_id);
+ return txq;
+ }
+ *queue = txq;
+
+ return 0;
+}
+
+/*
+ * Adds a new int sta: allocate it in the driver, add it to the FW table,
+ * and add its queues.
+ */
+static int iwl_mvm_mld_add_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *int_sta, u16 *queue,
+ enum nl80211_iftype iftype,
+ enum iwl_fw_sta_type sta_type,
+ int link_id, const u8 *addr, u8 tid,
+ unsigned int *wdg_timeout)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* the qmask argument is not used in the new TX API, so send a don't-care value */
+ ret = iwl_mvm_allocate_int_sta(mvm, int_sta, 0, iftype,
+ sta_type);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, int_sta, addr, link_id,
+ queue, tid, wdg_timeout);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, int_sta);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Allocate a new station entry for the broadcast station of the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ */
+int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link =
+ mvmvif->link[link_conf->link_id];
+ struct iwl_mvm_int_sta *bsta = &mvm_link->bcast_sta;
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ const u8 *baddr = _baddr;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif);
+ u16 *queue;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = link_conf->bssid;
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ queue = &mvm_link->mgmt_queue;
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ queue = &mvm->p2p_dev_queue;
+ } else {
+ WARN(1, "Missing required TXQ for adding bcast STA\n");
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mld_add_int_sta(mvm, bsta, queue,
+ ieee80211_vif_type_p2p(vif),
+ STATION_TYPE_BCAST_MGMT,
+ mvm_link->fw_link_id, baddr,
+ IWL_MAX_TID_COUNT, &wdg_timeout);
+}
+
+/* Allocate a new station entry for the multicast station of the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ */
+int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link =
+ mvmvif->link[link_conf->link_id];
+ struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta;
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EOPNOTSUPP;
+
+ /* In IBSS, ieee80211_check_queues() sets the cab_queue to be
+ * invalid, so make sure we use the queue we want.
+ * Note that this is done here as we want to avoid making DQA
+	 * changes in the mac80211 layer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ mvm_link->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+
+ return iwl_mvm_mld_add_int_sta(mvm, msta, &mvm_link->cab_queue,
+ vif->type, STATION_TYPE_MCAST,
+ mvm_link->fw_link_id, maddr, 0,
+ &timeout);
+}
+
+/* Allocate a new station entry for the sniffer station of the given vif,
+ * and send it to the FW.
+ */
+int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link =
+ mvmvif->link[link_conf->link_id];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_mld_add_int_sta(mvm, &mvm->snif_sta, &mvm->snif_queue,
+ vif->type, STATION_TYPE_BCAST_MGMT,
+ mvm_link->fw_link_id, NULL,
+ IWL_MAX_TID_COUNT, NULL);
+}
+
+int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* In CDB NICs we need to specify which lmac to use for aux activity;
+	 * reuse the link_id argument to pass the lmac_id to the function.
+ */
+ return iwl_mvm_mld_add_int_sta(mvm, &mvm->aux_sta, &mvm->aux_queue,
+ NL80211_IFTYPE_UNSPECIFIED,
+ STATION_TYPE_AUX, lmac_id, NULL,
+ IWL_MAX_TID_COUNT, NULL);
+}
+
+static int iwl_mvm_mld_disable_txq(struct iwl_mvm *mvm, u32 sta_mask,
+ u16 *queueptr, u8 tid)
+{
+ int queue = *queueptr;
+ int ret = 0;
+
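+	/* the driver uses IWL_MAX_TID_COUNT to denote the management
+	 * queue; translate it to the IWL_MGMT_TID the FW command expects
+	 */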
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IWL_MGMT_TID;
+
+ if (mvm->sta_remove_requires_queue_remove) {
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD);
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.tid = cpu_to_le32(tid),
+ .u.remove.sta_mask = cpu_to_le32(sta_mask),
+ };
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+ sizeof(remove_cmd),
+ &remove_cmd);
+ }
+
+ iwl_trans_txq_free(mvm->trans, queue);
+ *queueptr = IWL_MVM_INVALID_QUEUE;
+
+ return ret;
+}
+
+/* Remove a sta from the FW table, disable its queue, and deallocate it
+ */
+static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *int_sta,
+				  bool flush, u8 tid, u16 *queueptr)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(int_sta->sta_id == IWL_INVALID_STA))
+ return -EINVAL;
+
+ if (flush)
+ iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
+
+	iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queueptr, tid);
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, int_sta->sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ iwl_mvm_dealloc_int_sta(mvm, int_sta);
+
+ return ret;
+}
+
+int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id];
+ u16 *queueptr;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(!link))
+ return -EIO;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ queueptr = &link->mgmt_queue;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ queueptr = &mvm->p2p_dev_queue;
+ break;
+ default:
+ WARN(1, "Can't free bcast queue on vif type %d\n",
+ vif->type);
+ return -EINVAL;
+ }
+
+ return iwl_mvm_mld_rm_int_sta(mvm, &link->bcast_sta,
+ true, IWL_MAX_TID_COUNT, queueptr);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id];
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(!link))
+ return -EIO;
+
+ return iwl_mvm_mld_rm_int_sta(mvm, &link->mcast_sta, true, 0,
+ &link->cab_queue);
+}
+
+int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_mld_rm_int_sta(mvm, &mvm->snif_sta, false,
+ IWL_MAX_TID_COUNT, &mvm->snif_queue);
+}
+
+int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_mld_rm_int_sta(mvm, &mvm->aux_sta, false,
+ IWL_MAX_TID_COUNT, &mvm->aux_queue);
+}
+
+/* send a cfg sta command to add/update a sta in firmware */
+static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct iwl_mvm_link_sta *mvm_link_sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvm_vif->link[link_conf->link_id];
+ struct iwl_sta_cfg_cmd cmd = {
+ .sta_id = cpu_to_le32(mvm_link_sta->sta_id),
+ .station_type = cpu_to_le32(mvm_sta->sta_type),
+ };
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ /* when adding sta, link should exist in FW */
+ if (WARN_ON(link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(link_info->fw_link_id);
+
+ memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
+ memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
+
+ if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
+ cmd.assoc_id = cpu_to_le32(sta->aid);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) &&
+ (sta->mfp || mvm_sta->sta_state < IEEE80211_STA_AUTHORIZED))
+ cmd.mfp = cpu_to_le32(1);
+
+ switch (link_sta->rx_nss) {
+ case 1:
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case 2 ... 8:
+ cmd.mimo = cpu_to_le32(1);
+ break;
+ }
+
+ switch (link_sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cmd.mimo_protection = cpu_to_le32(1);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
+
+ mpdu_dens = iwl_mvm_get_sta_ampdu_dens(link_sta, link_conf, &agg_size);
+ cmd.tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
+ cmd.tx_ampdu_max_size = cpu_to_le32(agg_size);
+
+ if (sta->wme) {
+ cmd.sp_length =
+ cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
+ cmd.uapsd_acs = cpu_to_le32(iwl_mvm_get_sta_uapsd_acs(sta));
+ }
+
+ if (link_sta->he_cap.has_he) {
+ cmd.trig_rnd_alloc =
+ cpu_to_le32(link_conf->uora_exists ? 1 : 0);
+
+ /* PPE Thresholds */
+ iwl_mvm_set_sta_pkt_ext(mvm, link_sta, &cmd.pkt_ext);
+
+ /* HTC flags */
+ cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, link_sta);
+
+ if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN)
+ cmd.ack_enabled = cpu_to_le32(1);
+ }
+
+ return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
+}
+
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id)
+{
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL);
+ RCU_INIT_POINTER(mvm_sta->link[link_id], NULL);
+
+ if (mvm_sta_link != &mvm_sta->deflink)
+ kfree_rcu(mvm_sta_link, rcu_head);
+}
+
+static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ unsigned int link_id;
+
+ for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
+ struct iwl_mvm_link_sta *link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (!link)
+ continue;
+
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id);
+ }
+}
+
+static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ unsigned int link_id)
+{
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_link_sta *link;
+ u32 sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ if (sta_id == IWL_INVALID_STA)
+ return -ENOSPC;
+
+ if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) {
+ link = &mvm_sta->deflink;
+ } else {
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ }
+
+ link->sta_id = sta_id;
+ rcu_assign_pointer(mvm_sta->link[link_id], link);
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[link->sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[link->sta_id],
+ link_sta);
+
+ return 0;
+}
+
+/* allocate all the links of a sta, called when the station is first added */
+static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON(mvm_sta->link[link_id]))
+ continue;
+
+ ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta);
+ return ret;
+}
+
+static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
+ struct iwl_mvm_vif_link_info *vif_link,
+ struct iwl_mvm_link_sta *sta_link)
+{
+ if (!sta->tdls) {
+ WARN_ON(vif_link->ap_sta_id != IWL_INVALID_STA);
+ vif_link->ap_sta_id = sta_link->sta_id;
+ } else {
+ WARN_ON(vif_link->ap_sta_id == IWL_INVALID_STA);
+ }
+}
+
+static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ /* no active link found */
+ int ret = -EINVAL;
+ int sta_id;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ /* First add an empty station since allocating a queue requires
+ * a valid station. Since we need a link_id to allocate a station,
+	 * pick the first valid one.
+ */
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_vif_link_info *mvm_link;
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (!link_conf)
+ continue;
+
+ mvm_link = mvmvif->link[link_conf->link_id];
+
+ if (!mvm_link || !mvm_link_sta)
+ continue;
+
+ sta_id = mvm_link_sta->sta_id;
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta,
+ link_conf, mvm_link_sta);
+ if (ret)
+ return ret;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], link_sta);
+ ret = 0;
+ }
+
+ iwl_mvm_realloc_queues_after_restart(mvm, sta);
+
+ return ret;
+}
+
+int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned long link_sta_added_to_fw = 0;
+ struct ieee80211_link_sta *link_sta;
+ int ret = 0;
+ unsigned int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&mvm_sta->lock);
+
+ ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_INVALID_STA,
+ STATION_TYPE_PEER);
+ } else {
+ ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
+ }
+
+ if (ret)
+ goto err;
+
+ /* at this stage sta link pointers are already allocated */
+ ret = iwl_mvm_mld_update_sta(mvm, vif, sta);
+ if (ret)
+ goto err;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON(!link_conf || !mvm_link_sta)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
+ mvm_link_sta);
+ if (ret)
+ goto err;
+
+ link_sta_added_to_fw |= BIT(link_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id],
+ mvm_link_sta);
+ }
+ return 0;
+
+err:
+ /* remove all already allocated stations in FW */
+ for_each_set_bit(link_id, &link_sta_added_to_fw,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id);
+ }
+
+ /* free all sta resources in the driver */
+ iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta);
+ return ret;
+}
+
+int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON(!link_conf || !mvm_link_sta))
+ return -EINVAL;
+
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
+ mvm_link_sta);
+
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update sta link %d\n", link_id);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_mld_disable_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ u32 sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ iwl_mvm_mld_disable_txq(mvm, sta_mask,
+ &mvm_sta->tid_data[i].txq_id, i);
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+}
+
+int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* flush its queues here since we are freeing mvm_sta */
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON(!mvm_link_sta))
+ return -EINVAL;
+
+ ret = iwl_mvm_flush_sta_tids(mvm, mvm_link_sta->sta_id,
+ 0xffff);
+ if (ret)
+ return ret;
+ }
+
+ ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+ if (ret)
+ return ret;
+
+ iwl_mvm_mld_disable_sta_queues(mvm, vif, sta);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ iwl_mvm_sta_del(mvm, vif, sta, link_sta);
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id);
+
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
+ }
+ kfree(mvm_sta->mpdu_counters);
+ mvm_sta->mpdu_counters = NULL;
+
+ return ret;
+}
+
+int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STA))
+ return 0;
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+ RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
+ return ret;
+}
+
+void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ bool disable)
+{
+ struct iwl_mvm_sta_disable_tx_cmd cmd;
+ int ret;
+
+ cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
+ cmd.disable = cpu_to_le32(disable);
+
+ if (WARN_ON(iwl_mvm_has_no_host_disable_tx(mvm)))
+ return;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD),
+ CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Failed to send STA_DISABLE_TX_CMD command (%d)\n",
+ ret);
+}
+
+void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvm_sta->lock);
+
+ if (mvm_sta->disable_tx == disable) {
+ spin_unlock_bh(&mvm_sta->lock);
+ return;
+ }
+
+ iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
+void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ int i;
+
+ rcu_read_lock();
+
+ /* Block/unblock all the stations of the given mvmvif */
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvm_sta->mac_id_n_color !=
+ FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
+ continue;
+
+ iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable);
+ }
+
+ rcu_read_unlock();
+}
+
+static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_scd_queue_cfg_cmd cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd
+ };
+ int tid;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid];
+ int txq_id = tid_data->txq_id;
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
+ else
+ cmd.u.modify.tid = cpu_to_le32(tid);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
+ .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
+ .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int baid;
+
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
+ struct iwl_mvm_baid_data *data;
+ int ret;
+
+ data = rcu_dereference_protected(mvm->baid_map[baid],
+ lockdep_is_held(&mvm->mutex));
+ if (!data)
+ continue;
+
+ if (!(data->sta_mask & old_sta_mask))
+ continue;
+
+ WARN_ONCE(data->sta_mask != old_sta_mask,
+ "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
+ baid, old_sta_mask, data->sta_mask);
+
+ cmd.modify.tid = cpu_to_le32(data->tid);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+ sizeof(cmd), &cmd);
+ data->sta_mask = new_sta_mask;
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ int ret;
+
+ ret = iwl_mvm_mld_update_sta_queues(mvm, sta,
+ old_sta_mask,
+ new_sta_mask);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta,
+ old_sta_mask,
+ new_sta_mask);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask, new_sta_mask);
+}
+
+int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_link_sta *mvm_sta_link;
+ struct iwl_mvm_vif_link_info *mvm_vif_link;
+ unsigned long links_to_add = ~old_links & new_links;
+ unsigned long links_to_rem = old_links & ~new_links;
+ unsigned long old_links_long = old_links;
+ u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0;
+ unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0;
+ unsigned int link_id;
+ int ret;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+ lockdep_assert_held(&mvm->mutex);
+
+ for_each_set_bit(link_id, &old_links_long,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ mvm_sta_link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON(!mvm_sta_link)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ current_sta_mask |= BIT(mvm_sta_link->sta_id);
+ if (links_to_rem & BIT(link_id))
+ sta_mask_to_rem |= BIT(mvm_sta_link->sta_id);
+ }
+
+ if (sta_mask_to_rem) {
+ ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
+ current_sta_mask,
+ current_sta_mask &
+ ~sta_mask_to_rem);
+ if (WARN_ON(ret))
+ goto err;
+
+ current_sta_mask &= ~sta_mask_to_rem;
+ }
+
+ for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mvm_sta_link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ mvm_vif_link = mvm_vif->link[link_id];
+
+ if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
+ if (WARN_ON(ret))
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ mvm_vif_link->ap_sta_id = IWL_INVALID_STA;
+
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
+ }
+
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+ mvm_vif_link = mvm_vif->link[link_id];
+
+ if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ u32 sta_id;
+
+ if (WARN_ON(!mvm_link_sta)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sta_id = mvm_link_sta->sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id],
+ link_sta);
+ } else {
+ if (WARN_ON(mvm_sta->link[link_id])) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta,
+ link_id);
+ if (WARN_ON(ret))
+ goto err;
+ }
+
+ link_sta->agg.max_rc_amsdu_len = 1;
+ ieee80211_sta_recalc_aggregates(sta);
+
+ mvm_sta_link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON(!mvm_sta_link)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link,
+ mvm_sta_link);
+
+ link_sta_allocated |= BIT(link_id);
+
+ sta_mask_added |= BIT(mvm_sta_link->sta_id);
+
+ ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
+ mvm_sta_link);
+ if (WARN_ON(ret))
+ goto err;
+
+ link_sta_added_to_fw |= BIT(link_id);
+
+ iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link);
+
+ iwl_mvm_rs_rate_init(mvm, vif, sta, link_conf, link_sta,
+ link_conf->chanreq.oper.chan->band);
+ }
+
+ if (sta_mask_added) {
+ ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
+ current_sta_mask,
+ current_sta_mask |
+ sta_mask_added);
+ if (WARN_ON(ret))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ /* remove all already allocated stations in FW */
+ for_each_set_bit(link_id, &link_sta_added_to_fw,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ mvm_sta_link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
+ }
+
+ /* remove all already allocated station links in driver */
+ for_each_set_bit(link_id, &link_sta_allocated,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ mvm_sta_link =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
+ }
+
+ return ret;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mvm.h b/sys/contrib/dev/iwlwifi/mvm/mvm.h
new file mode 100644
index 000000000000..41125adf4fd1
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/mvm.h
@@ -0,0 +1,3033 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cleanup.h>
+#ifdef CONFIG_IWLWIFI_LEDS
+#include <linux/leds.h>
+#endif
+#include <linux/in6.h>
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
+#include <linux/ptp_clock_kernel.h>
+
+#include <linux/ktime.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "fw/notif-wait.h"
+#include "iwl-nvm-utils.h"
+#include "fw/file.h"
+#include "iwl-config.h"
+#include "sta.h"
+#include "fw-api.h"
+#include "constants.h"
+#include "fw/runtime.h"
+#include "fw/dbg.h"
+#include "fw/acpi.h"
+#include "mei/iwl-mei.h"
+#include "iwl-nvm-parse.h"
+
+#include <linux/average.h>
+#if defined(__FreeBSD__)
+#include <net/if_inet6.h>
+#endif
+
+#define IWL_MVM_MAX_ADDRESSES 5
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
+
+/* A TimeUnit is 1024 microseconds */
+#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)
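+
+/*
+ * Worked example (illustrative only): MSEC_TO_TU(100) = 100 * 1000 / 1024
+ * = 97 TU, i.e. the integer division makes the conversion round down
+ * slightly.
+ */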
+
+/* For GO, this value represents the number of TUs before CSA "beacon
+ * 0" TBTT when the CSA time-event needs to be scheduled to start. It
+ * must be big enough to ensure that we switch in time.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40
+
+/* For client, this value represents the number of TUs before CSA
+ * "beacon 1" TBTT, instead. This is because we don't know when the
+ * GO/AP will be in the new channel, so we switch early enough.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10
+
+/*
+ * This value (in TUs) is used to fine tune the CSA NoA end time which should
+ * be just before "beacon 0" TBTT.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
+
+/*
+ * Number of beacons to transmit on a new channel until we unblock tx to
+ * the stations, even if we didn't identify them on a new channel
+ */
+#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
+
+/* offchannel queue towards mac80211 */
+#define IWL_MVM_OFFCHANNEL_QUEUE 0
+
+/* invalid value for FW link id */
+#define IWL_MVM_FW_LINK_ID_INVALID 0xff
+
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
+extern const struct ieee80211_ops iwl_mvm_mld_hw_ops;
+
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mvm_mod_params {
+ int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+ u16 id;
+ u16 color;
+ u32 ref;
+
+ enum nl80211_chan_width width;
+
+ struct ieee80211_channel *channel;
+
+ /* track for RLC config command */
+ u32 center_freq1;
+ bool rlc_disabled;
+ u32 channel_load_by_us;
+ u32 channel_load_not_by_us;
+};
+
+struct iwl_mvm_time_event_data {
+ struct ieee80211_vif *vif;
+ struct list_head list;
+ unsigned long end_jiffies;
+ u32 duration;
+ bool running;
+ u32 uid;
+
+	/*
+	 * The access to the 'id' field must be done when the
+	 * mvm->time_event_lock is held, as its value is used to indicate
+	 * whether the te is in the time event list or not (it is not
+	 * when id == TE_MAX)
+	 */
+ u32 id;
+ s8 link_id;
+};
+
+/* Power management */
+
+/**
+ * enum iwl_power_scheme - iwl power schemes
+ * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP: Low Power
+ */
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+ IWL_POWER_SCHEME_LP
+};
+
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+enum iwl_dbgfs_pm_mask {
+ MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
+ MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
+ MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
+ MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
+ MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
+ MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
+ MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+ MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+ MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
+ MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
+};
+
+struct iwl_dbgfs_pm {
+ u16 keep_alive_seconds;
+ u32 rx_data_timeout;
+ u32 tx_data_timeout;
+ bool skip_over_dtim;
+ u8 skip_dtim_periods;
+ bool lprx_ena;
+ u32 lprx_rssi_threshold;
+ bool snooze_ena;
+ bool uapsd_misbehaving;
+ bool use_ps_poll;
+ int mask;
+};
+
+/* beacon filtering */
+
+enum iwl_dbgfs_bf_mask {
+ MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
+ MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
+ MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
+ MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+ MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+ MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+ MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+ MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+ MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+ MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+ MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
+};
+
+struct iwl_dbgfs_bf {
+ u32 bf_energy_delta;
+ u32 bf_roaming_energy_delta;
+ u32 bf_roaming_state;
+ u32 bf_temp_threshold;
+ u32 bf_temp_fast_filter;
+ u32 bf_temp_slow_filter;
+ u32 bf_enable_beacon_filter;
+ u32 bf_debug_flag;
+ u32 bf_escape_timer;
+ u32 ba_escape_timer;
+ u32 ba_enable_beacon_abort;
+ int mask;
+};
+#endif
+
+enum iwl_mvm_smps_type_request {
+ IWL_MVM_SMPS_REQ_BT_COEX,
+ IWL_MVM_SMPS_REQ_TT,
+ IWL_MVM_SMPS_REQ_PROT,
+ IWL_MVM_SMPS_REQ_FW,
+ NUM_IWL_MVM_SMPS_REQ,
+};
+
+enum iwl_bt_force_ant_mode {
+ BT_FORCE_ANT_DIS = 0,
+ BT_FORCE_ANT_AUTO,
+ BT_FORCE_ANT_BT,
+ BT_FORCE_ANT_WIFI,
+
+ BT_FORCE_ANT_MAX,
+};
+
+/**
+ * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
+ * @LOW_LATENCY_FORCE_UNSET: unset force mode
+ * @LOW_LATENCY_FORCE_ON: force low latency on
+ * @LOW_LATENCY_FORCE_OFF: force low latency off
+ * @NUM_LOW_LATENCY_FORCE: max num of modes
+ */
+enum iwl_mvm_low_latency_force {
+ LOW_LATENCY_FORCE_UNSET,
+ LOW_LATENCY_FORCE_ON,
+ LOW_LATENCY_FORCE_OFF,
+ NUM_LOW_LATENCY_FORCE
+};
+
+/**
+ * enum iwl_mvm_low_latency_cause - low latency set causes
+ * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
+ * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
+ * @LOW_LATENCY_VCMD: low latency mode set from vendor command
+ * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
+ * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicates that force mode is enabled;
+ *	the actual set/unset is done with @LOW_LATENCY_DEBUGFS_FORCE
+ * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs;
+ *	set this together with the @LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag
+ *	in low_latency.
+ */
+enum iwl_mvm_low_latency_cause {
+ LOW_LATENCY_TRAFFIC = BIT(0),
+ LOW_LATENCY_DEBUGFS = BIT(1),
+ LOW_LATENCY_VCMD = BIT(2),
+ LOW_LATENCY_VIF_TYPE = BIT(3),
+ LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4),
+ LOW_LATENCY_DEBUGFS_FORCE = BIT(5),
+};
+
+/**
+ * struct iwl_mvm_link_bf_data - beacon filtering related data
+ * @ave_beacon_signal: average beacon signal
+ * @last_cqm_event: rssi of the last cqm event
+ * @bt_coex_min_thold: minimum threshold for BT coex
+ * @bt_coex_max_thold: maximum threshold for BT coex
+ * @last_bt_coex_event: rssi of the last BT coex event
+ */
+struct iwl_mvm_link_bf_data {
+ int ave_beacon_signal;
+ int last_cqm_event;
+ int bt_coex_min_thold;
+ int bt_coex_max_thold;
+ int last_bt_coex_event;
+};
+
+/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+ struct rcu_head rcu_head;
+ struct iwl_probe_resp_data_notif notif;
+ int noa_len;
+};
+
+/**
+ * struct iwl_mvm_vif_link_info - per link data in Virtual Interface
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @fw_link_id: the id of the link according to the FW API
+ * @bssid: BSSID for this (client) interface
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ * vifs: P2P_DEVICE, GO and AP.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ * # of received beacons accumulated over FW restart, and the current
+ * average signal of beacons retrieved from the firmware
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ * combined on update to yield the overall request to mac80211.
+ * @probe_resp_data: data from FW notification to store NOA and CSA related
+ * data to be inserted into probe response.
+ * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked
+ * @queue_params: QoS params for this MAC
+ * @mgmt_queue: queue number for unbufferable management frames
+ * @igtk: the current IGTK programmed into the firmware
+ * @active: indicates the link is active in FW (for sanity checking)
+ * @cab_queue: content-after-beacon (multicast) queue
+ * @listen_lmac: indicates this link is allocated to the listen LMAC
+ * @csa_block_tx: we got CSA with mode=1
+ * @mcast_sta: multicast station
+ * @phy_ctxt: phy context allocated to this link, if any
+ * @bf_data: beacon filtering data
+ * @average_beacon_energy: average beacon energy for beacons received during
+ * client connections
+ */
+struct iwl_mvm_vif_link_info {
+ u8 bssid[ETH_ALEN];
+ u8 ap_sta_id;
+ u8 fw_link_id;
+
+ struct iwl_mvm_int_sta bcast_sta;
+ struct iwl_mvm_int_sta mcast_sta;
+
+ struct {
+ u32 num_beacons, accu_num_beacons;
+ u8 avg_signal;
+ } beacon_stats;
+
+ enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+ struct iwl_probe_resp_data __rcu *probe_resp_data;
+
+ struct ieee80211_key_conf *igtk;
+
+ bool he_ru_2mhz_block;
+ bool active;
+ bool listen_lmac;
+ bool csa_block_tx;
+
+ u16 cab_queue;
+ /* Assigned while mac80211 has the link in a channel context,
+ * or, for P2P Device, while it exists.
+ */
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ /* QoS data from mac80211, need to store this here
+ * as mac80211 has a separate callback but we need
+ * to have the data for the MAC context
+ */
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+
+ u16 mgmt_queue;
+
+ struct iwl_mvm_link_bf_data bf_data;
+ u32 average_beacon_energy;
+};
+
+/**
+ * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
+ * blocked.
+ * The low 16 bits are used for blocking reasons, and the 16 higher bits
+ * are used for exit reasons.
+ * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
+ * reasons - use iwl_mvm_exit_esr().
+ *
+ * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
+ *
+ * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
+ * in a loop.
+ * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
+ * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit
+ *	from it
+ * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's link
+ * is preventing EMLSR. This is a temporary blocking that is set when there
+ * is an indication that a non-BSS interface is to be added.
+ * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
+ * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
+ * due to low RSSI.
+ * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
+ * due to BT Coex.
+ * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
+ * preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
+ * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ */
+enum iwl_mvm_esr_state {
+ IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
+ IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
+ IWL_MVM_ESR_BLOCKED_TPT = 0x4,
+ IWL_MVM_ESR_BLOCKED_FW = 0x8,
+ IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
+ IWL_MVM_ESR_BLOCKED_ROC = 0x20,
+ IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 0x40,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
+ IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
+ IWL_MVM_ESR_EXIT_COEX = 0x40000,
+ IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
+ IWL_MVM_ESR_EXIT_CSA = 0x100000,
+ IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+};
+
+#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
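+
+/*
+ * Usage sketch (hypothetical caller): blocking reasons occupy the low
+ * 16 bits, so "reason & IWL_MVM_BLOCK_ESR_REASONS" is non-zero only for
+ * blocking reasons such as IWL_MVM_ESR_BLOCKED_WOWLAN, while exit
+ * reasons such as IWL_MVM_ESR_EXIT_CSA are masked out.
+ */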
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
+
+/**
+ * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
+ * @reason: The reason for the last exit from EMLSR; a value from
+ *	&enum iwl_mvm_esr_state. Will be 0 before the first exit from EMLSR.
+ * @ts: the time stamp of the last time we exited EMLSR.
+ */
+struct iwl_mvm_esr_exit {
+ unsigned long ts;
+ enum iwl_mvm_esr_state reason;
+};
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @mvm: pointer back to the mvm struct
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @associated: indicates that we're currently associated, used only for
+ * managing the firmware state in iwl_mvm_bss_info_changed_station()
+ * @ap_assoc_sta_count: count of stations associated to us - valid only
+ * if VIF type is AP
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ * should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
+ * @monitor_active: indicates that monitor context is configured, and that the
+ * interface should get quota etc.
+ * @low_latency: bit flags for low latency
+ * see enum &iwl_mvm_low_latency_cause for causes.
+ * @low_latency_actual: boolean, indicates low latency is set,
+ *	as a result of the low_latency bit flags, taking force into account.
+ * @authorized: indicates the AP station was set to authorized
+ * @ps_disabled: indicates that this interface requires PS to be disabled
+ * @csa_countdown: indicates that CSA countdown may be started
+ * @csa_failed: CSA failed to schedule time event, report an error later
+ * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel
+ * @csa_blocks_tx: CSA is blocking TX
+ * @features: hw features active for this vif
+ * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit.
+ * @ap_beacon_time: AP beacon time for synchronisation (on older FW)
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicated if beacon abort is enabled
+ * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link)
+ * @deflink: default link data for use in non-MLO
+ * @link: link data for each link in MLO
+ * @esr_active: indicates eSR mode is active
+ * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
+ * @link_selection_res: bitmap of active links as it was decided in the last
+ * link selection. Valid only for a MLO vif after assoc. 0 if there wasn't
+ * any link selection yet.
+ * @link_selection_primary: primary link selected by link selection
+ * @primary_link: primary link in eSR. Valid only for an associated MLD vif,
+ * and in eSR mode. Valid only for a STA.
+ * @last_esr_exit: Details of the last exit from EMLSR.
+ * @exit_same_reason_count: The number of times we exited due to the specified
+ * @last_esr_exit::reason, only counting exits due to
+ * &IWL_MVM_ESR_PREVENT_REASONS.
+ * @prevent_esr_done_wk: work that should be done when esr prevention ends.
+ * @mlo_int_scan_wk: work for the internal MLO scan.
+ * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
+ * @unblock_esr_tmp_non_bss_wk: work for removing the
+ * IWL_MVM_ESR_BLOCKED_TMP_NON_BSS blocking for EMLSR.
+ * @roc_activity: currently running ROC activity for this vif (or
+ * ROC_NUM_ACTIVITIES if no activity is running).
+ * @session_prot_connection_loss: the connection was lost due to session
+ * protection ending without receiving a beacon, so we need to now
+ * protect the deauth separately
+ * @ap_early_keys: The firmware cannot install keys before stations etc.,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @ap_sta: pointer to the AP STA data structure
+ * @csa_count: CSA counter (old CSA implementation w/o firmware)
+ * @csa_misbehave: CSA AP misbehaviour flag (old implementation)
+ * @csa_target_freq: CSA target channel frequency (old implementation)
+ * @csa_work: CSA work (old implementation)
+ * @dbgfs_bf: beacon filtering debugfs data
+ * @dbgfs_dir: debugfs directory for this vif
+ * @dbgfs_pm: power management debugfs data
+ * @dbgfs_quota_min: debugfs value for minimal quota
+ * @dbgfs_slink: debugfs symlink for this interface
+ * @ftm_unprotected: unprotected FTM debugfs override
+ * @hs_time_event_data: hotspot/AUX ROC time event data
+ * @mac_pwr_cmd: debugfs override for MAC power command
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @rekey_data: rekeying data for WoWLAN GTK rekey offload
+ * @seqno: storage for seqno for older firmware D0/D3 transition
+ * @seqno_valid: indicates @seqno is valid
+ * @time_event_data: session protection time event data
+ * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that)
+ * @tx_key_idx: WEP transmit key index for D3
+ * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to
+ * not use U-APSD on reconnection
+ * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation
+ * in U-APSD
+ */
+struct iwl_mvm_vif {
+ struct iwl_mvm *mvm;
+ u16 id;
+ u16 color;
+
+ bool associated;
+ u8 ap_assoc_sta_count;
+ bool uploaded;
+ bool ap_ibss_active;
+ bool pm_enabled;
+ bool monitor_active;
+ bool esr_active;
+ bool session_prot_connection_loss;
+
+ u8 low_latency: 6;
+ u8 low_latency_actual: 1;
+
+ u8 authorized:1;
+ bool ps_disabled;
+
+ u32 esr_disable_reason;
+ u32 ap_beacon_time;
+ bool bf_enabled;
+ bool ba_enabled;
+
+#ifdef CONFIG_PM_SLEEP
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_EXT_LEN];
+ u8 kek[NL80211_KEK_EXT_LEN];
+ size_t kek_len;
+ size_t kck_len;
+ u32 akm;
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
+ int tx_key_idx;
+
+ bool seqno_valid;
+ u16 seqno;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_slink;
+ struct iwl_dbgfs_pm dbgfs_pm;
+ struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
+ int dbgfs_quota_min;
+ bool ftm_unprotected;
+#endif
+
+ /* FW identified misbehaving AP */
+ u8 uapsd_misbehaving_ap_addr[ETH_ALEN] __aligned(2);
+ struct delayed_work uapsd_nonagg_detected_wk;
+
+ bool csa_countdown;
+ bool csa_failed;
+ bool csa_bcn_pending;
+ bool csa_blocks_tx;
+ u16 csa_target_freq;
+ u16 csa_count;
+ u16 csa_misbehave;
+ struct delayed_work csa_work;
+
+ enum iwl_tsf_id tsf_id;
+
+ struct iwl_mvm_time_event_data time_event_data;
+ struct iwl_mvm_time_event_data hs_time_event_data;
+ enum iwl_roc_activity roc_activity;
+
+ /* TCP Checksum Offload */
+ netdev_features_t features;
+
+ struct ieee80211_sta *ap_sta;
+
+ /* we can only have 2 GTK + 2 IGTK active at a time */
+ struct ieee80211_key_conf *ap_early_keys[4];
+
+ struct {
+ struct ieee80211_key_conf __rcu *keys[2];
+ } bcn_prot;
+
+ u16 max_tx_op;
+
+ u16 link_selection_res;
+ u8 link_selection_primary;
+ u8 primary_link;
+ struct iwl_mvm_esr_exit last_esr_exit;
+ u8 exit_same_reason_count;
+ struct wiphy_delayed_work prevent_esr_done_wk;
+ struct wiphy_delayed_work mlo_int_scan_wk;
+ struct wiphy_work unblock_esr_tpt_wk;
+ struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk;
+
+ struct iwl_mvm_vif_link_info deflink;
+ struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+#define for_each_mvm_vif_valid_link(mvm_vif, link_id) \
+ for (link_id = 0; \
+ link_id < ARRAY_SIZE((mvm_vif)->link); \
+ link_id++) \
+ if ((mvm_vif)->link[link_id])
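+
+/*
+ * Usage sketch (hypothetical caller): the loop body only runs for
+ * allocated links, e.g.
+ *
+ *	for_each_mvm_vif_valid_link(mvmvif, link_id)
+ *		cleanup_link(mvmvif->link[link_id]);
+ *
+ * where cleanup_link() is a placeholder for per-link teardown.
+ */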
+
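+/*
+ * mac80211 reserves vif->drv_priv for driver use (sized via
+ * hw->vif_data_size), so this cast recovers the iwl_mvm_vif that was
+ * co-allocated with the ieee80211_vif.
+ */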
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+extern const u8 tid_to_mac80211_ac[];
+
+#define IWL_MVM_SCAN_STOPPING_SHIFT 8
+
+enum iwl_scan_status {
+ IWL_MVM_SCAN_REGULAR = BIT(0),
+ IWL_MVM_SCAN_SCHED = BIT(1),
+ IWL_MVM_SCAN_NETDETECT = BIT(2),
+ IWL_MVM_SCAN_INT_MLO = BIT(3),
+
+ IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
+ IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
+ IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+ IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11),
+
+ IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
+ IWL_MVM_SCAN_STOPPING_REGULAR,
+ IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
+ IWL_MVM_SCAN_STOPPING_SCHED,
+ IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
+ IWL_MVM_SCAN_STOPPING_NETDETECT,
+ IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
+ IWL_MVM_SCAN_STOPPING_INT_MLO,
+
+ IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+ IWL_MVM_SCAN_MASK = 0xff,
+};
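+
+/*
+ * Consistency note: each STOPPING bit is the matching scan bit shifted
+ * left by IWL_MVM_SCAN_STOPPING_SHIFT, e.g.
+ * IWL_MVM_SCAN_STOPPING_REGULAR == IWL_MVM_SCAN_REGULAR << 8.
+ */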
+
+enum iwl_mvm_scan_type {
+ IWL_SCAN_TYPE_NOT_SET,
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
+};
+
+enum iwl_mvm_sched_scan_pass_all_states {
+ SCHED_SCAN_PASS_ALL_DISABLED,
+ SCHED_SCAN_PASS_ALL_ENABLED,
+ SCHED_SCAN_PASS_ALL_FOUND,
+};
+
+/**
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
+ * @ct_kill_exit: worker to exit thermal kill
+ * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
+ * @tx_backoff: The current thermal throttling tx backoff in usec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
+ * @params: Parameters to configure the thermal throttling algorithm.
+ * @throttle: Is thermal throttling active?
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and
+ * device
+ */
+struct iwl_mvm_tt_mgmt {
+ struct delayed_work ct_kill_exit;
+ bool dynamic_smps;
+ u32 tx_backoff;
+ u32 min_backoff;
+ struct iwl_tt_params params;
+ bool throttle;
+
+ u32 power_budget_mw;
+};
+
+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds for report
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+ struct thermal_trip trips[IWL_MAX_DTS_TRIPS];
+ struct thermal_zone_device *tzone;
+};
+
+/*
+ * struct iwl_mvm_cooling_device
+ * @cur_state: current state
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+ u32 cur_state;
+ struct thermal_cooling_device *cdev;
+};
+#endif
+
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+ u32 legacy_frames;
+ u32 ht_frames;
+ u32 vht_frames;
+ u32 bw_20_frames;
+ u32 bw_40_frames;
+ u32 bw_80_frames;
+ u32 bw_160_frames;
+ u32 sgi_frames;
+ u32 ngi_frames;
+ u32 siso_frames;
+ u32 mimo2_frames;
+ u32 agg_frames;
+ u32 ampdu_count;
+ u32 success_frames;
+ u32 fail_frames;
+ u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+ int last_frame_idx;
+};
+
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
+
+enum iwl_mvm_tdls_cs_state {
+ IWL_MVM_TDLS_SW_IDLE = 0,
+ IWL_MVM_TDLS_SW_REQ_SENT,
+ IWL_MVM_TDLS_SW_RESP_RCVD,
+ IWL_MVM_TDLS_SW_REQ_RCVD,
+ IWL_MVM_TDLS_SW_ACTIVE,
+};
+
+enum iwl_mvm_traffic_load {
+ IWL_MVM_TRAFFIC_LOW,
+ IWL_MVM_TRAFFIC_MEDIUM,
+ IWL_MVM_TRAFFIC_HIGH,
+};
+
+DECLARE_EWMA(rate, 16, 16)
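+
+/*
+ * DECLARE_EWMA(rate, 16, 16) generates struct ewma_rate along with the
+ * ewma_rate_init()/ewma_rate_add()/ewma_rate_read() helpers (see
+ * <linux/average.h>): 16 bits of fractional precision and a weight
+ * reciprocal of 16, i.e. each new sample contributes roughly 1/16 to
+ * the average; used below to smooth the AP transfer rate estimate.
+ */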
+
+struct iwl_mvm_tcm_mac {
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ } tx;
+ struct {
+ u32 pkts[IEEE80211_NUM_ACS];
+ u32 airtime;
+ u32 last_ampdu_ref;
+ } rx;
+ struct {
+ /* track AP's transfer in client mode */
+ u64 rx_bytes;
+ struct ewma_rate rate;
+ bool detected;
+ } uapsd_nonagg_detect;
+ bool opened_rx_ba_sessions;
+};
+
+struct iwl_mvm_tcm {
+ struct delayed_work work;
+ spinlock_t lock; /* used when time elapsed */
+ unsigned long ts; /* timestamp when period ends */
+ unsigned long ll_ts;
+ unsigned long uapsd_nonagg_ts;
+ bool paused;
+ struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+ struct {
+ u32 elapsed; /* milliseconds for this TCM period */
+ u32 airtime[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+ enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
+ enum iwl_mvm_traffic_load global_load;
+ bool low_latency[NUM_MAC_INDEX_DRIVER];
+ bool change[NUM_MAC_INDEX_DRIVER];
+ } result;
+};
+
+/**
+ * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @queue: queue of this reorder buffer
+ * @valid: reordering is valid for this queue
+ * @lock: protect reorder buffer internal state
+ */
+struct iwl_mvm_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ int queue;
+ bool valid;
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored
+ */
+struct iwl_mvm_reorder_buf_entry {
+ struct sk_buff_head frames;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
+#endif
+;
+
+/**
+ * struct iwl_mvm_baid_data - BA session data
+ * @sta_mask: current station mask for the BAID
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @entries_per_queue: # of buffers per queue, this actually gets
+ * aligned up to avoid cache line sharing between queues
+ * @last_rx: last rx jiffies, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @rcu_ptr: BA data RCU protected access
+ * @rcu_head: RCU head for freeing this data
+ * @mvm: mvm pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: data
+ */
+struct iwl_mvm_baid_data {
+ struct rcu_head rcu_head;
+ u32 sta_mask;
+ u8 tid;
+ u8 baid;
+ u16 timeout;
+ u16 buf_size;
+ u16 entries_per_queue;
+ unsigned long last_rx;
+ struct timer_list session_timer;
+ struct iwl_mvm_baid_data __rcu **rcu_ptr;
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
+};
+
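+/*
+ * Recover the owning iwl_mvm_baid_data from a reorder buffer pointer:
+ * reorder_buf[] is embedded in the BAID data, so subtracting the array
+ * offset plus queue * sizeof(*buf) walks back to the containing struct
+ * (an open-coded container_of() with an array-index adjustment).
+ */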
+static inline struct iwl_mvm_baid_data *
+iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
+{
+ return (void *)((u8 *)buf -
+ offsetof(struct iwl_mvm_baid_data, reorder_buf) -
+ sizeof(*buf) * buf->queue);
+}
+
+/*
+ * enum iwl_mvm_queue_status - queue status
+ * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
+ * Basically, this means that this queue can be used for any purpose
+ * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
+ * This is the state of a queue that has been dedicated for some RA/TID
+ * (agg'd or not), but that hasn't yet gone through the actual enablement
+ * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
+ * Note that in this state there is no requirement to already know what TID
+ * should be used with this queue, it is just marked as a queue that will
+ * be used, and shouldn't be allocated to anyone else.
+ * @IWL_MVM_QUEUE_READY: queue is ready to be used
+ * This is the state of a queue that has been fully configured (including
+ * SCD pointers, etc), has a specific RA/TID assigned to it, and can be
+ * used to send traffic.
+ * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared
+ * This is a state in which a single queue serves more than one TID, all of
+ * which are not aggregated. Note that the queue is only associated to one
+ * RA.
+ */
+enum iwl_mvm_queue_status {
+ IWL_MVM_QUEUE_FREE,
+ IWL_MVM_QUEUE_RESERVED,
+ IWL_MVM_QUEUE_READY,
+ IWL_MVM_QUEUE_SHARED,
+};
+
+#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
+#define IWL_MVM_INVALID_QUEUE 0xFFFF
+
+#define IWL_MVM_NUM_CIPHERS 10
+
+struct iwl_mvm_txq {
+ struct list_head list;
+ u16 txq_id;
+ atomic_t tx_request;
+#define IWL_MVM_TXQ_STATE_READY 0
+#define IWL_MVM_TXQ_STATE_STOP_FULL 1
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2
+#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3
+ unsigned long state;
+};
+
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+ return (void *)txq->drv_priv;
+}
+
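+/*
+ * Note: mac80211 allocates sta->txq[] with IEEE80211_NUM_TIDS + 1
+ * entries, the last one dedicated to management frames, which is why
+ * iwl_mvm_txq_from_tid() below maps the driver's management TID
+ * (IWL_MAX_TID_COUNT) to IEEE80211_NUM_TIDS.
+ */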
+static inline struct iwl_mvm_txq *
+iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
+
+ return (void *)sta->txq[tid]->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
+ *
+ * @sta_id: sta id
+ * @txq_tid: txq tid
+ */
+struct iwl_mvm_tvqm_txq_info {
+ u8 sta_id;
+ u8 txq_tid;
+};
+
+struct iwl_mvm_dqa_txq_info {
+	u8 ra_sta_id; /* The RA this queue is mapped to, if any */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+	u8 txq_tid; /* The TID "owner" of this queue */
+ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+ enum iwl_mvm_queue_status status;
+};
+
+struct ptp_data {
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+ struct delayed_work dwork;
+
+ /* The last GP2 reading from the hw */
+ u32 last_gp2;
+
+ /* number of wraparounds since scale_update_adj_time_ns */
+ u32 wrap_counter;
+
+ /* GP2 time when the scale was last updated */
+ u32 scale_update_gp2;
+
+ /* Adjusted time when the scale was last updated in nanoseconds */
+ u64 scale_update_adj_time_ns;
+
+ /* clock frequency offset, scaled to 65536000000 */
+ u64 scaled_freq;
+
+ /* Delta between hardware clock and ptp clock in nanoseconds */
+ s64 delta;
+};
+
+struct iwl_time_sync_data {
+ struct sk_buff_head frame_list;
+ u8 peer_addr[ETH_ALEN];
+ bool active;
+};
+
+struct iwl_mei_scan_filter {
+ bool is_mei_limited_scan;
+ struct sk_buff_head scan_res;
+ struct work_struct scan_work;
+};
+
+/**
+ * struct iwl_mvm_acs_survey_channel - per-channel survey information
+ *
+ * Stripped down version of &struct survey_info.
+ *
+ * @time: time in ms the radio was on the channel
+ * @time_busy: time in ms the channel was sensed busy
+ * @time_tx: time in ms spent transmitting data
+ * @time_rx: time in ms spent receiving data
+ * @noise: channel noise in dBm
+ */
+struct iwl_mvm_acs_survey_channel {
+ u32 time;
+ u32 time_busy;
+ u32 time_tx;
+ u32 time_rx;
+ s8 noise;
+};
+
+struct iwl_mvm_acs_survey {
+ struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS];
+
+ /* Overall number of channels */
+ int n_channels;
+
+ /* Storage space for per-channel information follows */
+ struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels);
+};
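+
+/*
+ * Allocation sketch (hypothetical caller): with the flexible channels[]
+ * array annotated by __counted_by(), the usual pattern is
+ *
+ *	survey = kzalloc(struct_size(survey, channels, n_channels),
+ *			 GFP_KERNEL);
+ *	survey->n_channels = n_channels;
+ */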
+
+struct iwl_mvm {
+ /* for logger access */
+ struct device *dev;
+
+ struct iwl_trans *trans;
+ const struct iwl_fw *fw;
+ const struct iwl_rf_cfg *cfg;
+ struct iwl_phy_db *phy_db;
+ struct ieee80211_hw *hw;
+
+ /* for protecting access to iwl_mvm */
+ struct mutex mutex;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct work_struct async_handlers_wk;
+
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
+ struct wiphy_work trig_link_selection_wk;
+
+ struct work_struct roc_done_wk;
+
+ unsigned long init_status;
+
+ unsigned long status;
+
+ u32 queue_sync_cookie;
+ unsigned long queue_sync_state;
+ /*
+ * for beacon filtering -
+ * currently only one interface can be supported
+ */
+ struct iwl_mvm_vif *bf_allowed_vif;
+
+ bool hw_registered;
+ bool rfkill_safe_init_done;
+
+ u8 cca_40mhz_workaround;
+
+ u8 fw_rates_ver;
+
+ u32 ampdu_ref;
+ bool ampdu_toggle;
+
+ struct iwl_notif_wait_data notif_wait;
+
+ union {
+ struct mvm_statistics_rx_v3 rx_stats_v3;
+ struct mvm_statistics_rx rx_stats;
+ };
+
+ struct {
+ u64 rx_time;
+ u64 tx_time;
+ u64 on_time_rf;
+ u64 on_time_scan;
+ } radio_stats, accu_radio_stats;
+
+ struct list_head add_stream_txqs;
+ union {
+ struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
+ struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+ };
+ struct work_struct add_stream_wk; /* To add streams to queues */
+ spinlock_t add_stream_lock;
+
+ const char *nvm_file_name;
+ struct iwl_nvm_data *nvm_data;
+ struct iwl_mei_nvm *mei_nvm_data;
+ struct iwl_mvm_csme_conn_info __rcu *csme_conn_info;
+ bool mei_rfkill_blocked;
+ bool mei_registered;
+ struct work_struct sap_connected_wk;
+
+ /*
+ * NVM built based on the SAP data but that we can't free even after
+ * we get ownership because it contains the cfg80211's channel.
+ */
+ struct iwl_nvm_data *temp_nvm_data;
+
+ /* NVM sections */
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
+
+ struct iwl_fw_runtime fwrt;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+ /* data related to data path */
+ struct iwl_rx_phy_info last_phy_info;
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX];
+ /* note: fw_id_to_link_sta must be protected by wiphy and mvm mutexes */
+ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
+ u8 rx_ba_sessions;
+
+ /* configured by mac80211 */
+ u32 rts_threshold;
+
+ /* Scan status, cmd (pre-allocated) and auxiliary station */
+ unsigned int scan_status;
+ size_t scan_cmd_size;
+ void *scan_cmd;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* For CDB this is the low-band scan type; for non-CDB it is the only type. */
+ enum iwl_mvm_scan_type scan_type;
+ enum iwl_mvm_scan_type hb_scan_type;
+
+ enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+ struct delayed_work scan_timeout_dwork;
+
+ /* max number of simultaneous scans the FW supports */
+ unsigned int max_scans;
+
+ /* UMAC scan tracking */
+ u32 scan_uid_status[IWL_MAX_UMAC_SCANS];
+
+ /* start time of last scan in TSF of the mac that requested the scan */
+ u64 scan_start;
+
+ /* the vif that requested the current scan */
+ struct iwl_mvm_vif *scan_vif;
+ u8 scan_link_id;
+
+ /* rx chain antennas set through debugfs for the scan command */
+ u8 scan_rx_ant;
+
+ /* Internal station */
+ struct iwl_mvm_int_sta aux_sta;
+ struct iwl_mvm_int_sta snif_sta;
+
+ bool last_ebs_successful;
+
+ u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+ u8 mgmt_last_antenna_idx;
+
+ u8 set_tx_ant;
+ u8 set_rx_ant;
+
+ /* last smart fifo state that was successfully sent to firmware */
+ enum iwl_sf_state sf_state;
+
+ /*
+ * Leave this pointer outside the ifdef below so that it can be
+ * assigned without ifdef in the source code.
+ */
+ struct dentry *debugfs_dir;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ u32 dbgfs_prph_reg_addr;
+ bool disable_power_off;
+ bool disable_power_off_d3;
+ bool beacon_inject_active;
+
+ bool scan_iter_notif_enabled;
+
+ struct debugfs_blob_wrapper nvm_hw_blob;
+ struct debugfs_blob_wrapper nvm_sw_blob;
+ struct debugfs_blob_wrapper nvm_calib_blob;
+ struct debugfs_blob_wrapper nvm_prod_blob;
+ struct debugfs_blob_wrapper nvm_phy_sku_blob;
+ struct debugfs_blob_wrapper nvm_reg_blob;
+
+ struct iwl_mvm_frame_stats drv_rx_stats;
+ spinlock_t drv_stats_lock;
+ u16 dbgfs_rx_phyinfo;
+#endif
+
+ struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
+
+ struct list_head time_event_list;
+ spinlock_t time_event_lock;
+
+ /*
+ * A bitmap of the key indices in use. The firmware can hold at
+ * most 16 keys; the bitmap size reflects that limit.
+ */
+ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ u8 fw_key_deleted[STA_KEY_MAX_NUM];
+
+ struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER];
+
+ u8 *error_recovery_buf;
+
+#ifdef CONFIG_IWLWIFI_LEDS
+ struct led_classdev led;
+#endif
+
+ struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy_wowlan_support wowlan;
+ int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+
+ /* sched scan settings for net detect */
+ struct ieee80211_scan_ies nd_ies;
+ struct cfg80211_match_set *nd_match_sets;
+ int n_nd_match_sets;
+ struct ieee80211_channel **nd_channels;
+ int n_nd_channels;
+ bool net_detect;
+ bool fast_resume;
+ u8 offload_tid;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool d3_wake_sysassert;
+ bool d3_test_active;
+ u32 d3_test_pme_ptr;
+ struct ieee80211_vif *keep_vif;
+ u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
+#endif
+#endif
+
+ wait_queue_head_t rx_sync_waitq;
+
+ /* BT-Coex - only one of those will be used */
+ union {
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
+ struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
+ };
+ struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+
+ u8 bt_tx_prio;
+ enum iwl_bt_force_ant_mode bt_force_ant_mode;
+
+ /* Aux ROC */
+ struct list_head aux_roc_te_list;
+
+ /* Thermal Throttling and CTkill */
+ struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+ struct iwl_mvm_thermal_device tz_device;
+ struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
+ s32 temperature; /* Celsius */
+ /*
+ * Debug option to set the NIC temperature. This option makes the
+ * driver think this is the actual NIC temperature, ignoring the
+ * real temperature that is received from the fw.
+ */
+ bool temperature_test; /* Debug test temperature is enabled */
+
+ bool fw_static_smps_request;
+
+ unsigned long bt_coex_last_tcm_ts;
+ struct iwl_mvm_tcm tcm;
+
+ u8 uapsd_noagg_bssid_write_idx;
+ struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
+ __aligned(2);
+
+ struct iwl_time_quota_cmd last_quota_cmd;
+
+ /* Tx queues */
+ u16 aux_queue;
+ u16 snif_queue;
+ u16 probe_queue;
+ u16 p2p_dev_queue;
+
+ /* Indicate if device power save is allowed */
+ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+ /* Indicate if 32Khz external clock is valid */
+ u32 ext_clock_valid;
+
+ /* This vif is used by CSME to send/receive traffic */
+ struct ieee80211_vif *csme_vif;
+ struct ieee80211_vif __rcu *csa_vif;
+ struct ieee80211_vif __rcu *csa_tx_blocked_vif;
+ u8 csa_tx_block_bcn_timeout;
+
+ /* system time of last beacon (for AP/GO interface) */
+ u32 ap_last_beacon_gp2;
+
+ /* indicates that we transmitted the last beacon */
+ bool ibss_manager;
+
+ bool lar_regdom_set;
+ enum iwl_mcc_source mcc_src;
+
+ /* TDLS channel switch data */
+ struct {
+ struct delayed_work dwork;
+ enum iwl_mvm_tdls_cs_state state;
+
+ /*
+ * Current cs sta - might be different from periodic cs peer
+ * station. Value is meaningless when the cs-state is idle.
+ */
+ u8 cur_sta_id;
+
+ /* TDLS periodic channel-switch peer */
+ struct {
+ u8 sta_id;
+ u8 op_class;
+ bool initiator; /* are we the link initiator */
+ struct cfg80211_chan_def chandef;
+ struct sk_buff *skb; /* ch sw template */
+ u32 ch_sw_tm_ie;
+
+ /* timestamp of last ch-sw request sent (GP2 time) */
+ u32 sent_timestamp;
+ } peer;
+ } tdls_cs;
+
+ u32 ciphers[IWL_MVM_NUM_CIPHERS];
+
+ struct cfg80211_ftm_responder_stats ftm_resp_stats;
+ struct {
+ struct cfg80211_pmsr_request *req;
+ struct wireless_dev *req_wdev;
+ struct list_head loc_list;
+ int responses[IWL_TOF_MAX_APS];
+ struct {
+ struct list_head resp;
+ } smooth;
+ struct list_head pasn_list;
+ } ftm_initiator;
+
+ struct list_head resp_pasn_list;
+
+ struct ptp_data ptp_data;
+
+ struct {
+ u8 range_resp;
+ } cmd_ver;
+
+ struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
+
+ /*
+ * Drop beacons from other APs in AP mode when there are no connected
+ * clients.
+ */
+ bool drop_bcn_ap_mode;
+
+ struct delayed_work cs_tx_unblock_dwork;
+
+ /* does a monitor vif exist (only one can exist hence bool) */
+ bool monitor_on;
+ /*
+ * primary channel position relative to the whole bandwidth,
+ * in steps of 80 MHz
+ */
+ u8 monitor_p80;
+
+ /* sniffer data to include in radiotap */
+ __le16 cur_aid;
+ u8 cur_bssid[ETH_ALEN];
+
+ /* report rx timestamp in ptp clock time */
+ bool rx_ts_ptp;
+
+ unsigned long last_6ghz_passive_scan_jiffies;
+ unsigned long last_reset_or_resume_time_jiffies;
+
+ bool sta_remove_requires_queue_remove;
+ bool mld_api_is_used;
+
+ /*
+ * Indicates that the firmware will do a product reset (and
+ * therefore fail to load) when we start it (due to OTP burn);
+ * if so, don't dump errors etc. since this is expected.
+ */
+ bool fw_product_reset;
+
+ struct iwl_time_sync_data time_sync;
+
+ struct iwl_mei_scan_filter mei_scan_filter;
+
+ struct iwl_mvm_acs_survey *acs_survey;
+
+ bool statistics_clear;
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \
+ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw) \
+ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+
+DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))
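+
+/*
+ * Usage sketch (illustrative only; the callback name is hypothetical):
+ * op-mode callbacks recover the mvm pointer from mac80211 and can use
+ * the guard above for scope-based locking of mvm->mutex, e.g.
+ *
+ *	void iwl_mvm_example_cb(struct ieee80211_hw *hw)
+ *	{
+ *		struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ *
+ *		guard(mvm)(mvm);
+ *		.. mvm->mutex is held here and dropped on scope exit ..
+ *	}
+ */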
+
+/**
+ * enum iwl_mvm_status - MVM status bits
+ * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
+ * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
+ * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when
+ * P2P is not over AUX)
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
+ * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
+ * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
+ * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress the next error log,
+ *	used when an error is intentionally triggered
+ */
+enum iwl_mvm_status {
+ IWL_MVM_STATUS_HW_RFKILL,
+ IWL_MVM_STATUS_HW_CTKILL,
+ IWL_MVM_STATUS_ROC_P2P_RUNNING,
+ IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ IWL_MVM_STATUS_IN_HW_RESTART,
+ IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ IWL_MVM_STATUS_FIRMWARE_RUNNING,
+ IWL_MVM_STATUS_IN_D3,
+ IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
+};
+
+struct iwl_mvm_csme_conn_info {
+ struct rcu_head rcu_head;
+ struct iwl_mei_conn_info conn_info;
+};
+
+/* Keep track of completed init configuration */
+enum iwl_mvm_init_status {
+ IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
+ IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
+};
+
+static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
+{
+ return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+}
+
+/* Must be called with rcu_read_lock() held; the lock may only be
+ * released once the returned mvmsta is no longer needed.
+ */
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
+ return NULL;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /* This can happen if the station was just removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
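+
+/*
+ * Usage sketch (illustrative only): the _rcu variant must run inside a
+ * read-side critical section, e.g.
+ *
+ *	rcu_read_lock();
+ *	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ *	if (mvmsta)
+ *		.. use mvmsta; it must not be used after unlock ..
+ *	rcu_read_unlock();
+ */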
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station was just removed */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline struct ieee80211_vif *
+iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
+{
+ if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac)))
+ return NULL;
+
+ if (rcu)
+ return rcu_dereference(mvm->vif_id_to_mac[vif_id]);
+
+ return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id],
+ lockdep_is_held(&mvm->mutex));
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
+}
+
+static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP);
+}
+
+static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
+{
+ /* OCE should never be enabled for LMAC scan FWs */
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
+}
+
+static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS);
+}
+
+static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF);
+}
+
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+ bool nvm_lar = mvm->nvm_data->lar_enabled;
+ bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+ /*
+ * Enable LAR only if it is supported by the FW (TLV) &&
+ * enabled in the NVM
+ */
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT)
+ return nvm_lar && tlv_lar;
+ else
+ return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
+}
+
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+ IWL_MVM_BT_COEX_RRC;
+}
+
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
+ IWL_MVM_BT_COEX_MPLUT;
+}
+
+static inline
+bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) &&
+ !(iwlwifi_mod_params.uapsd_disable &
+ IWL_DISABLE_UAPSD_P2P_CLIENT);
+}
+
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
+}
+
+static inline bool iwl_mvm_has_mld_api(const struct iwl_fw *fw)
+{
+ return fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT);
+}
+
+static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw)
+{
+ return iwl_mvm_has_mld_api(fw) ||
+ iwl_fw_lookup_cmd_ver(fw, ADD_STA, 0) >= 12;
+}
+
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+ /* TODO - replace with TLV once defined */
+ return mvm->trans->mac_cfg->gen2;
+}
+
+static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
+{
+ /* TODO - better define this */
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO:
+ * The issue of how to determine CDB APIs and usage is still not fully
+ * defined.
+ * There is a compilation for CDB and non-CDB FW, but there may
+ * be also runtime check.
+ * For now there is a TLV for checking compilation mode, but a
+ * runtime check will also have to be here - once defined.
+ */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
+{
+ /*
+ * TODO: should this be the same as iwl_mvm_is_cdb_supported()?
+ * but then there's a little bit of code in scan that won't make
+ * any sense...
+ */
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
+}
+
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
+static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
+}
+
+static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NEW_RX_STATS);
+}
+
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+}
+
+static inline bool iwl_mvm_has_no_host_disable_tx(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX);
+}
+
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
+}
+
+static inline struct agg_tx_status *
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return &((struct iwl_tx_resp *)tx_resp)->status;
+ else
+ return ((struct iwl_tx_resp_v3 *)tx_resp)->status;
+}
+
+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+ /* these two TLVs are redundant, since the FW takes responsibility
+ * for CT-kill only after we have sent at least one temperature
+ * thresholds report command.
+ */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
+{
+ if (CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id))
+ return false;
+
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_FM:
+ /* Step A doesn't support eSR */
+ return CSR_HW_RFID_STEP(trans->info.hw_rf_id);
+ case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_trans *trans = mvm->fwrt.trans;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ return mvm->fw->ucode_capa.num_beacons;
+
+ /* Check if HW supports eSR or STR */
+ if (iwl_mvm_is_esr_supported(trans) ||
+ (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id)))
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
+
+ return 1;
+}
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
+
+static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
+ enum ieee80211_ac_numbers ac)
+{
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
+}
+
+static inline bool iwl_mvm_has_rlc_offload(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD),
+ 0) >= 3;
+}
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+};
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend);
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm);
+
+/* Utils */
+int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band);
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r);
+u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx);
+u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
+bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+ iwl_fwrt_dump_error_logs(&mvm->fwrt);
+}
+
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
+ u64 *boottime, ktime_t *realtime);
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+ u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+ struct iwl_host_cmd *cmd,
+ u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
+ u16 len, const void *data,
+ u32 *status);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info, u8 sta_id);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc);
+void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ unsigned int tid);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
+
+/* Utils to extract sta related data */
+__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta);
+u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta);
+u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_bss_conf *link_conf,
+ u32 *_agg_size);
+int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext);
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ tx_cmd_params->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd_params->key, keyconf->key, keyconf->keylen);
+}
+
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+ flush_work(&mvm->async_handlers_wk);
+}
+
+/* Statistics */
+void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+static inline void
+iwl_mvm_handle_rx_system_end_stats_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm,
+ bool enable);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
+
+static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
+{
+ u8 tx_ant = mvm->fw->valid_tx_ant;
+
+ if (mvm->nvm_data && mvm->nvm_data->valid_tx_ant)
+ tx_ant &= mvm->nvm_data->valid_tx_ant;
+
+ if (mvm->set_tx_ant)
+ tx_ant &= mvm->set_tx_ant;
+
+ return tx_ant;
+}
+
+static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
+{
+ u8 rx_ant = mvm->fw->valid_rx_ant;
+
+ if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant)
+ rx_ant &= mvm->nvm_data->valid_rx_ant;
+
+ if (mvm->set_rx_ant)
+ rx_ant &= mvm->set_rx_ant;
+
+ return rx_ant;
+}
+
+static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
+{
+ *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
+}
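+
+/*
+ * Sketch of the intended toggling pattern (illustrative): callers keep
+ * the last-used antenna index and alternate it between transmissions,
+ * e.g. for management frames:
+ *
+ *	iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+ */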
+
+static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
+{
+ u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+ FW_PHY_CFG_RX_CHAIN);
+ u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+ u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+ phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+ valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+ return mvm->fw->phy_config & phy_config;
+}
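+
+/*
+ * Usage sketch (illustrative assumption about the caller): the
+ * resulting mask is what gets sent in the PHY configuration command
+ * at FW init, roughly:
+ *
+ *	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+ */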
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM PHY */
+struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
+ u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
+ u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
+int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ u8 chains_static, u8 chains_dynamic);
+
+/* MAC (virtual interface) programming */
+
+void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link_info,
+ __le32 *cck_rates, __le32 *ofdm_rates);
+void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ __le32 *protection_flags, u32 ht_flag,
+ u32 tgg_flag);
+void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct iwl_ac_qos *ac, __le32 *qos_flags);
+bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm,
+ const struct iwl_mvm_vif_link_info *link_info,
+ struct iwl_he_backoff_conf *trig_based_txf);
+void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ __le64 *dtim_tsf, __le32 *dtim_time,
+ __le32 *assoc_beacon_arrive_time);
+__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ __le32 *filter_flags,
+ int accept_probe_req_flag,
+ int accept_beacon_flag);
+int iwl_mvm_get_mac_type(struct ieee80211_vif *vif);
+__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off);
+int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off, const u8 *bssid_override);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+ struct sk_buff *beacon,
+ void *data, int len);
+u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif);
+u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif);
+u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw,
+ u8 rate_idx);
+void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+ __le32 *tim_index, __le32 *tim_size,
+ u8 *beacon, u32 frame_size);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif_legacy(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_beacon_filter_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
+
+/* Links */
+void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link);
+void iwl_mvm_set_link_fw_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u32 changes, bool active);
+int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
+
+struct iwl_mvm_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b);
+
+s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+
+extern const struct iwl_hcmd_arr iwl_mvm_groups[];
+extern const unsigned int iwl_mvm_groups_size;
+#endif
+
+/* AP and IBSS */
+bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int *ret);
+void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/* BSS Info */
+void iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes);
+void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u64 changes);
+
+/* ROC */
+/**
+ * struct iwl_mvm_roc_ops - callbacks for remain_on_channel()
+ *
+ * Since the only difference between the MLD and non-MLD versions of
+ * remain_on_channel() is these function calls, each version passes
+ * its specific callbacks to %iwl_mvm_roc_common().
+ *
+ * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
+ *	for Hotspot 2.0
+ * @link: For a P2P Device interface, pointer to a function that links the
+ * MAC/Link to the PHY context
+ */
+struct iwl_mvm_roc_ops {
+ int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
+ int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+};
+
+int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type,
+ const struct iwl_mvm_roc_ops *ops);
+int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+/* Session Protection */
+void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u32 duration_override, unsigned int link_id);
+
+/* Quota management */
+static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_quota_low_latency(mvm) ?
+ sizeof(struct iwl_time_quota_cmd) :
+ sizeof(struct iwl_time_quota_cmd_v1);
+}
+
+static inline struct iwl_time_quota_data
+*iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd,
+ int i)
+{
+ struct iwl_time_quota_data_v1 *quotas;
+
+ if (iwl_mvm_has_quota_low_latency(mvm))
+ return &cmd->quotas[i];
+
+ quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas;
+ return (struct iwl_time_quota_data *)&quotas[i];
+}
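+
+/*
+ * Iteration sketch (illustrative; cmd, quota and MAX_BINDINGS are
+ * assumed example names here): the accessor above hides the layout
+ * difference between the v1 and low-latency quota commands, e.g.
+ *
+ *	for (i = 0; i < MAX_BINDINGS; i++) {
+ *		struct iwl_time_quota_data *qdata =
+ *			iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ *
+ *		qdata->quota = cpu_to_le32(quota);
+ *	}
+ */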
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
+ struct ieee80211_vif *disabled_vif);
+
+/* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* Scheduled scan */
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* UMAC scan */
+int iwl_mvm_config_scan(struct iwl_mvm *mvm);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm);
+void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#else
+static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
+{
+}
+static inline void
+iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+static inline void
+iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
+int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate);
+void rs_update_last_rssi(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_rx_status *rx_status);
+
+/* power management */
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+#ifdef CONFIG_IWLWIFI_LEDS
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+}
+#endif
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
+extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+
+static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+}
+
+static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ return 0;
+}
+#endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v6 *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ bool offload_ns,
+ u32 cmd_flags,
+ u8 sta_id);
+
+/* BT Coex */
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
+void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum ieee80211_rssi_event_data);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ enum nl80211_band band);
+u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info, u8 ac);
+
+/* beacon filtering */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd);
+#else
+static inline void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{}
+#endif
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+/* SMPS */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request,
+ unsigned int link_id);
+void
+iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mvm_low_latency_cause cause);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
+ u16 mac_id);
+
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+ * interfaces aren't active they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+ return mvmvif->low_latency_actual;
+}
+
+static inline
+void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
+ enum iwl_mvm_low_latency_cause cause)
+{
+ u8 new_state;
+
+ if (set)
+ mvmvif->low_latency |= cause;
+ else
+ mvmvif->low_latency &= ~cause;
+
+ /*
+ * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is set, no changes to the
+ * actual mode are allowed.
+ */
+ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE &&
+ cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE)
+ return;
+
+ if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set)
+ /*
+ * We are entering the forced state
+ */
+ new_state = !!(mvmvif->low_latency &
+ LOW_LATENCY_DEBUGFS_FORCE);
+ else
+ /*
+ * Check if any other cause has set low latency
+ */
+ new_state = !!(mvmvif->low_latency &
+ ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE |
+ LOW_LATENCY_DEBUGFS_FORCE));
+
+ mvmvif->low_latency_actual = new_state;
+}
+
+/* Return a bitmask with all the hw supported queues, except for the
+ * command queue, which can't be flushed.
+ */
+static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
+{
+ return ((BIT(mvm->trans->mac_cfg->base->num_of_queues) - 1) &
+ ~BIT(IWL_MVM_DQA_CMD_QUEUE));
+}
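+
+/*
+ * Usage sketch (illustrative): flushing every flushable queue can pass
+ * this mask to the TX-path flush helper declared above, e.g.
+ *
+ *	iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm));
+ */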
+
+void iwl_mvm_stop_device(struct iwl_mvm *mvm);
+
+/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
+
+#if IS_ENABLED(CONFIG_IWLMEI)
+
+/* vendor commands */
+void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm);
+
+#else
+
+static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {}
+
+#endif
+
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp_v8 *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+ bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool added_vif);
+
+/* FTM responder */
+int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
+void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/* FTM initiator */
+void iwl_mvm_ftm_restart(struct iwl_mvm *mvm);
+void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
+
+/* TDLS */
+
+/*
+ * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
+ * This TID is marked as used vs the AP and all connected TDLS peers.
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool sta_added);
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ enum iwl_mvm_rxq_notif_type type,
+ bool sync,
+ const void *data, u32 size);
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
+
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg);
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid);
+void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter);
+
+void iwl_mvm_ptp_init(struct iwl_mvm *mvm);
+void iwl_mvm_ptp_remove(struct iwl_mvm *mvm);
+u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
+void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir);
+#endif
+
+/* new MLD related APIs */
+int iwl_mvm_sec_key_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
+void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link,
+ unsigned int link_id);
+int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask);
+int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags,
+ struct ieee80211_key_conf *keyconf);
+u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+
+bool iwl_rfi_supported(struct iwl_mvm *mvm);
+int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
+ struct iwl_rfi_lut_entry *rfi_table);
+struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ case NL80211_BAND_6GHZ:
+ return PHY_BAND_6;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
+static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
+/* Channel Switch */
+void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk);
+int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+/* Channel Context */
+/**
+ * struct iwl_mvm_switch_vif_chanctx_ops - callbacks for switch_vif_chanctx()
+ *
+ * Since the only difference between the MLD and non-MLD versions of
+ * switch_vif_chanctx() is these function calls, each version passes
+ * its specific callbacks to %iwl_mvm_switch_vif_chanctx_common().
+ *
+ * @__assign_vif_chanctx: pointer to the function that assigns a chanctx to
+ *	a given vif
+ * @__unassign_vif_chanctx: pointer to the function that unassigns a chanctx
+ *	from a given vif
+ */
+struct iwl_mvm_switch_vif_chanctx_ops {
+ int (*__assign_vif_chanctx)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx);
+ void (*__unassign_vif_chanctx)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx,
+ bool switching_chanctx);
+};
+
+int
+iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode,
+ const struct iwl_mvm_switch_vif_chanctx_ops *ops);
+
+/* Channel info utils */
+static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS);
+}
+
+static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci)
+{
+ return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ?
+ sizeof(struct iwl_fw_channel_info) :
+ sizeof(struct iwl_fw_channel_info_v1));
+}
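+
+/*
+ * Usage sketch (illustrative; cmd, tail and chandef are hypothetical
+ * example names): commands embedding a channel-info place their
+ * remaining fields after the version-dependent struct, so they are
+ * filled through the tail pointer, e.g.
+ *
+ *	iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+ *	tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
+ *	.. write the rest of the command at tail ..
+ */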
+
+static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 :
+ sizeof(struct iwl_fw_channel_info) -
+ sizeof(struct iwl_fw_channel_info_v1);
+}
+
+static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ u32 chan, u8 band, u8 width,
+ u8 ctrl_pos)
+{
+ if (iwl_mvm_has_ultra_hb_channel(mvm)) {
+ ci->channel = cpu_to_le32(chan);
+ ci->band = band;
+ ci->width = width;
+ ci->ctrl_pos = ctrl_pos;
+ } else {
+ struct iwl_fw_channel_info_v1 *ci_v1 =
+ (struct iwl_fw_channel_info_v1 *)ci;
+
+ ci_v1->channel = chan;
+ ci_v1->band = band;
+ ci_v1->width = width;
+ ci_v1->ctrl_pos = ctrl_pos;
+ }
+}
+
+static inline void
+iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
+ struct iwl_fw_channel_info *ci,
+ const struct cfg80211_chan_def *chandef)
+{
+ enum nl80211_band band = chandef->chan->band;
+
+ iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
+ iwl_mvm_phy_band_from_nl80211(band),
+ iwl_mvm_get_channel_width(chandef),
+ iwl_mvm_get_ctrl_pos(chandef));
+}
+
+static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
+{
+ u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
+ IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
+}
+
+static inline
+enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ return IWL_LOCATION_CIPHER_CCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return IWL_LOCATION_CIPHER_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return IWL_LOCATION_CIPHER_GCMP_256;
+ default:
+ return IWL_LOCATION_CIPHER_INVALID;
+ }
+}
+
+struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm);
+static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm)
+{
+ if (mvm->mei_registered)
+ return iwl_mei_get_ownership();
+ return 0;
+}
+
+static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ unsigned int ivlen)
+{
+ if (mvm->mei_registered)
+ iwl_mei_tx_copy_to_csme(skb, ivlen);
+}
+
+static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm)
+{
+ if (mvm->mei_registered)
+ iwl_mei_host_disassociated();
+}
+
+static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up)
+{
+ if (mvm->mei_registered)
+ iwl_mei_device_state(up);
+}
+
+static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
+{
+ bool sw_rfkill =
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
+
+ if (mvm->mei_registered)
+ iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
+ sw_rfkill);
+}
+
+static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+
+ return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4;
+}
+
+static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (mvm->mei_scan_filter.is_mei_limited_scan &&
+ (ieee80211_is_probe_resp(mgmt->frame_control) ||
+ ieee80211_is_beacon(mgmt->frame_control))) {
+ skb_queue_tail(&mvm->mei_scan_filter.scan_res, skb);
+ schedule_work(&mvm->mei_scan_filter.scan_work);
+ return true;
+ }
+
+ return false;
+}
+
+void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool forbidden);
+
+/* Callbacks for ieee80211_ops */
+void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control, struct sk_buff *skb);
+void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+
+int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant);
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
+ u32 rx_ant);
+int iwl_mvm_mac_start(struct ieee80211_hw *hw);
+void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type);
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
+static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
+{
+ return 0;
+}
+
+u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+
+void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req);
+void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta);
+void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data);
+int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
+void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta, u32 changed);
+void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
+void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info);
+void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key);
+int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx, u32 changed);
+int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event);
+void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw);
+int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len);
+int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+int
+iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *stats);
+int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+
+bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
+ struct iwl_mvm_vif *vif2);
+bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif);
+int iwl_mvm_set_tx_power(struct iwl_mvm *mvm,
+ struct ieee80211_bss_conf *bss_conf,
+ s16 tx_power);
+int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_set_hw_timestamp *hwts);
+int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
+
+struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx);
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, enum iwl_roc_activity activity);
+
+/* EMLSR */
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary);
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active);
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap);
+
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update);
+
+/* rate_n_flags conversion */
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver);
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver);
+
+#endif /* __IWL_MVM_H__ */
diff --git a/sys/contrib/dev/iwlwifi/mvm/nvm.c b/sys/contrib/dev/iwlwifi/mvm/nvm.c
new file mode 100644
index 000000000000..f01d5836fce6
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/nvm.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/firmware.h>
+#if defined(__linux__)
+#include <linux/rtnetlink.h>
+#endif
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "iwl-nvm-utils.h"
+#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+
+/* Default NVM chunk size to read/write */
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)
+
+#define NVM_WRITE_OPCODE 1
+#define NVM_READ_OPCODE 0
+
+/* load NVM chunk response statuses */
+enum {
+ READ_NVM_CHUNK_SUCCEED = 0,
+ READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
+/*
+ * Prepare the NVM host command with pointers into the NVM buffer
+ * and send it to the firmware.
+ */
+static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, const u8 *data)
+{
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_WRITE_OPCODE,
+ };
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .len = { sizeof(struct iwl_nvm_access_cmd), length },
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, data },
+ /* data may come from vmalloc, so use _DUP */
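+		/* (_DUP makes the transport duplicate the chunk into its
+		 * own buffer rather than mapping the caller's memory) */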
+ .dataflags = { 0, IWL_HCMD_DFL_DUP },
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_nvm_access_resp *nvm_resp;
+ int ret;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+ /* Extract & check NVM write response */
+ nvm_resp = (void *)pkt->data;
+ if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
+ IWL_ERR(mvm,
+ "NVM access write command failed for section %u (status = 0x%x)\n",
+ section, le16_to_cpu(nvm_resp->status));
+ ret = -EIO;
+ }
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+ u16 offset, u16 length, u8 *data)
+{
+ struct iwl_nvm_access_cmd nvm_access_cmd = {
+ .offset = cpu_to_le16(offset),
+ .length = cpu_to_le16(length),
+ .type = cpu_to_le16(section),
+ .op_code = NVM_READ_OPCODE,
+ };
+ struct iwl_nvm_access_resp *nvm_resp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = NVM_ACCESS_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &nvm_access_cmd, },
+ };
+ int ret, bytes_read, offset_read;
+ u8 *resp_data;
+
+ cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ /* Extract NVM response */
+ nvm_resp = (void *)pkt->data;
+ ret = le16_to_cpu(nvm_resp->status);
+ bytes_read = le16_to_cpu(nvm_resp->length);
+ offset_read = le16_to_cpu(nvm_resp->offset);
+ resp_data = nvm_resp->data;
+ if (ret) {
+ if ((offset != 0) &&
+ (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+			/*
+			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
+			 * read a chunk from an address that is a multiple of
+			 * 2K and got an error because that address is empty.
+			 * Meaning of (offset != 0): the driver already read
+			 * valid data from another chunk, so this case is not
+			 * an error.
+			 */
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
+ offset);
+ ret = 0;
+ } else {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM access command failed with status %d (device: %s)\n",
+ ret, mvm->trans->info.name);
+ ret = -ENODATA;
+ }
+ goto exit;
+ }
+
+ if (offset_read != offset) {
+ IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+ offset_read);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	/* Copy the chunk we read into the output buffer */
+ memcpy(data + offset, resp_data, bytes_read);
+ ret = bytes_read;
+
+exit:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
+ const u8 *data, u16 length)
+{
+ int offset = 0;
+
+ /* copy data in chunks of 2k (and remainder if any) */
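+	/* e.g. a 5000-byte section goes out as 2048 + 2048 + 904 bytes */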
+
+ while (offset < length) {
+ int chunk_size, ret;
+
+ chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
+ length - offset);
+
+ ret = iwl_nvm_write_chunk(mvm, section, offset,
+ chunk_size, data + offset);
+ if (ret < 0)
+ return ret;
+
+ offset += chunk_size;
+ }
+
+ return 0;
+}
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0, which is the EEPROM. Because EEPROM reading is not
+ * limited by the uCode, we need to manually check in this case that
+ * we don't overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as fits without
+ * overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+ u8 *data, u32 size_read)
+{
+ u16 length, offset = 0;
+ int ret;
+
+ /* Set nvm section read length */
+ length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+
+ ret = length;
+
+ /* Read the NVM until exhausted (reading less than requested) */
+ while (ret == length) {
+ /* Check no memory assumptions fail and cause an overflow */
+ if ((size_read + offset + length) >
+ mvm->trans->mac_cfg->base->eeprom_size) {
+ IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
+ return -ENOBUFS;
+ }
+
+ ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+ if (ret < 0) {
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "Cannot read NVM from section %d offset %d, length %d\n",
+ section, offset, length);
+ return ret;
+ }
+ offset += ret;
+ }
+
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, data, offset);
+
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "NVM section %d read completed\n", section);
+ return offset;
+}
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+ const __be16 *hw;
+ const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
+ u8 tx_ant = mvm->fw->valid_tx_ant;
+ u8 rx_ant = mvm->fw->valid_rx_ant;
+ int regulatory_type;
+
+ /* Checking for required sections */
+ if (mvm->trans->cfg->nvm_type == IWL_NVM) {
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data) {
+ IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
+ return NULL;
+ }
+ } else {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+ else
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
+ /* SW and REGULATORY sections are mandatory */
+ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+ !mvm->nvm_sections[regulatory_type].data) {
+ IWL_ERR(mvm,
+ "Can't parse empty family 8000 OTP/NVM sections\n");
+ return NULL;
+ }
+ /* MAC_OVERRIDE or at least HW section must exist */
+ if (!mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+ IWL_ERR(mvm,
+ "Can't parse mac_address, empty sections\n");
+ return NULL;
+ }
+
+ /* PHY_SKU section is mandatory in B0 */
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+ !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+ IWL_ERR(mvm,
+ "Can't parse phy_sku in B0, empty sections\n");
+ return NULL;
+ }
+ }
+
+ hw = (const __be16 *)sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data;
+ sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+ calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+ mac_override =
+ (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+ phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+ regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
+ if (mvm->set_tx_ant)
+ tx_ant &= mvm->set_tx_ant;
+
+ if (mvm->set_rx_ant)
+ rx_ant &= mvm->set_rx_ant;
+
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
+ regulatory, mac_override, phy_sku,
+ tx_ant, rx_ant);
+}
+
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+ int i, ret = 0;
+ struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+ for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+ if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+ continue;
+ ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+ sections[i].length);
+ if (ret < 0) {
+ IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+ break;
+ }
+ }
+ return ret;
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+ int ret, section;
+ u32 size_read = 0;
+ u8 *nvm_buffer, *temp;
+
+ if (WARN_ON_ONCE(mvm->trans->mac_cfg->base->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
+
+	/* Load the NVM values from the NIC */
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+ nvm_buffer = kmalloc(mvm->trans->mac_cfg->base->eeprom_size,
+ GFP_KERNEL);
+ if (!nvm_buffer)
+ return -ENOMEM;
+ for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
+ /* we override the constness for initial read */
+ ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
+ size_read);
+ if (ret == -ENODATA) {
+ ret = 0;
+ continue;
+ }
+ if (ret < 0)
+ break;
+ size_read += ret;
+ temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+ if (!temp) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, temp, ret);
+
+ mvm->nvm_sections[section].data = temp;
+ mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ switch (section) {
+ case NVM_SECTION_TYPE_SW:
+ mvm->nvm_sw_blob.data = temp;
+ mvm->nvm_sw_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_CALIBRATION:
+ mvm->nvm_calib_blob.data = temp;
+ mvm->nvm_calib_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PRODUCTION:
+ mvm->nvm_prod_blob.data = temp;
+ mvm->nvm_prod_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_PHY_SKU:
+ mvm->nvm_phy_sku_blob.data = temp;
+ mvm->nvm_phy_sku_blob.size = ret;
+ break;
+ case NVM_SECTION_TYPE_REGULATORY_SDP:
+ case NVM_SECTION_TYPE_REGULATORY:
+ mvm->nvm_reg_blob.data = temp;
+ mvm->nvm_reg_blob.size = ret;
+ break;
+ default:
+ if (section == mvm->trans->mac_cfg->base->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
+ }
+#endif
+ }
+ if (!size_read)
+ IWL_ERR(mvm, "OTP is blank\n");
+ kfree(nvm_buffer);
+
+	/* Load an external NVM file only if one was set in the mod param */
+ if (mvm->nvm_file_name) {
+ /* read External NVM file from the mod param */
+ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+ mvm->nvm_sections);
+ if (ret)
+ return ret;
+ }
+
+ /* parse the relevant nvm sections */
+ mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+ if (!mvm->nvm_data)
+ return -ENODATA;
+ IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
+ mvm->nvm_data->nvm_version);
+
+ return ret < 0 ? ret : 0;
+}
+
+struct iwl_mcc_update_resp_v8 *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+ enum iwl_mcc_source src_id)
+{
+ struct iwl_mcc_update_cmd mcc_update_cmd = {
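+		/* e.g. alpha2 "US" encodes as 0x5553 ('U' << 8 | 'S') */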
+ .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+ .source_id = (u8)src_id,
+ };
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = MCC_UPDATE_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+ .data = { &mcc_update_cmd },
+ };
+
+ int ret, resp_ver;
+ u32 status;
+ int resp_len, n_channels;
+ u16 mcc;
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+
+ IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], src_id);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pkt = cmd.resp_pkt;
+
+ resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+
+ /* Extract MCC response */
+ if (resp_ver >= 8) {
+ struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v8, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
+ resp_len = struct_size(resp_cp, channels, n_channels);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+ resp_cp->status = mcc_resp_v8->status;
+ resp_cp->mcc = mcc_resp_v8->mcc;
+ resp_cp->cap = mcc_resp_v8->cap;
+ resp_cp->source_id = mcc_resp_v8->source_id;
+ resp_cp->time = mcc_resp_v8->time;
+ resp_cp->geo_info = mcc_resp_v8->geo_info;
+ resp_cp->n_channels = mcc_resp_v8->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v8->channels,
+ n_channels * sizeof(__le32));
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
+ struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v4, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
+ resp_len = struct_size(resp_cp, channels, n_channels);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ resp_cp->status = mcc_resp_v4->status;
+ resp_cp->mcc = mcc_resp_v4->mcc;
+ resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
+ resp_cp->source_id = mcc_resp_v4->source_id;
+ resp_cp->time = mcc_resp_v4->time;
+ resp_cp->geo_info = mcc_resp_v4->geo_info;
+ resp_cp->n_channels = mcc_resp_v4->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v4->channels,
+ n_channels * sizeof(__le32));
+ } else {
+ struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
+
+ n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v3, channels, n_channels)) {
+ resp_cp = ERR_PTR(-EINVAL);
+ goto exit;
+ }
+ resp_len = struct_size(resp_cp, channels, n_channels);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp) {
+ resp_cp = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ resp_cp->status = mcc_resp_v3->status;
+ resp_cp->mcc = mcc_resp_v3->mcc;
+ resp_cp->cap = cpu_to_le32(mcc_resp_v3->cap);
+ resp_cp->source_id = mcc_resp_v3->source_id;
+ resp_cp->time = mcc_resp_v3->time;
+ resp_cp->geo_info = mcc_resp_v3->geo_info;
+ resp_cp->n_channels = mcc_resp_v3->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v3->channels,
+ n_channels * sizeof(__le32));
+ }
+
+ status = le32_to_cpu(resp_cp->status);
+
+ mcc = le16_to_cpu(resp_cp->mcc);
+
+ /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+ if (mcc == 0) {
+ mcc = 0x3030; /* "00" - world */
+ resp_cp->mcc = cpu_to_le16(mcc);
+ }
+
+ IWL_DEBUG_LAR(mvm,
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+ status, mcc, mcc >> 8, mcc & 0xff, n_channels);
+
+exit:
+ iwl_free_resp(&cmd);
+ return resp_cp;
+}
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+ bool tlv_lar;
+ bool nvm_lar;
+ int retval;
+ struct ieee80211_regdomain *regd;
+ char mcc[3];
+
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT) {
+ tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ nvm_lar = mvm->nvm_data->lar_enabled;
+ if (tlv_lar != nvm_lar)
+ IWL_INFO(mvm,
+				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM = %s)\n",
+ tlv_lar ? "enabled" : "disabled",
+ nvm_lar ? "enabled" : "disabled");
+ }
+
+ if (!iwl_mvm_is_lar_supported(mvm))
+ return 0;
+
+ /*
+ * try to replay the last set MCC to FW. If it doesn't exist,
+ * queue an update to cfg80211 to retrieve the default alpha2 from FW.
+ */
+ retval = iwl_mvm_init_fw_regd(mvm, true);
+ if (retval != -ENOENT)
+ return retval;
+
+	/*
+	 * Driver regulatory hint for the initial update; this also informs
+	 * the firmware that we support wifi location updates.
+	 * Disallow scans that might crash the FW while the LAR regdomain
+	 * is not set.
+	 */
+ mvm->lar_regdom_set = false;
+
+ regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
+ kfree(regd);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+ MCC_SOURCE_BIOS, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+ }
+
+ retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
+ kfree(regd);
+ return retval;
+}
+
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+ enum iwl_mcc_source src;
+ char mcc[3];
+ struct ieee80211_regdomain *regd;
+ int wgds_tbl_idx;
+ bool changed = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+ return;
+ }
+
+ if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+ return;
+
+ mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+ mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+ mcc[2] = '\0';
+ src = notif->source_id;
+
+ IWL_DEBUG_LAR(mvm,
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+ mcc, src);
+ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ if (!changed) {
+ IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n");
+ goto out;
+ }
+
+ wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+ if (wgds_tbl_idx < 1)
+ IWL_DEBUG_INFO(mvm,
+ "SAR WGDS is disabled or error received (%d)\n",
+ wgds_tbl_idx);
+ else
+ IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n",
+ wgds_tbl_idx);
+
+ regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+out:
+ kfree(regd);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/offloading.c b/sys/contrib/dev/iwlwifi/mvm/offloading.c
new file mode 100644
index 000000000000..15d4369678a2
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/offloading.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
+ */
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+ struct iwl_wowlan_config_cmd_v6 *cmd)
+{
+ int i;
+
+ /*
+	 * For the QoS counters we store the next value to use, so subtract
+	 * 0x10 here: the uCode adds 0x10 *before* using the value, while we
+	 * increment *after* using it (i.e. we store the next value to use).
+ */
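+	/*
+	 * Illustrative example: if 0x120 is stored as the next value to
+	 * use, 0x110 goes into the command; the uCode adds 0x10 before
+	 * use and transmits with 0x120, as intended.
+	 */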
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+ seq -= 0x10;
+ cmd->qos_seq[i] = cpu_to_le16(seq);
+ }
+}
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool disable_offloading,
+ bool offload_ns,
+ u32 cmd_flags,
+ u8 sta_id)
+{
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ struct iwl_proto_offload_cmd_v3_small v3s;
+ struct iwl_proto_offload_cmd_v4 v4;
+ } cmd = {};
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .flags = cmd_flags,
+ .data[0] = &cmd,
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
+ u32 capa_flags = mvm->fw->ucode_capa.flags;
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int i;
+ /*
+	 * Skip tentative addresses when NS offload is enabled, to avoid
+	 * violating RFC 4862.
+	 * Keep tentative addresses when NS offload is disabled, so that NS
+	 * packets are not filtered out and will wake up the host.
+ */
+ bool skip_tentative = offload_ns;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+ capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int c;
+ int num_skipped = 0;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ nsc = cmd.v3s.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+ addrs = cmd.v3s.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+ } else {
+ nsc = cmd.v4.ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd.v4.targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+ }
+
+ /*
+		 * For each address we have (and that will fit), fill a target
+		 * address struct, letting addresses that share a solicited-node
+		 * multicast address share a single NS offload config entry.
+ */
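+		/*
+		 * Illustrative example: 2001:db8::1:2 maps to the
+		 * solicited-node address ff02::1:ff01:2 (RFC 4291), so
+		 * unicast addresses sharing their low 24 bits can share
+		 * one NS config entry.
+		 */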
+ for (i = 0, c = 0;
+ i < mvmvif->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ struct in6_addr solicited_addr;
+ int j;
+
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs)) {
+ num_skipped++;
+ continue;
+ }
+
+ addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (mvmvif->num_target_ipv6_addrs - num_skipped)
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+ cmd.v3s.num_valid_ipv6_addrs =
+ cpu_to_le32(i - num_skipped);
+ else
+ cmd.v4.num_valid_ipv6_addrs =
+ cpu_to_le32(i - num_skipped);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ bool found = false;
+
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs))
+ continue;
+
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+
+ found = true;
+ }
+ if (found) {
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+ } else {
+		bool found = false;
+
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
+ if (skip_tentative &&
+ test_bit(i, mvmvif->tentative_addrs))
+ continue;
+
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+
+ found = true;
+ }
+
+ if (found) {
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+ }
+
+ if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+ if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+ common = &cmd.v3s.common;
+ size = sizeof(cmd.v3s);
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+ common = &cmd.v4.common;
+ size = sizeof(cmd.v4);
+ if (ver < 4) {
+ /*
+ * This basically uses iwl_proto_offload_cmd_v3_large
+ * which doesn't have the sta_id parameter before the
+ * common part.
+ */
+ size -= sizeof(cmd.v4.sta_id);
+ hcmd.data[0] = common;
+ }
+ } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
+ if (vif->cfg.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+ common->host_ipv4_addr = vif->cfg.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT))
+ enabled |= IWL_D3_PROTO_OFFLOAD_BTM;
+
+ if (!disable_offloading)
+ common->enabled = cpu_to_le32(enabled);
+
+ if (ver >= 4)
+ cmd.v4.sta_id = cpu_to_le32(sta_id);
+
+ hcmd.len[0] = size;
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/ops.c b/sys/contrib/dev/iwlwifi/mvm/ops.c
new file mode 100644
index 000000000000..912fb6677a0d
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/ops.c
@@ -0,0 +1,2221 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#if defined(__FreeBSD__)
+#define LINUXKPI_PARAM_PREFIX iwlwifi_mvm_
+#endif
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-nvm-utils.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw/api/scan.h"
+#include "fw/api/rfi.h"
+#include "time-event.h"
+#include "fw-api.h"
+#include "fw/acpi.h"
+#include "fw/uefi.h"
+#include "time-sync.h"
+
+#if defined(__linux__)
+#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+#elif defined(__FreeBSD__)
+#define DRV_DESCRIPTION "The new Intel(R) wireless AGN/AC/AX based driver for FreeBSD"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("BSD");
+#endif
+MODULE_IMPORT_NS("IWLWIFI");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+#if defined(__linux__)
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+#elif defined(__FreeBSD__)
+ .power_scheme = IWL_POWER_SCHEME_CAM, /* disable default PS */
+#endif
+};
+
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+ int ret;
+
+ ret = iwl_mvm_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+ if (ret) {
+ pr_err("Unable to register MVM op_mode: %d\n", ret);
+ iwl_mvm_rate_control_unregister();
+ }
+
+ return ret;
+}
+#if defined(__linux__)
+module_init(iwl_mvm_init);
+#elif defined(__FreeBSD__)
+module_init_order(iwl_mvm_init, SI_ORDER_SECOND);
+#endif
+
+static void __exit iwl_mvm_exit(void)
+{
+ iwl_opmode_deregister("iwlmvm");
+ iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ u32 reg_val;
+ u32 phy_config = iwl_mvm_get_phy_config(mvm);
+
+ radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
+ FW_PHY_CFG_RADIO_TYPE_POS;
+ radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
+ FW_PHY_CFG_RADIO_STEP_POS;
+ radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
+ FW_PHY_CFG_RADIO_DASH_POS;
+
+ IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+ radio_cfg_step, radio_cfg_dash);
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ /* SKU control */
+ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->info.hw_rev);
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+ ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+	/*
+	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
+	 * sampling, and shouldn't be set to any non-zero value.
+	 * The same is supposed to be true of the other HW, but on some of
+	 * it (such as the 7260) unsetting them causes automatic tests to
+	 * fail on seemingly unrelated errors. Need to further investigate
+	 * this, but for now we'll keep the cases separate.
+	 */
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+
+ if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
+ reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
+
+ iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
+ CSR_HW_IF_CONFIG_REG_D3_DEBUG,
+ reg_val);
+
+	/*
+	 * W/A: the NIC is stuck in a reset state after early PCIe power off
+	 * (PCIe power is lost before PERST# is asserted), causing ME FW
+	 * to lose ownership and be unable to obtain it back.
+	 */
+ if (!mvm->trans->mac_cfg->base->apmg_not_supported)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_esr_mode_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+	/* FW recommendations are only for entering EMLSR */
+ if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
+ return;
+
+ if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
+ else
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
+ iwl_mvm_get_primary_link(vif));
+}
+
+static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_vif *vif;
+
+ if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+ return;
+
+ /* FIXME: should fetch the link and not the vif */
+ vif = iwl_mvm_get_vif_by_macid(mvm, notif->link_id);
+ if (!vif || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
+ return;
+
+ if (!vif->cfg.assoc)
+ return;
+
+ /* this shouldn't happen *again*, ignore it */
+ if (mvm->cca_40mhz_workaround)
+ return;
+
+ /*
+ * We'll decrement this on disconnect - so set to 2 since we'll
+ * still have to disconnect from the current AP first.
+ */
+ mvm->cca_40mhz_workaround = 2;
+
+ /*
+ * This capability manipulation isn't really ideal, but it's the
+ * easiest choice - otherwise we'd have to do some major changes
+ * in mac80211 to support this, which isn't worth it. This does
+ * mean that userspace may have outdated information, but that's
+ * actually not an issue at all.
+ */
+ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ WARN_ON(!he->has_he);
+ WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+ he->he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ ieee80211_disconnect(vif, true);
+}
+
+void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (!link_conf)
+ return;
+
+ if (mvm->fw_static_smps_request &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->he_support)
+ mode = IEEE80211_SMPS_STATIC;
+
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode,
+ link_conf->link_id);
+}
+
+static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ rcu_read_lock();
+
+ for_each_vif_active_link(vif, link_conf, link_id)
+ iwl_mvm_update_link_smps(vif, link_conf);
+
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
+
+ /* firmware is expected to handle that in RLC offload mode */
+ if (IWL_FW_CHECK(mvm, iwl_mvm_has_rlc_offload(mvm),
+ "Got THERMAL_DUAL_CHAIN_REQUEST (0x%x) in RLC offload mode\n",
+ req->event))
+ return;
+
+ /*
+ * We could pass it to the iterator data, but also need to remember
+ * it for new interfaces that are added while in this state.
+ */
+ mvm->fw_static_smps_request =
+ req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
+ ieee80211_iterate_interfaces(mvm->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ iwl_mvm_intf_dual_chain_req, NULL);
+}
+
+/**
+ * enum iwl_rx_handler_context: context for Rx handler
+ * @RX_HANDLER_SYNC: the handler is called in the Rx path, which
+ *	can't acquire mvm->mutex.
+ * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
+ *	(and only in this case!), it should be set as ASYNC. In that case,
+ *	it will be called from a worker with mvm->mutex held.
+ * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
+ *	mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue infra
+ * instead of regular work queue.
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+};
+
+/**
+ * struct iwl_rx_handlers: handler for FW notification
+ * @cmd_id: command id
+ * @min_size: minimum size to expect for the notification
+ * @context: see &iwl_rx_handler_context
+ * @fn: the function called when the notification is received
+ */
+struct iwl_rx_handlers {
+ u16 cmd_id, min_size;
+ enum iwl_rx_handler_context context;
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \
+ { .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
+#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
+#define RX_HANDLER(_cmd_id, _fn, _context, _struct) \
+ { .cmd_id = _cmd_id, .fn = _fn, \
+ .context = _context, .min_size = sizeof(_struct), }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \
+ .context = _context, .min_size = sizeof(_struct), }
+
+/*
+ * Handlers for fw notifications
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, ...)
+ * This list should be in order of frequency for performance purposes.
+ *
+ * A handler runs in one of the contexts described in
+ * &iwl_rx_handler_context.
+ */
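+/*
+ * For example, the first entry below expands to:
+ *   { .cmd_id = TX_CMD, .fn = iwl_mvm_rx_tx_cmd,
+ *     .context = RX_HANDLER_SYNC, .min_size = sizeof(struct iwl_tx_resp) }
+ */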
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
+ struct iwl_tx_resp),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_ba_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
+ iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
+ struct iwl_tlc_update_notif),
+
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_prof_old_notif),
+ RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+ RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
+ iwl_mvm_handle_rx_system_oper_stats,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_system_statistics_notif_oper),
+ RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
+ iwl_mvm_handle_rx_system_oper_part1_stats,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_system_statistics_part1_notif_oper),
+ RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF,
+ iwl_mvm_handle_rx_system_end_stats_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_system_statistics_end_notif),
+
+ RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
+ iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
+ struct iwl_ba_window_status_notif),
+
+ RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
+ RX_HANDLER_SYNC, struct iwl_time_event_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
+ struct iwl_session_prot_notif),
+ RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
+
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_eosp_notification),
+
+ RX_HANDLER(SCAN_ITERATION_COMPLETE,
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+ struct iwl_lmac_scan_complete_notif),
+ RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+ iwl_mvm_rx_lmac_scan_complete_notif,
+ RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
+ RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
+ iwl_mvm_rx_scan_match_found,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_complete),
+ RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+ iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+ struct iwl_umac_scan_iter_complete_notif),
+
+ RX_HANDLER(MISSED_BEACONS_NOTIFICATION,
+ iwl_mvm_rx_missed_beacons_notif_legacy,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif_v4),
+
+ RX_HANDLER_GRP(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+ iwl_mvm_rx_missed_beacons_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
+ struct iwl_error_resp),
+ RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
+ struct iwl_uapsd_misbehaving_ap_notif),
+ RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
+ RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+ iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
+ struct ct_kill_notif),
+
+ RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_tdls_channel_switch_notif),
+ RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
+ RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
+ RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
+ iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_ftm_responder_stats),
+
+ RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+ iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
+ iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+ iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
+ struct iwl_mfu_assert_dump_notif),
+ RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+ iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
+ struct iwl_stored_beacon_notif_v2),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+ iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
+ struct iwl_mu_group_mgmt_notif),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+ iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
+ struct iwl_mvm_pm_state_notification),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
+ iwl_mvm_probe_resp_data_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_probe_resp_data_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
+ iwl_mvm_channel_switch_start_notif,
+ RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ iwl_mvm_channel_switch_error_notif,
+ RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_channel_switch_error_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ iwl_mvm_rx_esr_mode_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_esr_mode_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
+ iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_datapath_monitor_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
+ iwl_mvm_rx_thermal_dual_chain_req,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_thermal_dual_chain_request),
+
+ RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
+ iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
+ struct iwl_rfi_deactivate_notif),
+
+ RX_HANDLER_GRP(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
+ iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC,
+ struct iwl_time_msmt_notify),
+ RX_HANDLER_GRP(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
+ iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
+ struct iwl_time_msmt_cfm_notify),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
+ iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_roc_notif),
+ RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_channel_survey_notif),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, BEACON_FILTER_IN_NOTIF,
+ iwl_mvm_rx_beacon_filter_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ /* same size as v1 */
+ struct iwl_beacon_filter_notif),
+};
+#undef RX_HANDLER
+#undef RX_HANDLER_GRP
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
+ HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(REPLY_ERROR),
+ HCMD_NAME(ECHO_CMD),
+ HCMD_NAME(INIT_COMPLETE_NOTIF),
+ HCMD_NAME(PHY_CONTEXT_CMD),
+ HCMD_NAME(DBG_CFG),
+ HCMD_NAME(SCAN_CFG_CMD),
+ HCMD_NAME(SCAN_REQ_UMAC),
+ HCMD_NAME(SCAN_ABORT_UMAC),
+ HCMD_NAME(SCAN_COMPLETE_UMAC),
+ HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
+ HCMD_NAME(ADD_STA_KEY),
+ HCMD_NAME(ADD_STA),
+ HCMD_NAME(REMOVE_STA),
+ HCMD_NAME(TX_CMD),
+ HCMD_NAME(SCD_QUEUE_CFG),
+ HCMD_NAME(TXPATH_FLUSH),
+ HCMD_NAME(MGMT_MCAST_KEY),
+ HCMD_NAME(WEP_KEY),
+ HCMD_NAME(SHARED_MEM_CFG),
+ HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
+ HCMD_NAME(MAC_CONTEXT_CMD),
+ HCMD_NAME(TIME_EVENT_CMD),
+ HCMD_NAME(TIME_EVENT_NOTIFICATION),
+ HCMD_NAME(BINDING_CONTEXT_CMD),
+ HCMD_NAME(TIME_QUOTA_CMD),
+ HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
+ HCMD_NAME(LEDS_CMD),
+ HCMD_NAME(LQ_CMD),
+ HCMD_NAME(FW_PAGING_BLOCK_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
+ HCMD_NAME(HOT_SPOT_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
+ HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
+ HCMD_NAME(BT_COEX_CI),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(PHY_CONFIGURATION_CMD),
+ HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+ HCMD_NAME(PHY_DB_CMD),
+ HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
+ HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ HCMD_NAME(POWER_TABLE_CMD),
+ HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
+ HCMD_NAME(NVM_ACCESS_CMD),
+ HCMD_NAME(BEACON_NOTIFICATION),
+ HCMD_NAME(BEACON_TEMPLATE_CMD),
+ HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(BT_CONFIG),
+ HCMD_NAME(STATISTICS_CMD),
+ HCMD_NAME(STATISTICS_NOTIFICATION),
+ HCMD_NAME(EOSP_NOTIFICATION),
+ HCMD_NAME(REDUCE_TX_POWER_CMD),
+ HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+ HCMD_NAME(TDLS_CONFIG_CMD),
+ HCMD_NAME(MAC_PM_POWER_TABLE),
+ HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
+ HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+ HCMD_NAME(RSS_CONFIG_CMD),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+ HCMD_NAME(REPLY_RX_PHY_CMD),
+ HCMD_NAME(REPLY_RX_MPDU_CMD),
+ HCMD_NAME(BAR_FRAME_RELEASE),
+ HCMD_NAME(FRAME_RELEASE),
+ HCMD_NAME(BA_NOTIF),
+ HCMD_NAME(MCC_UPDATE_CMD),
+ HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+ HCMD_NAME(MARKER_CMD),
+ HCMD_NAME(BT_PROFILE_NOTIFICATION),
+ HCMD_NAME(MCAST_FILTER_CMD),
+ HCMD_NAME(REPLY_SF_CFG_CMD),
+ HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+ HCMD_NAME(D3_CONFIG_CMD),
+ HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+ HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_PATTERNS),
+ HCMD_NAME(WOWLAN_CONFIGURATION),
+ HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+ HCMD_NAME(WOWLAN_TKIP_PARAM),
+ HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+ HCMD_NAME(WOWLAN_GET_STATUSES),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE),
+ HCMD_NAME(D0I3_END_CMD),
+ HCMD_NAME(LTR_CONFIG),
+ HCMD_NAME(LDBG_CONFIG_CMD),
+ HCMD_NAME(DEBUG_LOG_MSG),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+ HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_CONFIG_CMD),
+ HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
+ HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
+ HCMD_NAME(RFI_DEACTIVATE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
+ HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD),
+ HCMD_NAME(MAC_CONFIG_CMD),
+ HCMD_NAME(LINK_CONFIG_CMD),
+ HCMD_NAME(STA_CONFIG_CMD),
+ HCMD_NAME(AUX_STA_CMD),
+ HCMD_NAME(STA_REMOVE_CMD),
+ HCMD_NAME(STA_DISABLE_TX_CMD),
+ HCMD_NAME(ROC_CMD),
+ HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(MISSED_VAP_NOTIF),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
+ HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ HCMD_NAME(CTDP_CONFIG_CMD),
+ HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD),
+ HCMD_NAME(CT_KILL_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+ HCMD_NAME(DQA_ENABLE_CMD),
+ HCMD_NAME(UPDATE_MU_GROUPS_CMD),
+ HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ HCMD_NAME(STA_HE_CTXT_CMD),
+ HCMD_NAME(RLC_CONFIG_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(TLC_MNG_CONFIG_CMD),
+ HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(SEC_KEY_CMD),
+ HCMD_NAME(ESR_MODE_NOTIF),
+ HCMD_NAME(MONITOR_NOTIF),
+ HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
+ HCMD_NAME(BEACON_FILTER_IN_NOTIF),
+ HCMD_NAME(STA_PM_NOTIF),
+ HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+ HCMD_NAME(RX_QUEUES_NOTIFICATION),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
+ HCMD_NAME(STATISTICS_OPER_NOTIF),
+ HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(LMAC_RD_WR),
+ HCMD_NAME(UMAC_RD_WR),
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+ HCMD_NAME(BUFFER_ALLOCATION),
+ HCMD_NAME(GET_TAS_STATUS),
+ HCMD_NAME(FW_DUMP_COMPLETE_CMD),
+ HCMD_NAME(FW_CLEAR_BUFFER),
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(CHANNEL_SURVEY_NOTIF),
+ HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
+ HCMD_NAME(TOF_RANGE_REQ_CMD),
+ HCMD_NAME(TOF_CONFIG_CMD),
+ HCMD_NAME(TOF_RANGE_ABORT_CMD),
+ HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
+ HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
+ HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
+ HCMD_NAME(TOF_LC_NOTIF),
+ HCMD_NAME(TOF_RESPONDER_STATS),
+ HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
+ HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
+ HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+ HCMD_NAME(NVM_ACCESS_COMPLETE),
+ HCMD_NAME(NVM_GET_INFO),
+ HCMD_NAME(TAS_CONFIG),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = {
+ HCMD_NAME(PROFILE_NOTIF),
+};
+
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mvm_groups[] = {
+ [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
+ [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+ [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
+ [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
+ [BT_COEX_GROUP] = HCMD_ARR(iwl_mvm_bt_coex_names),
+ [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+ [REGULATORY_AND_NVM_GROUP] =
+ HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
+ [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups);
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mvm_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups_size);
+#endif
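+
+/*
+ * Illustrative note (not in the original source): the transport layer
+ * consumes this table through trans->conf.command_groups (assigned in
+ * iwl_op_mode_mvm_start() below), so that debug paths can map a wide
+ * command ID back to a printable name, conceptually along the lines of:
+ *
+ *	u32 id = WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD);
+ *	name = iwl_get_cmd_string(trans, id);
+ *
+ * The lookup binary-searches the per-group array, which is why every
+ * array above must stay sorted by command ID.
+ */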
+
+/* these forward declarations avoid having to export the functions */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
+
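+/*
+ * Illustrative note (not in the original source): pwr_tx_backoffs is a
+ * zero-terminated table ordered by descending power threshold, and the
+ * loop below returns the backoff of the first entry whose threshold is
+ * met. With a hypothetical table { {1600, 0}, {1300, 10}, {0, 0} } and
+ * a BIOS power limit of 1400, the first entry fails (1400 < 1600), the
+ * second matches (1400 >= 1300) and 10 is returned; a limit below 1300
+ * runs off the end of the table and yields 0.
+ */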
+static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
+{
+ const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
+ u64 dflt_pwr_limit;
+
+ if (!backoff)
+ return 0;
+
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
+
+ while (backoff->pwr) {
+ if (dflt_pwr_limit >= backoff->pwr)
+ return backoff->backoff;
+
+ backoff++;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
+{
+ struct iwl_mvm *mvm =
+ container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
+ struct ieee80211_vif *tx_blocked_vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ guard(mvm)(mvm);
+
+ tx_blocked_vif =
+ rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex));
+
+ if (!tx_blocked_vif)
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+}
+
+static void iwl_mvm_fwrt_dump_start(void *ctx)
+{
+ struct iwl_mvm *mvm = ctx;
+
+ mutex_lock(&mvm->mutex);
+}
+
+static void iwl_mvm_fwrt_dump_end(void *ctx)
+{
+ struct iwl_mvm *mvm = ctx;
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+
+ guard(mvm)(mvm);
+ return iwl_mvm_send_cmd(mvm, host_cmd);
+}
+
+static bool iwl_mvm_d3_debug_enable(void *ctx)
+{
+ return IWL_MVM_D3_DEBUG;
+}
+
+static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ .dump_start = iwl_mvm_fwrt_dump_start,
+ .dump_end = iwl_mvm_fwrt_dump_end,
+ .send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ .d3_debug_enable = iwl_mvm_d3_debug_enable,
+};
+
+static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ int ret;
+
+ if (trans->csme_own) {
+ if (WARN(!mvm->mei_registered,
+ "csme is owner, but we aren't registered to iwlmei\n"))
+ goto get_nvm_from_fw;
+
+ mvm->mei_nvm_data = iwl_mei_get_nvm();
+ if (mvm->mei_nvm_data) {
+ /*
+ * mvm->mei_nvm_data is set; because of that, we'll
+ * load the NVM from the FW once we get ownership.
+ */
+ mvm->nvm_data =
+ iwl_parse_mei_nvm_data(trans, trans->cfg,
+ mvm->mei_nvm_data,
+ mvm->fw,
+ mvm->set_tx_ant,
+ mvm->set_rx_ant);
+ return 0;
+ }
+
+ IWL_ERR(mvm,
+ "Got a NULL NVM from CSME, trying to get it from the device\n");
+ }
+
+get_nvm_from_fw:
+ rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
+ mutex_lock(&mvm->mutex);
+
+ ret = iwl_trans_start_hw(mvm->trans);
+ if (ret) {
+ mutex_unlock(&mvm->mutex);
+ wiphy_unlock(mvm->hw->wiphy);
+ rtnl_unlock();
+ return ret;
+ }
+
+ ret = iwl_run_init_mvm_ucode(mvm);
+ if (ret && ret != -ERFKILL)
+ iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+ if (!ret && iwl_mvm_is_lar_supported(mvm)) {
+ mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ ret = iwl_mvm_init_mcc(mvm);
+ }
+
+ iwl_mvm_stop_device(mvm);
+
+ mutex_unlock(&mvm->mutex);
+ wiphy_unlock(mvm->hw->wiphy);
+ rtnl_unlock();
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+ /* no longer needed, regardless of success or failure */
+ mvm->fw_product_reset = false;
+
+ return ret;
+}
+
+static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int ret;
+
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
+ ret = iwl_mvm_mac_setup_register(mvm);
+ if (ret)
+ return ret;
+
+ mvm->hw_registered = true;
+
+ iwl_mvm_dbgfs_register(mvm);
+
+ wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
+ mvm->mei_rfkill_blocked,
+ RFKILL_HARD_BLOCK_NOT_OWNER);
+
+ iwl_mvm_mei_set_sw_rfkill_state(mvm);
+
+ return 0;
+}
+
+struct iwl_mvm_frob_txf_data {
+ u8 *buf;
+ size_t buflen;
+};
+
+static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_frob_txf_data *txf = data;
+ u8 keylen, match, matchend;
+ u8 *keydata;
+ size_t i;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ keydata = key->key;
+ keylen = key->keylen;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /*
+ * WEP has short keys which might show up in the payload,
+ * from which the key could be deduced, so in this case
+ * just remove all FIFO data.
+ * For TKIP we don't know the phase-2 keys here, so do the
+ * same.
+ */
+ memset(txf->buf, 0xBB, txf->buflen);
+ return;
+ default:
+ return;
+ }
+
+ /* scan for key material and clear it out */
+ match = 0;
+ for (i = 0; i < txf->buflen; i++) {
+ if (txf->buf[i] != keydata[match]) {
+ match = 0;
+ continue;
+ }
+ match++;
+ if (match == keylen) {
+ memset(txf->buf + i - keylen, 0xAA, keylen);
+ match = 0;
+ }
+ }
+
+ /* we're dealing with a FIFO, so check wrapped around data */
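+ /*
+ * Worked example (hypothetical numbers, added for clarity): with a
+ * 16-byte CCMP key and the first loop ending at match == 5 (the last
+ * five buffer bytes matched keydata[0..4]), this loop compares
+ * buf[0..10] against keydata[5..15]; on a full match it blanks
+ * buf[0..10] plus the five trailing bytes that wrapped.
+ */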
+ matchend = match;
+ for (i = 0; match && i < keylen - match; i++) {
+ if (txf->buf[i] != keydata[match])
+ break;
+ match++;
+ if (match == keylen) {
+ memset(txf->buf, 0xAA, i + 1);
+ memset(txf->buf + txf->buflen - matchend, 0xAA,
+ matchend);
+ break;
+ }
+ }
+}
+
+static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
+{
+ struct iwl_mvm_frob_txf_data txf = {
+ .buf = buf,
+ .buflen = buflen,
+ };
+ struct iwl_mvm *mvm = ctx;
+
+ /* embedded key material exists only on old API */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
+{
+ /* we only use wide headers for commands */
+ struct iwl_cmd_header_wide *hdr = hcmd;
+ unsigned int frob_start = sizeof(*hdr), frob_end = 0;
+
+ if (len < sizeof(*hdr))
+ return;
+
+ /* all the commands we care about are in LONG_GROUP */
+ if (hdr->group_id != LONG_GROUP)
+ return;
+
+ switch (hdr->cmd) {
+ case WEP_KEY:
+ case WOWLAN_TKIP_PARAM:
+ case WOWLAN_KEK_KCK_MATERIAL:
+ case ADD_STA_KEY:
+ /*
+ * blank out everything here, easier than dealing
+ * with the various versions of the command
+ */
+ frob_end = INT_MAX;
+ break;
+ case MGMT_MCAST_KEY:
+ frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
+ offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
+
+ frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
+ offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
+ break;
+ }
+
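+ /*
+ * Illustrative note (not in the original source): at this point
+ * frob_start/frob_end bracket the sensitive bytes. For ADD_STA_KEY
+ * and friends everything past the wide header is blanked; for
+ * MGMT_MCAST_KEY only the IGTK field is, so the rest of the command
+ * remains readable in dumps.
+ */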
+ if (frob_start >= frob_end)
+ return;
+
+ if (frob_end > len)
+ frob_end = len;
+
+ memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
+}
+
+static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
+{
+ const struct iwl_dump_exclude *excl;
+ struct iwl_mvm *mvm = ctx;
+ int i;
+
+ switch (mvm->fwrt.cur_fw_img) {
+ case IWL_UCODE_INIT:
+ default:
+ /* not relevant */
+ return;
+ case IWL_UCODE_REGULAR:
+ case IWL_UCODE_REGULAR_USNIFFER:
+ excl = mvm->fw->dump_excl;
+ break;
+ case IWL_UCODE_WOWLAN:
+ excl = mvm->fw->dump_excl_wowlan;
+ break;
+ }
+
+ BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
+ sizeof(mvm->fw->dump_excl_wowlan));
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
+ u32 start, end;
+
+ if (!excl[i].addr || !excl[i].size)
+ continue;
+
+ start = excl[i].addr;
+ end = start + excl[i].size;
+
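+ /*
+ * Illustrative note (not in the original source): exclusion
+ * ranges that don't overlap this dump chunk are skipped, and
+ * overlapping ones are clamped to the chunk. E.g. an exclusion
+ * of [0x1000, 0x1100) applied to a chunk at mem_addr 0x10f0 of
+ * 0x100 bytes blanks only the chunk's first 0x10 bytes.
+ */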
+ if (end <= mem_addr || start >= mem_addr + buflen)
+ continue;
+
+ if (start < mem_addr)
+ start = mem_addr;
+
+ if (end > mem_addr + buflen)
+ end = mem_addr + buflen;
+
+ memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
+ }
+}
+
+static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
+ .frob_txf = iwl_mvm_frob_txf,
+ .frob_hcmd = iwl_mvm_frob_hcmd,
+ .frob_mem = iwl_mvm_frob_mem,
+};
+
+#if IS_ENABLED(CONFIG_IWLMEI)
+static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
+{
+ struct iwl_mvm *mvm = priv;
+ struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
+
+ /*
+ * This is protected by the guarantee that this function will not
+ * be called concurrently on two different threads.
+ */
+ prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
+
+ curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
+ if (!curr_conn_info)
+ return;
+
+ curr_conn_info->conn_info = *conn_info;
+
+ rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
+
+ if (prev_conn_info)
+ kfree_rcu(prev_conn_info, rcu_head);
+}
+
+static void iwl_mvm_mei_rfkill(void *priv, bool blocked,
+ bool csme_taking_ownership)
+{
+ struct iwl_mvm *mvm = priv;
+
+ if (blocked && !csme_taking_ownership)
+ return;
+
+ mvm->mei_rfkill_blocked = blocked;
+ if (!mvm->hw_registered)
+ return;
+
+ wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
+ mvm->mei_rfkill_blocked,
+ RFKILL_HARD_BLOCK_NOT_OWNER);
+}
+
+static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
+{
+ struct iwl_mvm *mvm = priv;
+
+ if (!mvm->hw_registered || !mvm->csme_vif)
+ return;
+
+ iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
+}
+#endif
+
+static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, sap_connected_wk);
+ int ret;
+
+ ret = iwl_mvm_start_get_nvm(mvm);
+ if (ret)
+ goto out_free;
+
+ ret = iwl_mvm_start_post_nvm(mvm);
+ if (ret)
+ goto out_free;
+
+ return;
+
+out_free:
+ IWL_ERR(mvm, "Couldn't get started...\n");
+ iwl_mei_start_unregister();
+ iwl_mei_unregister_complete();
+ iwl_fw_flush_dumps(&mvm->fwrt);
+ iwl_mvm_thermal_exit(mvm);
+ iwl_fw_runtime_free(&mvm->fwrt);
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ iwl_trans_op_mode_leave(mvm->trans);
+ kfree(mvm->nvm_data);
+ kfree(mvm->mei_nvm_data);
+
+ ieee80211_free_hw(mvm->hw);
+}
+
+#if IS_ENABLED(CONFIG_IWLMEI)
+static void iwl_mvm_mei_sap_connected(void *priv)
+{
+ struct iwl_mvm *mvm = priv;
+
+ if (!mvm->hw_registered)
+ schedule_work(&mvm->sap_connected_wk);
+}
+
+static void iwl_mvm_mei_nic_stolen(void *priv)
+{
+ struct iwl_mvm *mvm = priv;
+
+ rtnl_lock();
+ cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
+ rtnl_unlock();
+}
+
+static const struct iwl_mei_ops mei_ops = {
+ .me_conn_status = iwl_mvm_me_conn_status,
+ .rfkill = iwl_mvm_mei_rfkill,
+ .roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
+ .sap_connected = iwl_mvm_mei_sap_connected,
+ .nic_stolen = iwl_mvm_mei_nic_stolen,
+};
+#endif
+
+static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ iwl_mvm_select_links(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, trig_link_selection_wk);
+
+ mutex_lock(&mvm->mutex);
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_find_link_selection_vif,
+ NULL);
+ mutex_unlock(&mvm->mutex);
+}
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mvm *mvm;
+ static const u8 no_reclaim_cmds[] = {
+ TX_CMD,
+ };
+ u32 max_agg;
+ size_t scan_size;
+ u32 min_backoff;
+ struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int ratecheck;
+ int err;
+
+ /*
+ * We use IWL_STATION_COUNT_MAX to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
+ IWL_STATION_COUNT_MAX);
+
+ /********************************
+ * 1. Allocating and configuring HW data
+ ********************************/
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mvm),
+ iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops :
+ &iwl_mvm_hw_ops);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ max_agg = 512;
+ else
+ max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ hw->max_rx_aggregation_subframes = max_agg;
+
+ op_mode = hw->priv;
+
+ mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ mvm->dev = trans->dev;
+ mvm->trans = trans;
+ mvm->cfg = cfg;
+ mvm->fw = fw;
+ mvm->hw = hw;
+
+ iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+ &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
+
+ iwl_mvm_get_bios_tables(mvm);
+ iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
+ iwl_uefi_get_step_table(trans);
+ iwl_bios_setup_step(trans, &mvm->fwrt);
+
+ mvm->init_status = 0;
+
+ /* start with v1 rates */
+ mvm->fw_rates_ver = 1;
+
+ /* check for rates version 2 */
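+ /*
+ * Note (added for clarity): each lookup below contributes 1 when the
+ * firmware advertises the newer API version, so a consistent firmware
+ * yields either 0 (all old) or 4 (all new); any other count means the
+ * advertised versions contradict each other. The same all-or-nothing
+ * pattern repeats below for version 3 with five checks.
+ */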
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 8) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 3) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 6);
+ if (ratecheck != 0 && ratecheck != 4) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 4)
+ mvm->fw_rates_ver = 2;
+
+ /* check for rates version 3 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 11) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 6) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 9);
+ if (ratecheck != 0 && ratecheck != 5) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 5)
+ mvm->fw_rates_ver = 3;
+
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ op_mode->ops = &iwl_mvm_ops_mq;
+ trans->conf.rx_mpdu_cmd_hdr_size =
+ (trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
+ sizeof(struct iwl_rx_mpdu_desc) :
+ IWL_RX_DESC_SIZE_V1;
+ } else {
+ op_mode->ops = &iwl_mvm_ops;
+ trans->conf.rx_mpdu_cmd_hdr_size =
+ sizeof(struct iwl_rx_mpdu_res_start);
+
+ if (WARN_ON(trans->info.num_rxqs > 1)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ /*
+ * If we have the new TX/queue allocation API initialize them
+ * all to invalid numbers. We'll rewrite the ones that we need
+ * later, but that doesn't happen for all of them all of the
+ * time (e.g. P2P Device is optional), and if a dynamic queue
+ * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
+ * iwl_mvm_is_static_queue() erroneously returns true, and we
+ * might have things getting stuck.
+ */
+ mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
+ mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
+ mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
+ mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
+ } else {
+ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+ mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
+ mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ }
+
+ mvm->sf_state = SF_UNINIT;
+ if (iwl_mvm_has_unified_ucode(mvm))
+ iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
+ else
+ iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
+ mvm->drop_bcn_ap_mode = true;
+
+ mutex_init(&mvm->mutex);
+ spin_lock_init(&mvm->async_handlers_lock);
+ INIT_LIST_HEAD(&mvm->time_event_list);
+ INIT_LIST_HEAD(&mvm->aux_roc_te_list);
+ INIT_LIST_HEAD(&mvm->async_handlers_list);
+ spin_lock_init(&mvm->time_event_lock);
+ INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
+ INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
+ INIT_LIST_HEAD(&mvm->resp_pasn_list);
+
+ INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+ INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+ INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
+ INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+ INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
+ INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+ INIT_LIST_HEAD(&mvm->add_stream_txqs);
+ spin_lock_init(&mvm->add_stream_lock);
+
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
+
+ wiphy_work_init(&mvm->trig_link_selection_wk,
+ iwl_mvm_trig_link_selection);
+
+ init_waitqueue_head(&mvm->rx_sync_waitq);
+
+ mvm->queue_sync_state = 0;
+
+ SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+ spin_lock_init(&mvm->tcm.lock);
+ INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ mvm->tcm.uapsd_nonagg_ts = jiffies;
+
+ INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+
+ mvm->cmd_ver.range_resp =
+ iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_RESPONSE_NOTIF, 5);
+ /* we only support up to version 9 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ /*
+ * Populate the state variables that the transport layer needs
+ * to know about.
+ */
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+ trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
+
+ trans->conf.wide_cmd_header = true;
+
+ trans->conf.command_groups = iwl_mvm_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+
+ trans->conf.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ trans->conf.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+ trans->conf.scd_set_active = true;
+
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
+
+ snprintf(mvm->hw->wiphy->fw_version,
+ sizeof(mvm->hw->wiphy->fw_version),
+ "%.31s", fw->fw_version);
+
+ trans->conf.fw_reset_handshake =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+
+ trans->conf.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ mvm->sta_remove_requires_queue_remove =
+ trans->conf.queue_alloc_cmd_ver > 0;
+
+ mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);
+
+ /* Configure transport layer */
+ iwl_trans_op_mode_enter(mvm->trans, op_mode);
+
+ trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
+ trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
+
+ /* set up notification wait support */
+ iwl_notification_wait_init(&mvm->notif_wait);
+
+ /* Init phy db */
+ mvm->phy_db = iwl_phy_db_init(trans);
+ if (!mvm->phy_db) {
+ IWL_ERR(mvm, "Cannot init phy_db\n");
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (iwlwifi_mod_params.nvm_file)
+ mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+ else
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "working without external nvm file\n");
+
+ scan_size = iwl_mvm_scan_size(mvm);
+
+ mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+ if (!mvm->scan_cmd) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ mvm->scan_cmd_size = scan_size;
+
+ /* invalidate ids to prevent accidental removal of sta_id 0 */
+ mvm->aux_sta.sta_id = IWL_INVALID_STA;
+ mvm->snif_sta.sta_id = IWL_INVALID_STA;
+
+ /* Set EBS as successful as long as not stated otherwise by the FW. */
+ mvm->last_ebs_successful = true;
+
+ min_backoff = iwl_mvm_min_backoff(mvm);
+ iwl_mvm_thermal_initialize(mvm, min_backoff);
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm))
+ memset(&mvm->rx_stats_v3, 0,
+ sizeof(struct mvm_statistics_rx_v3));
+ else
+ memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
+ iwl_mvm_ftm_initiator_smooth_config(mvm);
+
+ iwl_mvm_init_time_sync(&mvm->time_sync);
+
+ mvm->debugfs_dir = dbgfs_dir;
+
+#if IS_ENABLED(CONFIG_IWLMEI)
+ mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
+#else
+ mvm->mei_registered = false;
+#endif
+
+ iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);
+
+ err = iwl_mvm_start_get_nvm(mvm);
+ if (err) {
+ /*
+ * Getting the NVM failed while CSME is the owner, but we are
+ * registered to MEI; we'll get the NVM later, once it becomes
+ * possible to get it from CSME.
+ */
+ if (trans->csme_own && mvm->mei_registered)
+ return op_mode;
+
+ goto out_thermal_exit;
+ }
+
+ err = iwl_mvm_start_post_nvm(mvm);
+ if (err)
+ goto out_thermal_exit;
+
+ return op_mode;
+
+ out_thermal_exit:
+ iwl_mvm_thermal_exit(mvm);
+ if (mvm->mei_registered) {
+ iwl_mei_start_unregister();
+ iwl_mei_unregister_complete();
+ }
+ out_free:
+ iwl_fw_flush_dumps(&mvm->fwrt);
+ iwl_fw_runtime_free(&mvm->fwrt);
+
+ iwl_phy_db_free(mvm->phy_db);
+ kfree(mvm->scan_cmd);
+ iwl_trans_op_mode_leave(trans);
+
+ ieee80211_free_hw(mvm->hw);
+ return ERR_PTR(err);
+}
+
+void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_fw_cancel_timestamp(&mvm->fwrt);
+
+ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+ iwl_mvm_pause_tcm(mvm, false);
+
+ iwl_fw_dbg_stop_sync(&mvm->fwrt);
+ iwl_trans_stop_device(mvm->trans);
+ iwl_free_fw_paging(&mvm->fwrt);
+ iwl_fw_dump_conf_clear(&mvm->fwrt);
+ iwl_mvm_mei_device_state(mvm, false);
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ int i;
+
+ if (mvm->mei_registered) {
+ rtnl_lock();
+ iwl_mei_set_netdev(NULL);
+ rtnl_unlock();
+ iwl_mei_start_unregister();
+ }
+
+ /*
+ * After we unregister from mei, the worker can't be scheduled
+ * anymore.
+ */
+ cancel_work_sync(&mvm->sap_connected_wk);
+
+ iwl_mvm_leds_exit(mvm);
+
+ iwl_mvm_thermal_exit(mvm);
+
+ /*
+ * If we couldn't get ownership on the device and we couldn't
+ * get the NVM from CSME, we haven't registered to mac80211.
+ * In that case, we didn't fail op_mode_start, because we are
+ * waiting for CSME to allow us to get the NVM to register to
+ * mac80211. If that didn't happen, we haven't registered to
+ * mac80211, hence the if below.
+ */
+ if (mvm->hw_registered)
+ ieee80211_unregister_hw(mvm->hw);
+
+ kfree(mvm->scan_cmd);
+ kfree(mvm->mcast_filter_cmd);
+ mvm->mcast_filter_cmd = NULL;
+
+ kfree(mvm->error_recovery_buf);
+ mvm->error_recovery_buf = NULL;
+
+ iwl_mvm_ptp_remove(mvm);
+
+ iwl_trans_op_mode_leave(mvm->trans);
+
+ iwl_phy_db_free(mvm->phy_db);
+ mvm->phy_db = NULL;
+
+ kfree(mvm->nvm_data);
+ kfree(mvm->mei_nvm_data);
+ kfree(rcu_access_pointer(mvm->csme_conn_info));
+ kfree(mvm->temp_nvm_data);
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+ kfree(mvm->nvm_sections[i].data);
+ kfree(mvm->acs_survey);
+
+ cancel_delayed_work_sync(&mvm->tcm.work);
+
+ iwl_fw_runtime_free(&mvm->fwrt);
+ mutex_destroy(&mvm->mutex);
+
+ if (mvm->mei_registered)
+ iwl_mei_unregister_complete();
+
+ ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ enum iwl_rx_handler_context context;
+ void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) and runs only the handlers queued
+ * for those contexts.
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(local_list);
+
+ /*
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
+ */
+ spin_lock_bh(&mvm->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
+ spin_unlock_bh(&mvm->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
+ mutex_lock(&mvm->mutex);
+ entry->fn(mvm, &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
+ mutex_unlock(&mvm->mutex);
+ kfree(entry);
+ }
+}
+
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+ int i;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_FW_NOTIF);
+ if (!trig)
+ return;
+
+ cmds_trig = (void *)trig->data;
+
+ for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+ /* don't collect on CMD 0 */
+ if (!cmds_trig->cmds[i].cmd_id)
+ break;
+
+ if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+ cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "CMD 0x%02x.%02x received",
+ pkt->hdr.group_id, pkt->hdr.cmd);
+ break;
+ }
+}
+
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_rx_packet *pkt)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ int i;
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
+ iwl_mvm_rx_check_trigger(mvm, pkt);
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+ const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+ continue;
+
+ if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size,
+ "unexpected notification 0x%04x size %d, need %d\n",
+ rx_h->cmd_id, pkt_len, rx_h->min_size))
+ return;
+
+ if (rx_h->context == RX_HANDLER_SYNC) {
+ rx_h->fn(mvm, rxb);
+ return;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return;
+
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+ entry->fn = rx_h->fn;
+ entry->context = rx_h->context;
+ spin_lock(&mvm->async_handlers_lock);
+ list_add_tail(&entry->list, &mvm->async_handlers_list);
+ spin_unlock(&mvm->async_handlers_lock);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
+ break;
+ }
+}
+
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+ else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
+ iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+ else
+ iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
+ iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+ iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
+ else
+ iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
+{
+ return queue == mvm->aux_queue || queue == mvm->probe_queue ||
+ queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
+}
+
+static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ int hw_queue, bool start)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_sta *sta;
+ struct ieee80211_txq *txq;
+ struct iwl_mvm_txq *mvmtxq;
+ int i;
+ unsigned long tid_bitmap;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+
+ sta_id = iwl_mvm_has_new_tx_api(mvm) ?
+ mvm->tvqm_info[hw_queue].sta_id :
+ mvm->queue_info[hw_queue].ra_sta_id;
+
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
+ return;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto out;
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
+ if (!start)
+ ieee80211_stop_queues(mvm->hw);
+ else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ ieee80211_wake_queues(mvm->hw);
+
+ goto out;
+ }
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int tid = mvm->tvqm_info[hw_queue].txq_tid;
+
+ tid_bitmap = BIT(tid);
+ } else {
+ tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
+ }
+
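+ /*
+ * Note (added for clarity): bit IWL_MAX_TID_COUNT in the bitmap
+ * stands for the management queue; mac80211 keeps its txq at
+ * index IEEE80211_NUM_TIDS in sta->txq[], hence the remapping
+ * inside the loop below.
+ */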
+ for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int tid = i;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IEEE80211_NUM_TIDS;
+
+ txq = sta->txq[tid];
+ mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ if (start)
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ else
+ set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+
+ if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
+ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
+ }
+ }
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mvm_queue_state_change(op_mode, hw_queue, false);
+}
+
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mvm_queue_state_change(op_mode, hw_queue, true);
+}
+
+static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
+{
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
+}
+
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
+{
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+
+ iwl_mvm_set_rfkill_state(mvm);
+}
+
+struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
+{
+ return rcu_dereference_protected(mvm->csme_conn_info,
+ lockdep_is_held(&mvm->mutex));
+}
+
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
+ bool unified = iwl_mvm_has_unified_ucode(mvm);
+
+ if (state)
+ set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ else
+ clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+ iwl_mvm_set_rfkill_state(mvm);
+
+ /* iwl_run_init_mvm_ucode is waiting for results, abort it. */
+ if (rfkill_safe_init_done)
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * Don't ask the transport to stop the firmware. We'll do it
+ * after cfg80211 takes us down.
+ */
+ if (unified)
+ return false;
+
+ /*
+ * Stop the device if we run OPERATIONAL firmware or if we are in the
+ * middle of the calibrations.
+ */
+ return state && rfkill_safe_init_done;
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mvm->hw, skb);
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_abort_notification_waits(&mvm->notif_wait);
+ iwl_dbg_tlv_del_timers(mvm->trans);
+
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+ IWL_ERR(mvm, "Command queue full!\n");
+ else if (!iwl_trans_is_dead(mvm->trans) &&
+ !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
+ &mvm->status))
+ iwl_mvm_dump_nic_error_log(mvm);
+
+ /*
+ * This should be the first thing done before trying to collect
+ * any data, to avoid endless loops if any HW error happens while
+ * collecting debug data.
+ * It might not actually be true that we'll restart, but setting
+ * the bit doesn't matter if we're going to be unbound anyway.
+ */
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+}
+
+static void iwl_mvm_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /* if we come in from opmode we have the mutex held */
+ if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+ lockdep_assert_held(&mvm->mutex);
+ iwl_fw_error_collect(&mvm->fwrt);
+ } else {
+ mutex_lock(&mvm->mutex);
+ if (mode->context != IWL_ERR_CONTEXT_ABORT)
+ iwl_fw_error_collect(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+ }
+}
+
+static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ /*
+ * If the firmware crashes while we're already considering it
+ * to be dead, don't ask for a restart; that cannot do
+ * anything useful anyway.
+ */
+ if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
+ return false;
+
+ /*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted scan when that was already done, which
+ * is not a problem. It is necessary to abort any OS scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211. If restart isn't called
+ * (no fw restart), the scan status will stay busy.
+ */
+ iwl_mvm_report_scan_aborted(mvm);
+
+ /*
+ * If the INIT fw asserted, it will likely fail again.
+ * If the WoWLAN fw asserted, don't restart either; mac80211
+ * can't recover from this since we're already half suspended.
+ */
+ if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) {
+ if (mvm->fw->ucode_capa.error_log_size) {
+ u32 src_size = mvm->fw->ucode_capa.error_log_size;
+ u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
+ u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);
+
+ if (recover_buf) {
+ mvm->error_recovery_buf = recover_buf;
+ iwl_trans_read_mem_bytes(mvm->trans,
+ src_addr,
+ recover_buf,
+ src_size);
+ }
+ }
+
+ if (mvm->fwrt.trans->dbg.restart_required) {
+ IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
+ mvm->fwrt.trans->dbg.restart_required = false;
+ ieee80211_restart_hw(mvm->hw);
+ return true;
+ } else if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
+ ieee80211_restart_hw(mvm->hw);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
+}
+
+static void iwl_mvm_dump(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_fw_runtime *fwrt = &mvm->fwrt;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ iwl_mvm_stop_device(mvm);
+ mvm->fast_resume = false;
+ mutex_unlock(&mvm->mutex);
+}
+#else
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
+
+#define IWL_MVM_COMMON_OPS \
+ /* these could be differentiated */ \
+ .queue_full = iwl_mvm_stop_sw_queue, \
+ .queue_not_full = iwl_mvm_wake_sw_queue, \
+ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
+ .free_skb = iwl_mvm_free_skb, \
+ .nic_error = iwl_mvm_nic_error, \
+ .dump_error = iwl_mvm_dump_error, \
+ .sw_reset = iwl_mvm_sw_reset, \
+ .nic_config = iwl_mvm_nic_config, \
+ /* as we only register one, these MUST be common! */ \
+ .start = iwl_op_mode_mvm_start, \
+ .stop = iwl_op_mode_mvm_stop, \
+ .time_point = iwl_op_mode_mvm_time_point, \
+ .device_powered_off = iwl_op_mode_mvm_device_powered_off
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+ IWL_MVM_COMMON_OPS,
+ .rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+ struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb,
+ unsigned int queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (unlikely(queue >= mvm->trans->info.num_rxqs))
+ return;
+
+ if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
+ else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
+ else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+ IWL_MVM_COMMON_OPS,
+ .rx = iwl_mvm_rx_mq,
+ .rx_rss = iwl_mvm_rx_mq_rss,
+ .dump = iwl_mvm_dump,
+};
diff --git a/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 000000000000..5e7e2926be0c
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IWL_PHY_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return IWL_PHY_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return IWL_PHY_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return IWL_PHY_CHANNEL_MODE160;
+ case NL80211_CHAN_WIDTH_320:
+ return IWL_PHY_CHANNEL_MODE320;
+ default:
+ WARN(1, "Invalid channel width=%u", chandef->width);
+ return IWL_PHY_CHANNEL_MODE20;
+ }
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
+{
+ int offs = chandef->chan->center_freq - chandef->center_freq1;
+ int abs_offs = abs(offs);
+ u8 ret;
+
+ if (offs == 0) {
+ /*
+ * The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return 0;
+ }
+
+ /* this results in a value 0-7, i.e. fitting into 0b0111 */
+ ret = (abs_offs - 10) / 20;
+ /*
+ * But we need the value to be in 0b1011 because 0b0100 is
+ * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
+ * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
+ */
+ ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
+ ((ret & BIT(2)) << 1);
+ /* and add the above bit */
+ ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
+
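+ /*
+ * Worked example (added for clarity): for an 80 MHz chandef whose
+ * control channel sits 30 MHz above center_freq1, offs = +30 and
+ * (30 - 10) / 20 = 1; there is no bit 2 to shift and the ABOVE bit
+ * is set, giving 0b0101. At the 320 MHz extreme, abs_offs = 150
+ * gives (150 - 10) / 20 = 7 = 0b0111, which is remapped to 0b1011
+ * and becomes 0b1111 once the ABOVE bit is added.
+ */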
+ return ret;
+}
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd *cmd,
+ u32 action)
+{
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd->action = cpu_to_le32(action);
+}
+
+static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ __le32 *rxchain_info,
+ u8 chains_static,
+ u8 chains_dynamic)
+{
+ u8 active_cnt, idle_cnt;
+
+ /* Set the RX chains */
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ /* In scenarios where we only ever use single-stream rates,
+ * i.e. legacy 11b/g/a associations, single-stream APs or even
+ * static SMPS, enable both chains to get diversity, improving
+ * the case where we're far enough from the AP that attenuation
+ * between the two antennas is sufficiently different to impact
+ * performance.
+ */
+ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
+ idle_cnt = 2;
+ active_cnt = 2;
+ }
+
+ *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ PHY_RX_CHAIN_VALID_POS);
+ *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ *rxchain_info |= cpu_to_le32(active_cnt <<
+ PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (unlikely(mvm->dbgfs_rx_phyinfo))
+ *rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd_v1 *cmd,
+ const struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct iwl_phy_context_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
+ chains_static, chains_dynamic);
+
+ tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ struct iwl_phy_context_cmd *cmd,
+ const struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
+ chandef->chan->band));
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ /* we only support RLC command version 2 */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2)
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
+ chains_static, chains_dynamic);
+}
+
+int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct iwl_rlc_config_cmd cmd = {
+ .phy_id = cpu_to_le32(ctxt->id),
+ };
+
+ /* From version 3, RLC is offloaded to firmware, so the driver no
+ * longer needs to send cmd.rlc. Since we are not using any other
+ * fields in the command either, don't send it at all.
+ */
+ if (iwl_mvm_has_rlc_offload(mvm) || ctxt->rlc_disabled)
+ return 0;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP,
+ RLC_CONFIG_CMD), 0) < 2)
+ return 0;
+
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE !=
+ PHY_RX_CHAIN_DRIVER_FORCE_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID !=
+ PHY_RX_CHAIN_VALID_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE !=
+ PHY_RX_CHAIN_FORCE_SEL_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO !=
+ PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK);
+ BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT !=
+ PHY_RX_CHAIN_MIMO_CNT_MSK);
+
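+ /*
+ * Note (added for clarity): the BUILD_BUG_ONs above pin the RLC
+ * chain-info flags to the legacy PHY_RX_CHAIN_* masks, which is
+ * what allows iwl_mvm_phy_ctxt_set_rxchain() to fill in
+ * cmd.rlc.rx_chain_info directly below without any translation.
+ */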
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info,
+ chains_static, chains_dynamic);
+
+ IWL_DEBUG_FW(mvm, "Send RLC command: phy=%d, rx_chain_info=0x%x\n",
+ ctxt->id, cmd.rlc.rx_chain_info);
+
+ return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD,
+ DATA_PATH_GROUP, 2),
+ 0, sizeof(cmd), &cmd);
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: either this is the first
+ * time the phy configuration is applied, or the configuration changed since
+ * the previous apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
+ u8 chains_static, u8 chains_dynamic,
+ u32 action)
+{
+ int ret;
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
+
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
+ struct iwl_phy_context_cmd cmd = {};
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, sizeof(cmd), &cmd);
+ } else if (ver < 3) {
+ struct iwl_phy_context_cmd_v1 cmd = {};
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt,
+ (struct iwl_phy_context_cmd *)&cmd,
+ action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, len, &cmd);
+ } else {
+ IWL_ERR(mvm, "PHY ctxt cmd error ver %d not supported\n", ver);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+ return ret;
+ }
+
+ if (action != FW_CTXT_ACTION_REMOVE)
+ return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
+ chains_dynamic);
+
+ return 0;
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
+ u8 chains_static, u8 chains_dynamic)
+{
+ int ret;
+
+ WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ ctxt->ref);
+ lockdep_assert_held(&mvm->mutex);
+
+ ctxt->channel = chandef->chan;
+ ctxt->width = chandef->width;
+ ctxt->center_freq1 = chandef->center_freq1;
+
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD);
+
+ if (ret)
+ return ret;
+
+ ctxt->ref++;
+
+ return 0;
+}
+
+/*
+ * Update the number of references to the given PHY context. This is valid only
+ * in case the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* If we were taking the first ref, we should have
+ * called iwl_mvm_phy_ctxt_add.
+ */
+ WARN_ON(!ctxt->ref);
+ ctxt->ref++;
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
+ u8 chains_static, u8 chains_dynamic)
+{
+ enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!ctxt->ref))
+ return -EINVAL;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP,
+ RLC_CONFIG_CMD), 0) >= 2 &&
+ ctxt->channel == chandef->chan &&
+ ctxt->width == chandef->width &&
+ ctxt->center_freq1 == chandef->center_freq1)
+ return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static,
+ chains_dynamic);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ ctxt->channel->band != chandef->chan->band) {
+ int ret;
+
+ /* ... remove it here ... */
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_REMOVE);
+ if (ret)
+ return ret;
+
+ /* ... and proceed to add it again */
+ action = FW_CTXT_ACTION_ADD;
+ }
+
+ ctxt->channel = chandef->chan;
+ ctxt->width = chandef->width;
+ ctxt->center_freq1 = chandef->center_freq1;
+
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
+ chains_static, chains_dynamic,
+ action);
+}
+
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+ struct cfg80211_chan_def chandef;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(!ctxt))
+ return;
+
+ ctxt->ref--;
+
+ if (ctxt->ref)
+ return;
+
+ cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
+
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
+ FW_CTXT_ACTION_REMOVE);
+}
+
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ unsigned long *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->deflink.phy_ctxt)
+ return;
+
+ if (vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_AP)
+ __set_bit(mvmvif->deflink.phy_ctxt->id, data);
+}
+
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
+{
+ unsigned long phy_ctxt_counter = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_binding_iterator,
+ &phy_ctxt_counter);
+
+ return hweight8(phy_ctxt_counter);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/power.c b/sys/contrib/dev/iwlwifi/mvm/power.c
new file mode 100644
index 000000000000..610de29b7be0
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/power.c
@@ -0,0 +1,991 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw/api/power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ u16 len;
+
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+ IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_low is: %d, %d\n",
+ le32_to_cpu(cmd->bf_threshold_absolute_low[0]),
+ le32_to_cpu(cmd->bf_threshold_absolute_low[1]));
+
+ IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_high is: %d, %d\n",
+ le32_to_cpu(cmd->bf_threshold_absolute_high[0]),
+ le32_to_cpu(cmd->bf_threshold_absolute_high[1]));
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BEACON_FILTER_V4))
+ len = sizeof(struct iwl_beacon_filter_cmd);
+ else
+ len = offsetof(struct iwl_beacon_filter_cmd,
+ bf_threshold_absolute_low);
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
+ len, cmd);
+}
+
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->bss_conf.cqm_rssi_thold) {
+ cmd->bf_energy_delta =
+ cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+ /* fw uses an absolute value for this */
+ cmd->bf_roaming_state =
+ cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+ }
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled);
+}
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+ struct iwl_mac_power_cmd *cmd)
+{
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd->id_and_color, iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd->flags));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd->keep_alive_seconds));
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+ IWL_DEBUG_POWER(mvm, "Disable power management\n");
+ return;
+ }
+
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ cmd->skip_dtim_periods);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+ IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+ IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
+ }
+}
+
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
+ cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+ if (mvmvif->dbgfs_pm.use_ps_poll) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ return;
+ }
+#endif
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->deflink.queue_params[ac].uapsd)
+ continue;
+
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
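+ /* (per the 802.11 UP-to-AC mapping, AC_VO carries UP 6-7,
+ * AC_VI UP 4-5, AC_BE UP 0 and 3, AC_BK UP 1-2; the switch
+ * below picks one representative TID per AC)
+ */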
+ if (!tid_found && !mvmvif->deflink.queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
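+ /* Snooze is enabled below only when all four ACs are uAPSD,
+ * i.e. all traffic can be retrieved with trigger frames.
+ */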
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window =
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+ } else {
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ }
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
+struct iwl_allow_uapsd_iface_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool allow_uapsd;
+};
+
+static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_allow_uapsd_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ data->allow_uapsd = false;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* allow uAPSD only if the P2P client interface and the BSS
+ * station interface share the same channel (PHY context);
+ * otherwise disallow it.
+ */
+ if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt &&
+ curr_mvmvif->deflink.phy_ctxt &&
+ other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
+ data->allow_uapsd = false;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_allow_uapsd_iface_iterator_data data = {
+ .current_vif = vif,
+ .allow_uapsd = true,
+ };
+
+ if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr,
+ vif->cfg.ap_addr))
+ return false;
+
+ /*
+ * Avoid using uAPSD if P2P client is associated to GO that uses
+ * opportunistic power save. This is due to current FW limitation.
+ */
+ if (vif->p2p &&
+ (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return false;
+
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_allow_uapsd_iterator,
+ &data);
+
+ return data.allow_uapsd;
+}
+
+static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+
+ /* this happens on link switching, just ignore inactive ones */
+ if (!chanctx_conf)
+ return false;
+
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
+}
+
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int min_link_skip = ~0;
+ unsigned int link_id;
+
+ /* disable, in case we're supposed to override */
+ cmd->skip_dtim_periods = 0;
+ cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ return;
+ cmd->skip_dtim_periods = 2;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ return;
+ }
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ unsigned int dtimper = link_conf->dtim_period ?: 1;
+ unsigned int dtimper_tu = dtimper * link_conf->beacon_int;
+ unsigned int skip;
+
+ if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (WARN_ON(!dtimper_tu))
+ continue;
+
+ /* configure skip over DTIM so we sleep at most ~900 TU between wakeups */
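+ /* (e.g. a 300 TU DTIM interval gives skip = 3; intervals of
+ * 900 TU or more give skip = 1)
+ */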
+ skip = max_t(int, 1, 900 / dtimper_tu);
+ min_link_skip = min(min_link_skip, skip);
+ }
+ rcu_read_unlock();
+
+ /* no WARN_ON needed; this can only happen if the WARN_ON above fired */
+ if (min_link_skip == ~0)
+ return;
+
+ cmd->skip_dtim_periods = min_link_skip;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
+{
+ int dtimper, bi;
+ int keep_alive;
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ dtimper = vif->bss_conf.dtim_period;
+ bi = vif->bss_conf.beacon_int;
+
+ /*
+ * Regardless of power management state the driver must set
+ * keep alive period. FW will use it for sending keep alive NDPs
+ * immediately after association. Check that keep alive period
+ * is at least 3 * DTIM
+ */
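+ /* (e.g. dtim_period = 1 and beacon_int = 100 TU give 3 DTIM
+ * intervals of ~307 ms, which rounds up to 1 second and is then
+ * raised to the POWER_KEEP_ALIVE_PERIOD_SEC minimum below)
+ */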
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (mvm->ps_disabled)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (!vif->cfg.ps || !mvmvif->pm_enabled)
+ return;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK);
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+ (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+ !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ if (vif->bss_conf.beacon_rate &&
+ (vif->bss_conf.beacon_rate->bitrate == 10 ||
+ vif->bss_conf.beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+ }
+
+ iwl_mvm_power_config_skip_dtim(mvm, vif, cmd);
+
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
+ }
+
+ if (iwl_mvm_power_allow_uapsd(mvm, vif))
+ iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+ cmd->keep_alive_seconds =
+ cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+ if (mvmvif->dbgfs_pm.skip_over_dtim)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+ cmd->rx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+ cmd->tx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+ cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+ if (mvmvif->dbgfs_pm.lprx_ena)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ else
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+ cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+ if (mvmvif->dbgfs_pm.snooze_ena)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+ u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+
+ if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+ cmd->flags |= cpu_to_le16(flag);
+ else
+ cmd->flags &= cpu_to_le16(~flag);
+ }
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mac_power_cmd cmd = {};
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ iwl_mvm_power_log(mvm, &cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
+#endif
+
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
+ sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+ struct iwl_device_power_cmd cmd = {
+ .flags = 0,
+ };
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (!mvm->ps_disabled)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ?
+ mvm->disable_power_off_d3 : mvm->disable_power_off)
+ cmd.flags &=
+ cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ if (mvm->ext_clock_valid)
+ cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK);
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, POWER_TABLE_CMD, 0) >= 7 &&
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);
+
+ IWL_DEBUG_POWER(mvm,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
+ &cmd);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr,
+ vif->cfg.ap_addr))
+ eth_zero_addr(mvmvif->uapsd_misbehaving_ap_addr);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *ap_sta_id = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+
+ /* The ap_sta_id is not expected to change during the current
+ * association, so no explicit protection is needed.
+ */
+ if (link_info->ap_sta_id == *ap_sta_id) {
+ ether_addr_copy(mvmvif->uapsd_misbehaving_ap_addr,
+ vif->cfg.ap_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+}
+
+struct iwl_power_vifs {
+ struct iwl_mvm *mvm;
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_vif *p2p_vif;
+ struct ieee80211_vif *ap_vif;
+ struct ieee80211_vif *monitor_vif;
+ bool p2p_active;
+ bool bss_active;
+ bool ap_active;
+ bool monitor_active;
+};
+
+static void iwl_mvm_power_disable_pm_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvmvif->pm_enabled = false;
+}
+
+static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool *disable_ps = _data;
+
+ if (iwl_mvm_vif_is_active(mvmvif))
+ *disable_ps |= mvmvif->ps_disabled;
+}
+
+static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_vifs *power_iterator = _data;
+ bool active;
+
+ if (!mvmvif->uploaded)
+ return;
+
+ active = iwl_mvm_vif_is_active(mvmvif);
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->ap_vif);
+ power_iterator->ap_vif = vif;
+ if (active)
+ power_iterator->ap_active = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->monitor_vif);
+ power_iterator->monitor_vif = vif;
+ if (active)
+ power_iterator->monitor_active = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /* only a single MAC of the same type */
+ WARN_ON(power_iterator->p2p_vif);
+ power_iterator->p2p_vif = vif;
+ if (active)
+ power_iterator->p2p_active = true;
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ power_iterator->bss_vif = vif;
+ if (active)
+ power_iterator->bss_active = true;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+ struct iwl_power_vifs *vifs)
+{
+ struct iwl_mvm_vif *bss_mvmvif = NULL;
+ struct iwl_mvm_vif *p2p_mvmvif = NULL;
+ struct iwl_mvm_vif *ap_mvmvif = NULL;
+ bool client_same_channel = false;
+ bool ap_same_channel = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* set pm_enabled to false on all interfaces */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_disable_pm_iterator,
+ NULL);
+
+ if (vifs->bss_vif)
+ bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+
+ if (vifs->p2p_vif)
+ p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+
+ if (vifs->ap_vif)
+ ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+
+ /* don't allow PM if any TDLS stations exist */
+ if (iwl_mvm_tdls_sta_count(mvm, NULL))
+ return;
+
+ /* enable PM on the BSS vif if it is stand-alone */
+ if (bss_mvmvif && vifs->bss_active && !vifs->p2p_active &&
+ !vifs->ap_active) {
+ bss_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ /* enable PM on the P2P vif if it is stand-alone */
+ if (p2p_mvmvif && vifs->p2p_active && !vifs->bss_active &&
+ !vifs->ap_active) {
+ p2p_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ if (p2p_mvmvif && bss_mvmvif && vifs->bss_active && vifs->p2p_active)
+ client_same_channel =
+ iwl_mvm_have_links_same_channel(bss_mvmvif, p2p_mvmvif);
+
+ if (bss_mvmvif && ap_mvmvif && vifs->bss_active && vifs->ap_active)
+ ap_same_channel =
+ iwl_mvm_have_links_same_channel(bss_mvmvif, ap_mvmvif);
+
+ /* clients are not stand-alone: enable PM if on different channels (DCM) */
+ if (!(client_same_channel || ap_same_channel)) {
+ if (bss_mvmvif && vifs->bss_active)
+ bss_mvmvif->pm_enabled = true;
+ if (p2p_mvmvif && vifs->p2p_active)
+ p2p_mvmvif->pm_enabled = true;
+ return;
+ }
+
+ /*
+ * There is only one channel in the system and there are only
+ * bss and p2p clients that share it
+ */
+ if (client_same_channel && !vifs->ap_active) {
+ /* they share the same channel */
+ bss_mvmvif->pm_enabled = true;
+ p2p_mvmvif->pm_enabled = true;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
+ int pos = 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+ 1 : 0);
+
+ if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+ return pos;
+
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+ cmd.snooze_window);
+
+ return pos;
+}
+
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
+ cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
+ cmd->bf_roaming_energy_delta =
+ cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
+ cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+ cmd->bf_temp_threshold =
+ cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+ cmd->bf_temp_fast_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+ cmd->bf_temp_slow_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
+ cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
+ cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
+ cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
+ cmd->ba_enable_beacon_abort =
+ cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
+}
+#endif
+
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
+ vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
+
+ if (!ret)
+ mvmvif->bf_enabled = true;
+
+ return ret;
+}
+
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
+}
+
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_beacon_filter_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+
+ if (!ret)
+ mvmvif->bf_enabled = false;
+
+ return ret;
+}
+
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
+}
+
+static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
+{
+ bool disable_ps;
+ int ret;
+
+ /* disable PS if in CAM (continuously active mode) */
+ disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+ /* ...or if any of the vifs require PS to be off */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_ps_disabled_iterator,
+ &disable_ps);
+
+ /* update device power state if it has changed */
+ if (mvm->ps_disabled != disable_ps) {
+ bool old_ps_disabled = mvm->ps_disabled;
+
+ mvm->ps_disabled = disable_ps;
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret) {
+ mvm->ps_disabled = old_ps_disabled;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ if (!mvmvif->bf_enabled)
+ return 0;
+
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->cfg.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
+}
+
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
+{
+ struct iwl_power_vifs vifs = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* get vifs info */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_get_vifs_iterator, &vifs);
+
+ ret = iwl_mvm_power_set_ps(mvm);
+ if (ret)
+ return ret;
+
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
+}
+
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+ struct iwl_power_vifs vifs = {
+ .mvm = mvm,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* get vifs info */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_get_vifs_iterator, &vifs);
+
+ iwl_mvm_power_set_pm(mvm, &vifs);
+
+ ret = iwl_mvm_power_set_ps(mvm);
+ if (ret)
+ return ret;
+
+ if (vifs.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.p2p_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/ptp.c b/sys/contrib/dev/iwlwifi/mvm/ptp.c
new file mode 100644
index 000000000000..06a4c9f74797
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/ptp.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2021 - 2023, 2025 Intel Corporation
+ */
+
+#include "mvm.h"
+#include "iwl-debug.h"
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+
+#define IWL_PTP_GP2_WRAP 0x100000000ULL
+#define IWL_PTP_WRAP_TIME (3600 * HZ)
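+/* GP2 is a free-running 32-bit microsecond counter, so it wraps about
+ * every 2^32 usec (~71.6 minutes); re-reading it at least once an hour
+ * guarantees that no wraparound goes unnoticed.
+ */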
+
+/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit
+ * fractional part, so a value of 1 actually means 2^-16 ppm, and
+ * 2^16 = 65536 is 1 ppm.
+ */
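+/* (e.g. scaled_ppm = 65536 requests +1 ppm: adjfine sets scaled_freq to
+ * SCALE_FACTOR + 65536, and scaling a delta by scaled_freq/SCALE_FACTOR
+ * then stretches it by exactly one part per million)
+ */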
+#define SCALE_FACTOR 65536000000ULL
+#define IWL_PTP_WRAP_THRESHOLD_USEC (5000)
+
+#define IWL_PTP_GET_CROSS_TS_NUM 5
+
+static void iwl_mvm_ptp_update_new_read(struct iwl_mvm *mvm, u32 gp2)
+{
+ /* If the difference is above the threshold, assume it's a wraparound.
+ * Otherwise assume it's an old read and ignore it.
+ */
+ if (gp2 < mvm->ptp_data.last_gp2 &&
+ mvm->ptp_data.last_gp2 - gp2 < IWL_PTP_WRAP_THRESHOLD_USEC) {
+ IWL_DEBUG_INFO(mvm,
+ "PTP: ignore old read (gp2=%u, last_gp2=%u)\n",
+ gp2, mvm->ptp_data.last_gp2);
+ return;
+ }
+
+ if (gp2 < mvm->ptp_data.last_gp2) {
+ mvm->ptp_data.wrap_counter++;
+ IWL_DEBUG_INFO(mvm,
+ "PTP: wraparound detected (new counter=%u)\n",
+ mvm->ptp_data.wrap_counter);
+ }
+
+ mvm->ptp_data.last_gp2 = gp2;
+ schedule_delayed_work(&mvm->ptp_data.dwork, IWL_PTP_WRAP_TIME);
+}
+
+u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time_ns)
+{
+ struct ptp_data *data = &mvm->ptp_data;
+ u64 last_gp2_ns = mvm->ptp_data.scale_update_gp2 * NSEC_PER_USEC;
+ u64 res;
+ u64 diff;
+
+ iwl_mvm_ptp_update_new_read(mvm,
+ div64_u64(base_time_ns, NSEC_PER_USEC));
+
+ IWL_DEBUG_INFO(mvm, "base_time_ns=%llu, wrap_counter=%u\n",
+ (unsigned long long)base_time_ns, data->wrap_counter);
+
+ base_time_ns = base_time_ns +
+ (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC);
+
+ /* It is possible that a GP2 timestamp was received from fw before the
+ * last scale update. Since we don't know how to scale it, ignore it.
+ */
+ if (base_time_ns < last_gp2_ns) {
+ IWL_DEBUG_INFO(mvm, "Time before scale update - ignore\n");
+ return 0;
+ }
+
+ diff = base_time_ns - last_gp2_ns;
+ IWL_DEBUG_INFO(mvm, "diff ns=%llu\n", (unsigned long long)diff);
+
+ diff = mul_u64_u64_div_u64(diff, data->scaled_freq,
+ SCALE_FACTOR);
+ IWL_DEBUG_INFO(mvm, "scaled diff ns=%llu\n", (unsigned long long)diff);
+
+ res = data->scale_update_adj_time_ns + data->delta + diff;
+
+ IWL_DEBUG_INFO(mvm, "base=%llu delta=%lld adj=%llu\n",
+ (unsigned long long)base_time_ns, (long long)data->delta,
+ (unsigned long long)res);
+ return res;
+}
+
+static int
+iwl_mvm_get_crosstimestamp_fw(struct iwl_mvm *mvm, u32 *gp2, u64 *sys_time)
+{
+ struct iwl_synced_time_cmd synced_time_cmd = {
+ .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH)
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD),
+ .flags = CMD_WANT_SKB,
+ .data[0] = &synced_time_cmd,
+ .len[0] = sizeof(synced_time_cmd),
+ };
+ struct iwl_synced_time_rsp *resp;
+ struct iwl_rx_packet *pkt;
+ int ret;
+ u64 gp2_10ns;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) {
+ IWL_ERR(mvm, "PTP: Invalid command response\n");
+ iwl_free_resp(&cmd);
+ return -EIO;
+ }
+
+ resp = (void *)pkt->data;
+
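+ /* the response carries GP2 in 10 ns units; convert to usec */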
+ gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 |
+ le32_to_cpu(resp->gp2_timestamp_lo);
+ *gp2 = div_u64(gp2_10ns, 100);
+
+ *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 |
+ le32_to_cpu(resp->platform_timestamp_lo);
+
+ return ret;
+}
+
+static void iwl_mvm_phc_get_crosstimestamp_loop(struct iwl_mvm *mvm,
+ ktime_t *sys_time, u32 *gp2)
+{
+ u64 diff = 0, new_diff;
+ u64 tmp_sys_time;
+ u32 tmp_gp2;
+ int i;
+
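+ /* Take several paired readings and keep the pair with the smallest
+ * (sys_time - GP2) offset, i.e. presumably the pair captured with
+ * the least delay between reading the two clocks.
+ */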
+ for (i = 0; i < IWL_PTP_GET_CROSS_TS_NUM; i++) {
+ iwl_mvm_get_sync_time(mvm, CLOCK_REALTIME, &tmp_gp2, NULL,
+ &tmp_sys_time);
+ new_diff = tmp_sys_time - ((u64)tmp_gp2 * NSEC_PER_USEC);
+ if (!diff || new_diff < diff) {
+ *sys_time = tmp_sys_time;
+ *gp2 = tmp_gp2;
+ diff = new_diff;
+ IWL_DEBUG_INFO(mvm, "PTP: new times: gp2=%u sys=%lld\n",
+ *gp2, *sys_time);
+ }
+ }
+}
+
+static int
+iwl_mvm_phc_get_crosstimestamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
+ ptp_data.ptp_clock_info);
+ int ret = 0;
+ /* Raw value read from GP2 register in usec */
+ u32 gp2;
+ /* GP2 value in ns */
+ s64 gp2_ns;
+ /* System (wall) time */
+ ktime_t sys_time;
+
+ memset(xtstamp, 0, sizeof(struct system_device_crosststamp));
+
+ if (!mvm->ptp_data.ptp_clock) {
+ IWL_ERR(mvm, "No PHC clock registered\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SYNCED_TIME)) {
+ ret = iwl_mvm_get_crosstimestamp_fw(mvm, &gp2, &sys_time);
+
+ if (ret)
+ goto out;
+ } else {
+ iwl_mvm_phc_get_crosstimestamp_loop(mvm, &sys_time, &gp2);
+ }
+
+ gp2_ns = iwl_mvm_ptp_get_adj_time(mvm, (u64)gp2 * NSEC_PER_USEC);
+
+ IWL_INFO(mvm, "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n",
+ gp2, mvm->ptp_data.last_gp2, gp2_ns, (s64)sys_time);
+
+ /* System monotonic raw time is not used */
+ xtstamp->device = (ktime_t)gp2_ns;
+ xtstamp->sys_realtime = sys_time;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static void iwl_mvm_ptp_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+ ptp_data.dwork.work);
+ u32 gp2;
+
+ mutex_lock(&mvm->mutex);
+ gp2 = iwl_mvm_get_systime(mvm);
+ iwl_mvm_ptp_update_new_read(mvm, gp2);
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
+ ptp_data.ptp_clock_info);
+ u64 gp2;
+ u64 ns;
+
+ mutex_lock(&mvm->mutex);
+ gp2 = iwl_mvm_get_systime(mvm);
+ ns = iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC);
+ mutex_unlock(&mvm->mutex);
+
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int iwl_mvm_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = container_of(ptp, struct ptp_data,
+ ptp_clock_info);
+
+ mutex_lock(&mvm->mutex);
+ data->delta += delta;
+ IWL_DEBUG_INFO(mvm, "delta=%lld, new delta=%lld\n", (long long)delta,
+ (long long)data->delta);
+ mutex_unlock(&mvm->mutex);
+ return 0;
+}
+
+static int iwl_mvm_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mvm->ptp_data;
+ u32 gp2;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Must call iwl_mvm_ptp_get_adj_time() before updating
+ * data->scale_update_gp2 or data->scaled_freq, since
+ * scale_update_adj_time_ns should reflect the previous scaled_freq.
+ */
+ gp2 = iwl_mvm_get_systime(mvm);
+ data->scale_update_adj_time_ns =
+ iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC);
+ data->scale_update_gp2 = gp2;
+ data->wrap_counter = 0;
+ data->delta = 0;
+
+ data->scaled_freq = SCALE_FACTOR + scaled_ppm;
+ IWL_DEBUG_INFO(mvm, "adjfine: scaled_ppm=%ld new=%llu\n",
+ scaled_ppm, (unsigned long long)data->scaled_freq);
+
+ mutex_unlock(&mvm->mutex);
+ return 0;
+}
+
+/* iwl_mvm_ptp_init - initialize PTP for devices which support it.
+ * @mvm: internal mvm structure, see &struct iwl_mvm.
+ *
+ * Performs the required steps for enabling PTP support.
+ */
+void iwl_mvm_ptp_init(struct iwl_mvm *mvm)
+{
+ /* Warn if the interface already has a ptp_clock defined */
+ if (WARN_ON(mvm->ptp_data.ptp_clock))
+ return;
+
+ mvm->ptp_data.ptp_clock_info.owner = THIS_MODULE;
+ mvm->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
+ mvm->ptp_data.ptp_clock_info.getcrosststamp =
+ iwl_mvm_phc_get_crosstimestamp;
+ mvm->ptp_data.ptp_clock_info.adjfine = iwl_mvm_ptp_adjfine;
+ mvm->ptp_data.ptp_clock_info.adjtime = iwl_mvm_ptp_adjtime;
+ mvm->ptp_data.ptp_clock_info.gettime64 = iwl_mvm_ptp_gettime;
+ mvm->ptp_data.scaled_freq = SCALE_FACTOR;
+
+ /* Give a short 'friendly name' to identify the PHC clock */
+ snprintf(mvm->ptp_data.ptp_clock_info.name,
+ sizeof(mvm->ptp_data.ptp_clock_info.name),
+ "%s", "iwlwifi-PTP");
+
+ INIT_DELAYED_WORK(&mvm->ptp_data.dwork, iwl_mvm_ptp_work);
+
+ mvm->ptp_data.ptp_clock =
+ ptp_clock_register(&mvm->ptp_data.ptp_clock_info, mvm->dev);
+
+ if (IS_ERR(mvm->ptp_data.ptp_clock)) {
+ IWL_ERR(mvm, "Failed to register PHC clock (%ld)\n",
+ PTR_ERR(mvm->ptp_data.ptp_clock));
+ mvm->ptp_data.ptp_clock = NULL;
+ } else if (mvm->ptp_data.ptp_clock) {
+ IWL_DEBUG_INFO(mvm, "Registered PHC clock: %s, with index: %d\n",
+ mvm->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mvm->ptp_data.ptp_clock));
+ }
+}
+
+/* iwl_mvm_ptp_remove - disable PTP device.
+ * @mvm: internal mvm structure, see &struct iwl_mvm.
+ *
+ * Disable PTP support.
+ */
+void iwl_mvm_ptp_remove(struct iwl_mvm *mvm)
+{
+ if (mvm->ptp_data.ptp_clock) {
+ IWL_DEBUG_INFO(mvm, "Unregistering PHC clock: %s, with index: %d\n",
+ mvm->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mvm->ptp_data.ptp_clock));
+
+ ptp_clock_unregister(mvm->ptp_data.ptp_clock);
+ mvm->ptp_data.ptp_clock = NULL;
+ memset(&mvm->ptp_data.ptp_clock_info, 0,
+ sizeof(mvm->ptp_data.ptp_clock_info));
+ mvm->ptp_data.last_gp2 = 0;
+ cancel_delayed_work_sync(&mvm->ptp_data.dwork);
+ }
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/quota.c b/sys/contrib/dev/iwlwifi/mvm/quota.c
new file mode 100644
index 000000000000..798a7e4bea83
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/quota.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018, 2021-2022, 2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
+struct iwl_mvm_quota_iterator_data {
+ int n_interfaces[MAX_BINDINGS];
+ int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int dbgfs_min[MAX_BINDINGS];
+#endif
+ int n_low_latency_bindings;
+ struct ieee80211_vif *disabled_vif;
+};
+
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_quota_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 id;
+
+ /* skip disabled interfaces here immediately */
+ if (vif == data->disabled_vif)
+ return;
+
+ if (!mvmvif->deflink.phy_ctxt)
+ return;
+
+ /* currently, PHY ID == binding ID */
+ id = mvmvif->deflink.phy_ctxt->id;
+
+ /* need at least one binding per PHY */
+ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+ if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+ return;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->cfg.assoc)
+ break;
+ return;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ if (mvmvif->ap_ibss_active)
+ break;
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ if (mvmvif->monitor_active)
+ break;
+ return;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (data->colors[id] < 0)
+ data->colors[id] = mvmvif->deflink.phy_ctxt->color;
+ else
+ WARN_ON_ONCE(data->colors[id] !=
+ mvmvif->deflink.phy_ctxt->color);
+
+ data->n_interfaces[id]++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_quota_min)
+ data->dbgfs_min[id] = max(data->dbgfs_min[id],
+ mvmvif->dbgfs_quota_min);
+#endif
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
+ }
+}
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+ bool force_update,
+ struct ieee80211_vif *disabled_vif)
+{
+ struct iwl_time_quota_cmd cmd = {};
+ int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
+ struct iwl_mvm_quota_iterator_data data = {
+ .n_interfaces = {},
+ .colors = { -1, -1, -1, -1 },
+ .disabled_vif = disabled_vif,
+ };
+ struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
+ struct iwl_time_quota_data *qdata, *last_data;
+ bool send = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return 0;
+
+ /* during a HW restart, skip; everything is updated upon completion */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ /* iterator data above must match */
+ BUILD_BUG_ON(MAX_BINDINGS != 4);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_quota_iterator, &data);
+
+ /*
+ * The FW's scheduling session consists of
+ * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+ * equally between all the bindings that require quota
+ */
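+ /* (e.g. one low-latency binding plus two other active interfaces:
+ * the low-latency binding keeps at least QUOTA_LOWLAT_MIN, each
+ * other interface gets (QUOTA_100 - QUOTA_LOWLAT_MIN) / 2, and any
+ * integer-division remainder goes to the first non-zero binding)
+ */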
+ num_active_macs = 0;
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ num_active_macs += data.n_interfaces[i];
+ }
+
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding in case that
+ * there are several data bindings but only a single
+ * low latency one. Split the rest of the quota equally
+ * between the other data interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: low-latency binding active, remaining quota per other binding: %d\n",
+ quota);
+ } else if (num_active_macs) {
+ /*
+ * There are either no low-latency bindings, more than one, or all
+ * the data interfaces belong to the single low-latency binding.
+ * Split the quota equally between the data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: splitting evenly per binding: %d\n",
+ quota);
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
+ }
+
+ for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+ if (data.colors[i] < 0)
+ continue;
+
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
+
+ qdata->id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+
+ if (data.n_interfaces[i] <= 0)
+ qdata->quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ else if (data.dbgfs_min[i])
+ qdata->quota =
+ cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of the
+ * bindings is in low latency. For this case, allocate
+ * the minimal required quota for the low latency
+ * binding.
+ */
+ qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+ else
+ qdata->quota =
+ cpu_to_le32(quota * data.n_interfaces[i]);
+
+ WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(qdata->quota), QUOTA_100);
+
+ qdata->max_duration = cpu_to_le32(0);
+
+ idx++;
+ }
+
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ if (le32_to_cpu(qdata->quota) != 0) {
+ le32_add_cpu(&qdata->quota, quota_rem);
+ IWL_DEBUG_QUOTA(mvm,
+ "quota: giving remainder of %d to binding %d\n",
+ quota_rem, i);
+ break;
+ }
+ }
+
+ /* Compare with the last command sent to decide whether an update
+ * is needed, and check that all valid bindings have non-zero quota.
+ */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
+ if (qdata->id_and_color != last_data->id_and_color)
+ send = true;
+ if (qdata->max_duration != last_data->max_duration)
+ send = true;
+ if (abs((int)le32_to_cpu(qdata->quota) -
+ (int)le32_to_cpu(last_data->quota))
+ > IWL_MVM_QUOTA_THRESHOLD)
+ send = true;
+ if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID))
+ continue;
+ WARN_ONCE(qdata->quota == 0,
+ "zero quota on binding %d\n", i);
+ }
+
+ if (!send && !force_update) {
+ /* Don't send a practically unchanged command; the firmware would
+ * have to re-initialize a lot of state, and that can have an
+ * adverse impact on it.
+ */
+ return 0;
+ }
+
+ err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+ iwl_mvm_quota_cmd_size(mvm), &cmd);
+
+ if (err)
+ IWL_ERR(mvm, "Failed to send quota: %d\n", err);
+ else
+ mvm->last_quota_cmd = cmd;
+ return err;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/rfi.c b/sys/contrib/dev/iwlwifi/mvm/rfi.c
new file mode 100644
index 000000000000..045c862a8fc4
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rfi.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2020 - 2022 Intel Corporation
+ */
+
+#include "mvm.h"
+#include "fw/api/commands.h"
+#include "fw/api/phy-ctxt.h"
+
+/*
+ * DDR needs frequency in units of 16.666 MHz, so provide FW with the
+ * frequency values in the adjusted format.
+ */
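+/* (e.g. 2667 MHz / 16.666 MHz ~= 160, the first table entry below) */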
+static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
+ /* frequency 2667MHz */
+ {cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 2933MHz */
+ {cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169,
+ 171, 173, 175},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 3200MHz */
+ {cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 3733MHz */
+ {cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 4000MHz */
+ {cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 4267MHz */
+ {cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 4400MHz */
+ {cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 5200MHz */
+ {cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 5600MHz */
+ {cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122},
+ {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,
+ PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}},
+
+ /* frequency 6000MHz */
+ {cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+
+ /* frequency 6400MHz */
+ {cpu_to_le16(384), {79, 83, 85, 87, 89, 91, 93,},
+ {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,
+ PHY_BAND_6, PHY_BAND_6,}},
+};
+
+bool iwl_rfi_supported(struct iwl_mvm *mvm)
+{
+ /* The feature depends on a platform bugfix, so for now
+ * it's always disabled.
+ * When the platform support detection is implemented we should
+ * check FW TLV and platform support instead.
+ */
+ return false;
+}
+
+int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
+{
+ int ret;
+ struct iwl_rfi_config_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, RFI_CONFIG_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+
+ if (!iwl_rfi_supported(mvm))
+ return -EOPNOTSUPP;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* in case no table is passed, use the default one */
+ if (!rfi_table) {
+ memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
+ } else {
+ memcpy(cmd.table, rfi_table, sizeof(cmd.table));
+ /* notify FW the table is not the default one */
+ cmd.oem = 1;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
+
+ return ret;
+}
+
+struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
+{
+ struct iwl_rfi_freq_table_resp_cmd *resp;
+ int resp_size = sizeof(*resp);
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, RFI_GET_FREQ_TABLE_CMD),
+ .flags = CMD_WANT_SKB,
+ };
+
+ if (!iwl_rfi_supported(mvm))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ mutex_unlock(&mvm->mutex);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
+ return ERR_PTR(-EIO);
+ }
+
+ resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
+ if (!resp)
+ return ERR_PTR(-ENOMEM);
+
+ return resp;
+}
+
+void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data;
+
+ IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/rs-fw.c b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 000000000000..23a9f1a59ad3
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta)
+{
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_TLC_MNG_CH_WIDTH_320MHZ;
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+
+ return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u8 supp = 0;
+
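+ /* HE does not use the HT/VHT short-GI capability bits */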
+ if (he_cap->has_he)
+ return 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+ return supp;
+}
+
+static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *sband_he_cap)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ bool vht_ena = vht_cap->vht_supported;
+ u16 flags = 0;
+
+ /* get STBC flags */
+ if (mvm->cfg->ht_params.stbc &&
+ (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
+ if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ }
+
+ if (mvm->cfg->ht_params.ldpc &&
+ ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
+ (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ /* consider LDPC support in case of HE */
+ if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (sband_he_cap &&
+ !(sband_he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (he_cap->has_he &&
+ (he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ sband_he_cap &&
+ sband_he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+ return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
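+ /* the VHT RX MCS map packs a 2-bit "highest supported MCS" field
+ * per spatial stream; extract the field for this NSS
+ */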
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+rs_fw_vht_set_enabled_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+ u8 max_nss = link_sta->rx_nss;
+ struct ieee80211_vht_cap ieee_vht_cap = {
+ .vht_cap_info = cpu_to_le32(vht_cap->cap),
+ .supp_mcs = vht_cap->vht_mcs,
+ };
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ max_nss = 1;
+
+ for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+ int nss = i + 1;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ /*
+ * Check if VHT extended NSS indicates that the bandwidth/NSS
+ * configuration is supported - only for MCS 0 since we already
+ * decoded the MCS bits anyway ourselves.
+ */
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ ieee80211_get_vht_max_nss(&ieee_vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true, nss) >= nss)
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
+ }
+}
+
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *sband_he_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ u16 tx_mcs_80 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_80);
+ u16 tx_mcs_160 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_160);
+ int i;
+ u8 nss = link_sta->rx_nss;
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ nss = 1;
+
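+ /* The HE MCS maps also use 2 bits per spatial stream; clamp each
+ * stream to what the peer can receive and we can transmit.
+ */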
+ for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_80 > _tx_mcs_80)
+ _mcs_80 = _tx_mcs_80;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_160 > _tx_mcs_160)
+ _mcs_160 = _tx_mcs_160;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+ }
+}
+
+static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+ u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+ /* the max NSS that can be used is the minimum of our TX
+ * capability and the peer's RX capability
+ */
+ return min(tx, rx);
+}
+
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+ rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
+ (tx)->rx_tx_mcs ##mcs_num## _max_nss)
+
+static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3],
+ enum IWL_TLC_MCS_PER_BW bw,
+ u8 max_nss, u16 mcs_msk)
+{
+ if (max_nss >= 2)
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+ if (max_nss >= 1)
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw,
+ const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+ switch (bw) {
+ case IWL_TLC_MCS_PER_BW_80:
+ return &eht_mcs->bw._80;
+ case IWL_TLC_MCS_PER_BW_160:
+ return &eht_mcs->bw._160;
+ case IWL_TLC_MCS_PER_BW_320:
+ return &eht_mcs->bw._320;
+ default:
+ return NULL;
+ }
+}
+
+static void
+rs_fw_eht_set_enabled_rates(struct ieee80211_vif *vif,
+ const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *sband_he_cap,
+ const struct ieee80211_sta_eht_cap *sband_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ /* peer RX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+ &link_sta->eht_cap.eht_mcs_nss_supp;
+ /* our TX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+ &sband_eht_cap->eht_mcs_nss_supp;
+
+ enum IWL_TLC_MCS_PER_BW bw;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+ /* peer is 20 MHz only */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_rx_20 = eht_rx_mcs->only_20mhz;
+ } else {
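+ /* the >= 80 MHz MCS/NSS struct has no separate MCS 0-7 field,
+ * so reuse the MCS 0-9 NSS value for it
+ */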
+ mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* NIC is 20 MHz only */
+ if (!(sband_he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_tx_20 = eht_tx_mcs->only_20mhz;
+ } else {
+ mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* rates for 20/40/80 bw */
+ bw = IWL_TLC_MCS_PER_BW_80;
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12));
+
+ /* rates for 160/320 MHz bandwidths */
+ for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+ rs_fw_rs_mcs2eht_mcs(bw, eht_rx_mcs);
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+ rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs);
+
+ /* no EHT MCS struct for this bandwidth */
+ if (!mcs_rx || !mcs_tx)
+ continue;
+
+ /* break out if we don't support the bandwidth */
+ if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
+ break;
+
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10));
+ rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12));
+ }
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
+ link_sta->rx_nss < 2)
+ memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+ sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
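
Each cmd->ht_rates[nss][bw] word is a 16-bit bitmap with one bit per MCS index; the GENMASK() arguments above carve it into the EHT ranges 0-7/8-9/10-11/12-13 at 80 MHz and 0-9/10-11/12-13 at 160/320 MHz, each range gated by its own NSS limit. A standalone sketch of how those masks compose, with GENMASK re-derived locally for userspace and made-up NSS limits:

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's GENMASK() */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

static void set_mcs_nss(uint16_t rates[2], int max_nss, uint16_t mcs_msk)
{
	if (max_nss >= 2)
		rates[1] |= mcs_msk;	/* NSS 2 */
	if (max_nss >= 1)
		rates[0] |= mcs_msk;	/* NSS 1 */
}

int main(void)
{
	uint16_t rates[2] = { 0, 0 };

	/* e.g. both sides agree on 2 streams up to MCS 9, 1 stream beyond */
	set_mcs_nss(rates, 2, GENMASK(9, 0));
	set_mcs_nss(rates, 1, GENMASK(11, 10));
	set_mcs_nss(rates, 1, GENMASK(13, 12));

	/* prints "NSS1 map 0x3fff, NSS2 map 0x03ff" */
	printf("NSS1 map 0x%04x, NSS2 map 0x%04x\n",
	       (unsigned)rates[0], (unsigned)rates[1]);
	return 0;
}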
+
+static void rs_fw_set_supp_rates(struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_sta_he_cap *sband_he_cap,
+ const struct ieee80211_sta_eht_cap *sband_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ int i;
+ u16 supp = 0;
+ unsigned long tmp; /* must be unsigned long for for_each_set_bit */
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+
+ /* non HT rates */
+ tmp = link_sta->supp_rates[sband->band];
+ for_each_set_bit(i, &tmp, BITS_PER_LONG)
+ supp |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_rates = cpu_to_le16(supp);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ /* HT/VHT rates */
+ if (link_sta->eht_cap.has_eht && sband_he_cap && sband_eht_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_EHT;
+ rs_fw_eht_set_enabled_rates(vif, link_sta, sband_he_cap,
+ sband_eht_cap, cmd);
+ } else if (he_cap->has_he && sband_he_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ rs_fw_he_set_enabled_rates(link_sta, sband_he_cap, cmd);
+ } else if (vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ rs_fw_vht_set_enabled_rates(link_sta, vht_cap, cmd);
+ } else if (ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ 0;
+ else
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tlc_update_notif *notif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ struct iwl_lq_sta_rs_fw *lq_sta;
+ u32 flags;
+
+ rcu_read_lock();
+
+ notif = (void *)pkt->data;
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[notif->sta_id]);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+ if (IS_ERR_OR_NULL(sta) || !link_sta) {
+ /* can happen in the remove-station flow, where mvm removes
+ * the station internally before removing it from the FW
+ */
+ IWL_DEBUG_RATE(mvm,
+ "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n",
+ notif->sta_id);
+ goto out;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ flags = le32_to_cpu(notif->flags);
+
+ mvm_link_sta = rcu_dereference(mvmsta->link[link_sta->link_id]);
+ if (!mvm_link_sta) {
+ IWL_DEBUG_RATE(mvm,
+ "Invalid mvmsta RCU pointer for link (%d) of sta id (%d) in TLC notification\n",
+ link_sta->link_id, notif->sta_id);
+ goto out;
+ }
+ lq_sta = &mvm_link_sta->lq_sta.rs_fw;
+
+ if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ char pretty_rate[100];
+
+ lq_sta->last_rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(notif->rate, mvm->fw_rates_ver);
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ lq_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mvm, "rate: %s\n", pretty_rate);
+ }
+
+ if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
+ u16 size = le32_to_cpu(notif->amsdu_size);
+ int i;
+
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
+ if (link_sta->agg.max_amsdu_len < size) {
+ /*
+ * in debug builds, link_sta->agg.max_amsdu_len may have been
+ * lowered via debugfs, so also check orig_amsdu_len, which
+ * holds the original value from before debugfs changed it
+ */
+ WARN_ON(mvm_link_sta->orig_amsdu_len < size);
+ goto out;
+ }
+
+ mvmsta->amsdu_enabled = enabled;
+ mvmsta->max_amsdu_len = size;
+ link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
+
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (mvmsta->amsdu_enabled & BIT(i))
+ link_sta->agg.max_tid_amsdu_len[i] =
+ iwl_mvm_max_amsdu_size(mvm, sta, i);
+ else
+ /*
+ * Not so elegant, but this will effectively
+ * prevent AMSDU on this TID
+ */
+ link_sta->agg.max_tid_amsdu_len[i] = 1;
+ }
+
+ ieee80211_sta_recalc_aggregates(sta);
+
+ IWL_DEBUG_RATE(mvm,
+ "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+ le32_to_cpu(notif->amsdu_size), size,
+ mvmsta->amsdu_enabled);
+ }
+out:
+ rcu_read_unlock();
+}
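
The per-TID loop above is the interesting part of the A-MSDU update: TIDs whose bit is set in amsdu_enabled get the negotiated limit, while the others get a limit of 1, which effectively forbids A-MSDU on that TID. A minimal standalone sketch of that update; the bitmap and size values below are made up for illustration:

#include <stdio.h>

#define MAX_TID 8

int main(void)
{
	unsigned amsdu_enabled = 0x03;	/* TIDs 0 and 1 only */
	unsigned limit = 7991;		/* negotiated A-MSDU size */
	unsigned max_tid_amsdu_len[MAX_TID];

	/* limit of 1 is the "not so elegant" way to disable A-MSDU */
	for (int i = 0; i < MAX_TID; i++)
		max_tid_amsdu_len[i] = (amsdu_enabled & (1u << i)) ? limit : 1;

	for (int i = 0; i < MAX_TID; i++)
		printf("TID %d: %u\n", i, max_tid_amsdu_len[i]);
	return 0;
}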
+
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+#if defined(__linux__)
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+#endif
+ const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+ switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ default:
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
+ eht_cap->has_eht) {
+ switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991:
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ default:
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+ } else if (vht_cap->vht_supported) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ default:
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+ } else if (ht_cap->ht_supported) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
+ /*
+ * aggregation is offloaded, so we need to assume it is
+ * enabled and that the max MPDU in an A-MPDU is 4095
+ * (spec 802.11-2016 9.3.2.1)
+ */
+ return IEEE80211_MAX_MPDU_LEN_HT_BA;
+ else
+ return IEEE80211_MAX_MPDU_LEN_HT_3839;
+ }
+
+ /* in legacy mode no A-MSDU is enabled, so return zero */
+ return 0;
+}
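
All of the branches above end up decoding a 2-bit max-MPDU-length field into the same three VHT-derived values (3895/7991/11454 octets). A standalone sketch of the VHT variant of that decode; the mask used here is the literal low two bits of the capability word rather than the IEEE80211_VHT_CAP_MAX_MPDU_MASK definition:

#include <stdio.h>
#include <stdint.h>

enum { MPDU_3895 = 3895, MPDU_7991 = 7991, MPDU_11454 = 11454 };

static unsigned vht_max_mpdu(uint32_t vht_cap)
{
	switch (vht_cap & 0x3) {	/* 2-bit max-MPDU-length field */
	case 2: return MPDU_11454;
	case 1: return MPDU_7991;
	default: return MPDU_3895;	/* 0, and the reserved value 3 */
	}
}

int main(void)
{
	for (uint32_t cap = 0; cap < 4; cap++)
		printf("field %u -> %u octets\n", cap, vht_max_mpdu(cap));
	return 0;
}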
+
+void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ enum nl80211_band band)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta, link_conf, link_sta);
+ const struct ieee80211_sta_he_cap *sband_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, vif);
+ const struct ieee80211_sta_eht_cap *sband_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, vif);
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ struct iwl_lq_sta_rs_fw *lq_sta;
+ struct iwl_tlc_config_cmd_v4 cfg_cmd = {
+ .max_ch_width = mvmsta->authorized ?
+ rs_fw_bw_from_sta_bw(link_sta) : IWL_TLC_MNG_CH_WIDTH_20MHZ,
+ .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, vif, link_sta,
+ sband_he_cap)),
+ .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+ .sgi_ch_width_supp = rs_fw_sgi_cw_support(link_sta),
+ .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ?
+ cpu_to_le16(max_amsdu_len) : 0,
+ };
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ int cmd_ver;
+ int ret;
+
+ /* Enable extra EHT LTF if there's mutual support by AP and client */
+ if (sband_eht_cap &&
+ sband_eht_cap->eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
+ link_sta->eht_cap.has_eht &&
+ link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
+ IWL_DEBUG_RATE(mvm, "Set support for Extra EHT LTF\n");
+ cfg_cmd.flags |=
+ cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK);
+ }
+
+ rcu_read_lock();
+ mvm_link_sta = rcu_dereference(mvmsta->link[link_id]);
+ if (WARN_ON_ONCE(!mvm_link_sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ cfg_cmd.sta_id = mvm_link_sta->sta_id;
+
+ lq_sta = &mvm_link_sta->lq_sta.rs_fw;
+ memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+ rcu_read_unlock();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+#endif
+ rs_fw_set_supp_rates(vif, link_sta, sband,
+ sband_he_cap, sband_eht_cap,
+ &cfg_cmd);
+
+ /*
+ * since TLC offload works with a single mode we can assume
+ * that only VHT/HT is used; also set it as the station's
+ * max A-MSDU length
+ */
+ link_sta->agg.max_amsdu_len = max_amsdu_len;
+ ieee80211_sta_recalc_aggregates(sta);
+
+ cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op);
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
+ cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n",
+ cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n",
+ cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n",
+ cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]);
+ IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n",
+ cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]);
+ if (cmd_ver == 4) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC,
+ sizeof(cfg_cmd), &cfg_cmd);
+ } else if (cmd_ver < 4) {
+ struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = {
+ .sta_id = cfg_cmd.sta_id,
+ .max_ch_width = cfg_cmd.max_ch_width,
+ .mode = cfg_cmd.mode,
+ .chains = cfg_cmd.chains,
+ .amsdu = !!cfg_cmd.max_mpdu_len,
+ .flags = cfg_cmd.flags,
+ .non_ht_rates = cfg_cmd.non_ht_rates,
+ .ht_rates[0][0] = cfg_cmd.ht_rates[0][0],
+ .ht_rates[0][1] = cfg_cmd.ht_rates[0][1],
+ .ht_rates[1][0] = cfg_cmd.ht_rates[1][0],
+ .ht_rates[1][1] = cfg_cmd.ht_rates[1][1],
+ .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp,
+ .max_mpdu_len = cfg_cmd.max_mpdu_len,
+ };
+
+ u16 cmd_size = sizeof(cfg_cmd_v3);
+
+ /* In old versions of the API the struct is 4 bytes smaller */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3)
+ cmd_size -= 4;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
+ &cfg_cmd_v3);
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+}
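
The version dance at the end boils down to: send the v4 struct as-is for cmd_ver 4, downgrade to the v3 struct for older firmware, and shave 4 bytes off the v3 struct for pre-v3 API versions. A small sketch of the sizing logic only; the struct layout below is a placeholder, not the real iwl_tlc_config_cmd_v3:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* placeholder layout - only the total size matters for this sketch */
struct tlc_cmd_v3 {
	uint8_t  sta_id, max_ch_width, mode, chains;
	uint8_t  amsdu, reserved1;
	uint16_t flags, non_ht_rates;
	uint16_t ht_rates[2][2];
	uint16_t sgi_ch_width_supp;
	uint16_t max_mpdu_len;
};

static size_t tlc_cmd_size(int api_ver)
{
	size_t size = sizeof(struct tlc_cmd_v3);

	/* in old versions of the API the struct is 4 bytes smaller */
	if (api_ver < 3)
		size -= 4;
	return size;
}

int main(void)
{
	printf("v3: %zu bytes, v2: %zu bytes\n",
	       tlc_cmd_size(3), tlc_cmd_size(2));
	return 0;
}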
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
+{
+ /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+ return 0;
+}
+
+void iwl_mvm_rs_add_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_link_sta *link_sta)
+{
+ struct iwl_lq_sta_rs_fw *lq_sta;
+
+ lq_sta = &link_sta->lq_sta.rs_fw;
+
+ lq_sta->pers.drv = mvm;
+ lq_sta->pers.sta_id = link_sta->sta_id;
+ lq_sta->pers.chains = 0;
+ memset(lq_sta->pers.chain_signal, 0,
+ sizeof(lq_sta->pers.chain_signal));
+ lq_sta->pers.last_rssi = S8_MIN;
+ lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+ unsigned int link_id;
+
+ IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+ for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
+ struct iwl_mvm_link_sta *link =
+ rcu_dereference_protected(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!link)
+ continue;
+
+ iwl_mvm_rs_add_sta_link(mvm, link);
+ }
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/rs.c b/sys/contrib/dev/iwlwifi/mvm/rs.c
new file mode 100644
index 000000000000..f460624871ee
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rs.c
@@ -0,0 +1,214 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * XXX-BZ:
+ * This file is left as a wrapper to make mvm compile; we will only
+ * deal with it on an as-needed basis. Most newer chipsets do this in
+ * firmware.
+ */
+#include <sys/param.h>
+#include <net/cfg80211.h> /* LinuxKPI 802.11 TODO() calls. */
+
+#include "rs.h"
+#include "mvm.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ * Fill struct iwl_mvm_frame_stats.
+ * Deal with various RATE_MCS_*_MSK. See rx.c, fw/api/rs.h, et al.
+ * XXX-BZ consider calling iwl_new_rate_from_v1() in rx.c so we can also
+ * use this in rxmq.c.
+ */
+void
+iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
+{
+ uint8_t nss;
+
+ spin_lock_bh(&mvm->drv_stats_lock);
+ mvm->drv_rx_stats.success_frames++;
+
+ if (rate & RATE_MCS_HT_MSK_V1) {
+ mvm->drv_rx_stats.ht_frames++;
+ nss = 1 + ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1);
+ } else if (rate & RATE_MCS_VHT_MSK_V1) {
+ mvm->drv_rx_stats.vht_frames++;
+ nss = 1 + FIELD_GET(RATE_MCS_NSS_MSK, rate);
+ } else {
+ mvm->drv_rx_stats.legacy_frames++;
+ nss = 0;
+ }
+
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ mvm->drv_rx_stats.bw_20_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ mvm->drv_rx_stats.bw_40_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ mvm->drv_rx_stats.bw_80_frames++;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ mvm->drv_rx_stats.bw_160_frames++;
+ break;
+ }
+
+ if ((rate & RATE_MCS_CCK_MSK_V1) == 0 &&
+ (rate & RATE_MCS_SGI_MSK_V1) != 0)
+ mvm->drv_rx_stats.sgi_frames++;
+ else
+ mvm->drv_rx_stats.ngi_frames++;
+
+ switch (nss) {
+ case 1:
+ mvm->drv_rx_stats.siso_frames++;
+ break;
+ case 2:
+ mvm->drv_rx_stats.mimo2_frames++;
+ break;
+ }
+
+ if (agg)
+ mvm->drv_rx_stats.agg_frames++;
+
+ /* ampdu_count? */
+ /* fail_frames? */
+
+ mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
+ mvm->drv_rx_stats.last_frame_idx++;
+ mvm->drv_rx_stats.last_frame_idx %=
+ ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
+
+ spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void
+iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+{
+ /* Apply the same locking rx.c does; debugfs seems to read unlocked? */
+ spin_lock_bh(&mvm->drv_stats_lock);
+ memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
+ spin_unlock_bh(&mvm->drv_stats_lock);
+}
+#endif
+
+int
+iwl_mvm_rate_control_register(void)
+{
+ TODO("This likely has to call into net80211 unless we gain compat code in LinuxKPI");
+ return (0);
+}
+
+void
+iwl_mvm_rate_control_unregister(void)
+{
+ TODO("This likely has to call into net80211 unless we gain compat code in LinuxKPI");
+}
+
+int
+iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ return (rs_fw_tx_protection(mvm, mvmsta, enable));
+ else {
+ TODO();
+ return (0);
+ }
+}
+
+static void
+iwl_mvm_rs_sw_rate_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf, struct ieee80211_link_sta *link_sta,
+ enum nl80211_band band)
+{
+ TODO();
+}
+
+void
+iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf, struct ieee80211_link_sta *link_sta,
+ enum nl80211_band band)
+{
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_fw_rate_init(mvm, vif, sta, link_conf, link_sta, band);
+ else
+ iwl_mvm_rs_sw_rate_init(mvm, vif, sta, link_conf, link_sta, band);
+}
+
+void
+iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid,
+ struct ieee80211_tx_info *ba_info, bool t)
+{
+ TODO();
+}
+
+void
+rs_update_last_rssi(struct iwl_mvm *mvm __unused, struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct iwl_lq_sta *lq_sta;
+ int i;
+
+ if (mvmsta == NULL || rx_status == NULL)
+ return;
+
+ /*
+ * The assumption based on mvm/sta.h is that this should update
+ * mvmsta->lq_sta.rs_drv, but so far we only saw an iwl_lq_cmd (lq)
+ * access in that struct, so there is nowhere to put RSSI
+ * information. The only remaining possibility is that it is
+ * required internally by functions in this file.
+ * The "FW" version accesses more fields. We assume they
+ * are the same for now.
+ */
+
+ lq_sta = &mvmsta->deflink.lq_sta.rs_drv;
+
+ lq_sta->pers.last_rssi = S8_MIN;
+ lq_sta->pers.chains = rx_status->chains;
+
+ for (i = 0; i < nitems(lq_sta->pers.chain_signal); i++) {
+ if ((rx_status->chains & BIT(i)) == 0)
+ continue;
+
+ lq_sta->pers.chain_signal[i] = rx_status->chain_signal[i];
+ if (rx_status->chain_signal[i] > lq_sta->pers.last_rssi)
+ lq_sta->pers.last_rssi = rx_status->chain_signal[i];
+ }
+}
+
+int
+rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate)
+{
+ TODO();
+ return (0);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/rs.h b/sys/contrib/dev/iwlwifi/mvm/rs.h
new file mode 100644
index 000000000000..71c0744f017c
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rs.h
@@ -0,0 +1,112 @@
+/*-
+ * Copyright (c) 2020-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Björn Zeeb under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * XXX-BZ:
+ * This file is left as a wrapper to make mvm compile; we will only
+ * deal with it on an as-needed basis. Most newer chipsets do this in
+ * firmware.
+ */
+
+#ifndef _IWLWIFI_MVM_RS_H
+#define _IWLWIFI_MVM_RS_H
+
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "fw-api.h"
+
+#define RS_NAME "XXX_unknown"
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (64-1)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (256-1)
+
+struct iwl_mvm;
+
+struct iwl_lq_sta_rs_fw {
+ int last_rate_n_flags;
+ struct {
+ struct iwl_mvm *drv;
+ uint8_t sta_id;
+ uint8_t chains;
+ uint8_t chain_signal[IEEE80211_MAX_CHAINS];
+ uint8_t last_rssi;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ uint32_t dbg_fixed_rate;
+ uint32_t dbg_agg_frame_count_lim;
+#endif
+ } pers;
+};
+
+struct iwl_lq_sta {
+ struct iwl_lq_cmd lq;
+ struct {
+ spinlock_t lock;
+ uint16_t max_agg_bufsize;
+ /*
+ * Based on the assumption that these are in "FW" too and
+ * there is a f() to set last_rssi add them here too.
+ */
+ uint8_t chains;
+ uint8_t chain_signal[IEEE80211_MAX_CHAINS];
+ uint8_t last_rssi;
+ } pers;
+};
+
+#define RS_DRV_DATA_PACK(_c, _f) ((void *)(uintptr_t)(_c | (uintptr_t)(_f) << sizeof(_c))) /* XXX TODO | ? */
+
+struct iwl_mvm_sta;
+struct iwl_mvm_link_sta;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *, struct iwl_mvm_sta *);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *, struct iwl_rx_cmd_buffer *);
+u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *,
+ struct ieee80211_bss_conf *, struct ieee80211_link_sta *);
+void rs_fw_rate_init(struct iwl_mvm *, struct ieee80211_sta *,
+ enum nl80211_band, bool);
+int rs_fw_tx_protection(struct iwl_mvm *, struct iwl_mvm_sta *, bool);
+int iwl_mvm_tx_protection(struct iwl_mvm *, struct iwl_mvm_sta *, bool);
+
+int iwl_mvm_rate_control_register(void);
+void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_bss_conf *,
+ struct ieee80211_link_sta *, enum nl80211_band);
+void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *, struct ieee80211_vif *,
+ struct ieee80211_sta *, struct ieee80211_bss_conf *,
+ struct ieee80211_link_sta *, enum nl80211_band);
+void iwl_mvm_rs_tx_status(struct iwl_mvm *, struct ieee80211_sta *,
+ int, struct ieee80211_tx_info *, bool);
+void iwl_mvm_rs_add_sta_link(struct iwl_mvm *, struct iwl_mvm_link_sta *);
+
+#endif /* _IWLWIFI_MVM_RS_H */
diff --git a/sys/contrib/dev/iwlwifi/mvm/rx.c b/sys/contrib/dev/iwlwifi/mvm/rx.c
new file mode 100644
index 000000000000..f6127c0b5344
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rx.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/unaligned.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Copies the phy information into mvm->last_phy_info; it will be used when
+ * the actual data arrives from the fw in the next packet.
+ */
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (unlikely(pkt_len < sizeof(mvm->last_phy_info)))
+ return;
+
+ memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+ mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ spin_lock(&mvm->drv_stats_lock);
+ mvm->drv_rx_stats.ampdu_count++;
+ spin_unlock(&mvm->drv_stats_lock);
+ }
+#endif
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and gives it to mac80211
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len,
+ u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ unsigned int fraglen;
+
+ /*
+ * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+ * but those are all multiples of 4 long) all goes away, but we
+ * want the *end* of it, which is going to be the start of the IP
+ * header, to be aligned when it gets pulled in.
+ * The beginning of the skb->data is aligned on at least a 4-byte
+ * boundary after allocation. Everything here is aligned at least
+ * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+ * the result.
+ */
+ skb_reserve(skb, hdrlen & 3);
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pull in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+
+ skb_put_data(skb, hdr, hdrlen);
+ fraglen = len - hdrlen;
+
+ if (fraglen) {
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+}
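
The skb_reserve(skb, hdrlen & 3) above relies on 802.11 header lengths always being multiples of 2: padding by hdrlen & 3 guarantees that the *end* of the header, where the IP header will start after 802.3 conversion, lands on a 4-byte boundary. A quick userspace check of that invariant:

#include <stdio.h>

int main(void)
{
	/* 802.11 header lengths are multiples of 2 (24, 26, 30, 32, ...).
	 * Reserving (hdrlen & 3) up front makes pad + hdrlen a multiple
	 * of 4, i.e. the header end is 4-byte aligned. */
	for (unsigned hdrlen = 24; hdrlen <= 32; hdrlen += 2) {
		unsigned pad = hdrlen & 3;

		printf("hdrlen %2u: pad %u, header ends at %u (mod 4 = %u)\n",
		       hdrlen, pad, pad + hdrlen, (pad + hdrlen) % 4);
	}
	return 0;
}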
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBm. Account for missing antennas by replacing 0
+ * values by S8_MIN (-128 dBm): practically zero power and a value no
+ * real measurement will produce.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ int energy_a, energy_b, max_energy;
+ u32 val;
+
+ val =
+ le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+ energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_A_POS;
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_B_POS;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d , and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+}
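
A standalone sketch of the energy-to-dBm conversion above: the firmware reports energy as a positive magnitude per antenna, 0 marks a missing antenna, and the driver negates each value and takes the max. The field positions below are simplified to fixed byte lanes rather than the IWL_RX_INFO_ENERGY_ANT_* mask/position definitions:

#include <stdio.h>
#include <limits.h>

static int ant_energy_to_dbm(unsigned raw)
{
	/* FW reports positive energy; 0 means the antenna is missing,
	 * mapped to SCHAR_MIN so it can never win the max() below */
	return raw ? -(int)raw : SCHAR_MIN;
}

int main(void)
{
	unsigned val = 0x0037;	/* ant A = 0x37 (55), ant B = 0 (absent) */
	int a = ant_energy_to_dbm(val & 0xff);
	int b = ant_energy_to_dbm((val >> 8) & 0xff);

	/* prints "A -55 dBm, B -128 dBm, signal -55 dBm" */
	printf("A %d dBm, B %d dBm, signal %d dBm\n", a, b, a > b ? a : b);
	return 0;
}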
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: 80211 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ *
+ * Returns a non-zero value if the packet should be dropped.
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats,
+ u32 rx_pkt_status,
+ u8 *crypt_len)
+{
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_NO_ENC)
+ return 0;
+
+ /* packet was encrypted with unknown alg */
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+ return 0;
+
+ switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+ case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+ /* alg is CCM: check MIC only */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+ /* Don't drop the frame and decrypt it in SW */
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
+ fallthrough;
+
+ case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ RX_MPDU_RES_STATUS_SEC_WEP_ENC)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
+ return 0;
+
+ case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+
+ default:
+ /* Expected in monitor (not having the keys) */
+#if defined(__linux__)
+ if (!mvm->monitor_on)
+ IWL_WARN(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+#elif defined(__FreeBSD__)
+ if (!mvm->monitor_on && net_ratelimit())
+ IWL_WARN(mvm, "%s: Unhandled alg: 0x%x\n", __func__, rx_pkt_status);
+#endif
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr, u32 len,
+ struct iwl_rx_phy_info *phy_info,
+ u32 rate_n_flags)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_tcm_mac *mdata;
+ int mac;
+ int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+ struct iwl_mvm_vif *mvmvif;
+ /* expected throughput in 100Kbps, single stream, 20 MHz */
+ static const u8 thresh_tpt[] = {
+ 9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
+ };
+ u16 thr;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 tid = ieee80211_get_tid(hdr);
+
+ if (tid < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tid];
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+ mdata = &mvm->tcm.data[mac];
+ mdata->rx.pkts[ac]++;
+
+ /* count the airtime only once for each ampdu */
+ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+ mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+ mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+ }
+
+ if (!(rate_n_flags & (RATE_MCS_HT_MSK_V1 | RATE_MCS_VHT_MSK_V1)))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mdata->opened_rx_ba_sessions ||
+ mdata->uapsd_nonagg_detect.detected ||
+ (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd) ||
+ mvmsta->deflink.sta_id != mvmvif->deflink.ap_sta_id)
+ return;
+
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
+ thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1];
+ thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK_V1) >>
+ RATE_HT_MCS_NSS_POS_V1);
+ } else {
+ if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
+ ARRAY_SIZE(thresh_tpt)))
+ return;
+ thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
+ thr *= 1 + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags);
+ }
+
+ thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >>
+ RATE_MCS_CHAN_WIDTH_POS);
+
+ mdata->uapsd_nonagg_detect.rx_bytes += len;
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
+}
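
The throughput estimate above starts from the per-MCS baseline table (units of 100 kbps, one stream, 20 MHz), multiplies by the stream count, then doubles once per bandwidth step via the channel-width shift. A standalone sketch of that arithmetic with made-up inputs:

#include <stdio.h>

/* same baseline as above: expected tpt in 100 kbps units,
 * single stream, 20 MHz, indexed by MCS 0..9 */
static const unsigned char thresh_tpt[] = {
	9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
};

static unsigned expected_tpt(unsigned mcs, unsigned nss, unsigned bw_shift)
{
	unsigned thr = thresh_tpt[mcs] * nss;

	/* each bandwidth doubling (20->40->80->160) doubles the estimate */
	return thr << bw_shift;
}

int main(void)
{
	/* MCS 7, 2 streams, 80 MHz (shift 2): 96 * 2 * 4 = 768,
	 * i.e. roughly 76.8 Mbps */
	printf("%u x 100kbps\n", expected_tpt(7, 2, 2));
	return 0;
}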
+
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u32 status)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+ status & RX_MPDU_RES_STATUS_CSUM_OK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_info *phy_info;
+ struct iwl_rx_mpdu_res_start *rx_res;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ u32 len, pkt_len = iwl_rx_packet_payload_len(pkt);
+ u32 rate_n_flags;
+ u32 rx_pkt_status;
+ u8 crypt_len = 0;
+
+ if (unlikely(pkt_len < sizeof(*rx_res))) {
+ IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n");
+ return;
+ }
+
+ phy_info = &mvm->last_phy_info;
+ rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+ len = le16_to_cpu(rx_res->byte_count);
+
+ if (unlikely(len + sizeof(*rx_res) + sizeof(__le32) > pkt_len)) {
+ IWL_DEBUG_DROP(mvm, "FW lied about packet len\n");
+ return;
+ }
+
+ rx_pkt_status = get_unaligned_le32((__le32 *)
+ (pkt->data + sizeof(*rx_res) + len));
+
+ /* Don't use dev_alloc_skb(); we'll have enough headroom once
+ * the ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+ !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+ rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+ rx_status->band =
+ (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ rx_status->freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+ rx_status->band);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+ iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
+
+ IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+ (unsigned long long)rx_status->mactime);
+
+ rcu_read_lock();
+ if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
+ u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;
+
+ id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
+
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ if (IS_ERR(sta))
+ sta = NULL;
+ }
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /* This is fine since we prevent two stations with the same
+ * address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mvmsta->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+ * Don't even try to decrypt a MCAST frame that was received
+ * before the managed vif is authorized, we'd fail anyway.
+ */
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ vif->type == NL80211_IFTYPE_STATION &&
+ !mvmvif->authorized &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ /*
+ * drop the packet if HW decryption failed
+ */
+ if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+ &crypt_len)) {
+ IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+ rx_pkt_status);
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
+
+ /* We have tx blocked stations (with CS bit). If we heard
+ * frames from a blocked station on a new channel we can
+ * TX to it again.
+ */
+ if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ }
+
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
+ struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+ s32 rssi;
+
+ rssi_trig = (void *)trig->data;
+ rssi = le32_to_cpu(rssi_trig->rssi);
+
+ if (rx_status->signal < rssi)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+#if defined(__linux__)
+ NULL);
+#elif defined(__FreeBSD__)
+ "");
+#endif
+ }
+
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+ rate_n_flags);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+ }
+ rcu_read_unlock();
+
+ /* set the preamble flag if appropriate */
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+ /*
+ * We know which subframes of an A-MPDU belong
+ * together since we get a single PHY response
+ * from the firmware for all of them
+ */
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ }
+
+ /* Set up the HT phy flags */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) &&
+ rate_n_flags & RATE_MCS_SGI_MSK_V1)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK_V1)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else {
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ return;
+ }
+ rx_status->rate_idx = rate;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_update_frame_stats(mvm, rate_n_flags,
+ rx_status->flag & RX_FLAG_AMPDU_DETAILS);
+#endif
+
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
+ crypt_len, rxb);
+}
+
+struct iwl_mvm_stat_data {
+ struct iwl_mvm *mvm;
+ __le32 flags;
+ __le32 mac_id;
+ u8 beacon_filter_average_energy;
+ __le32 *beacon_counter;
+ u8 *beacon_average_energy;
+};
+
+struct iwl_mvm_stat_data_all_macs {
+ struct iwl_mvm *mvm;
+ __le32 flags;
+ struct iwl_stats_ntfy_per_mac *per_mac;
+};
+
+static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct iwl_mvm_vif_link_info *link_info,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
+ int last_event;
+ s8 exit_esr_thresh;
+
+ if (sig == 0) {
+ IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+ return;
+ }
+
+ link_info->bf_data.ave_beacon_signal = sig;
+
+ /* BT Coex */
+ if (link_info->bf_data.bt_coex_min_thold !=
+ link_info->bf_data.bt_coex_max_thold) {
+ last_event = link_info->bf_data.last_bt_coex_event;
+ if (sig > link_info->bf_data.bt_coex_max_thold &&
+ (last_event <= link_info->bf_data.bt_coex_min_thold ||
+ last_event == 0)) {
+ link_info->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+ } else if (sig < link_info->bf_data.bt_coex_min_thold &&
+ (last_event >= link_info->bf_data.bt_coex_max_thold ||
+ last_event == 0)) {
+ link_info->bf_data.last_bt_coex_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+ sig);
+ iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+ }
+ }
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ /* CQM Notification */
+ last_event = link_info->bf_data.last_cqm_event;
+ if (thold && sig < thold && (last_event == 0 ||
+ sig < last_event - hyst)) {
+ link_info->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ sig,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ link_info->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ sig,
+ GFP_KERNEL);
+ }
+
+ /* ESR recalculation */
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ /* We're not in EMLSR and our signal is bad - maybe try to switch links */
+ if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) {
+ iwl_mvm_int_mlo_scan(mvm, vif);
+ return;
+ }
+
+ /* We are in EMLSR, check if we need to exit */
+ exit_esr_thresh =
+ iwl_mvm_get_esr_rssi_thresh(mvm,
+ &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_esr_thresh)
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
+ iwl_mvm_get_other_link(vif,
+ bss_conf->link_id));
+}
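
The CQM part of the function is a classic threshold-with-hysteresis filter: a low (or high) event is reported only when the signal crosses the threshold and has moved at least hyst dB past the last reported value, so a signal hovering around the threshold doesn't generate an event storm. A self-contained sketch of just that filter (thresholds and samples are made up):

#include <stdio.h>

struct cqm { int last_event; };

static const char *cqm_update(struct cqm *c, int sig, int thold, int hyst)
{
	if (thold && sig < thold &&
	    (c->last_event == 0 || sig < c->last_event - hyst)) {
		c->last_event = sig;
		return "LOW";
	}
	if (sig > thold &&
	    (c->last_event == 0 || sig > c->last_event + hyst)) {
		c->last_event = sig;
		return "HIGH";
	}
	return "-";	/* inside the hysteresis band: no event */
}

int main(void)
{
	struct cqm c = { 0 };
	int samples[] = { -72, -74, -75, -73, -68, -66 };

	/* with thold -70, hyst 3: LOW once, then HIGH once, nothing else */
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("sig %d -> %s\n", samples[i],
		       cqm_update(&c, samples[i], -70, 3));
	return 0;
}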
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data *data = _data;
+ int sig = -data->beacon_filter_average_energy;
+ u16 id = le32_to_cpu(data->mac_id);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 vif_id = mvmvif->id;
+
+ /* This doesn't need the MAC ID check since it's not taking the
+ * data copied into the "data" struct, but rather the data from
+ * the notification directly.
+ */
+ mvmvif->deflink.beacon_stats.num_beacons =
+ le32_to_cpu(data->beacon_counter[vif_id]);
+ mvmvif->deflink.beacon_stats.avg_signal =
+ -data->beacon_average_energy[vif_id];
+
+ if (mvmvif->id != id)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->deflink.beacon_stats.accu_num_beacons +=
+ mvmvif->deflink.beacon_stats.num_beacons;
+
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
+}
+
+static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data_all_macs *data = _data;
+ struct iwl_stats_ntfy_per_mac *mac_stats;
+ int sig;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 vif_id = mvmvif->id;
+
+ if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mac_stats = &data->per_mac[vif_id];
+
+ mvmvif->deflink.beacon_stats.num_beacons =
+ le32_to_cpu(mac_stats->beacon_counter);
+ mvmvif->deflink.beacon_stats.avg_signal =
+ -le32_to_cpu(mac_stats->beacon_average_energy);
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ mvmvif->deflink.beacon_stats.accu_num_beacons +=
+ mvmvif->deflink.beacon_stats.num_beacons;
+
+ sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
+
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
+}
+
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_stats *trig_stats;
+ u32 trig_offset, trig_thold;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
+ if (!trig)
+ return;
+
+ trig_stats = (void *)trig->data;
+
+ trig_offset = le32_to_cpu(trig_stats->stop_offset);
+ trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+ if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+ return;
+
+#if defined(__linux__)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+#elif defined(__FreeBSD__)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "");
+#endif
+}
+
+static void iwl_mvm_stats_energy_iter(void *_data,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u8 *energy = _data;
+ u32 sta_id = mvmsta->deflink.sta_id;
+
+ if (WARN_ONCE(sta_id >= IWL_STATION_COUNT_MAX, "sta_id %d >= %d",
+ sta_id, IWL_STATION_COUNT_MAX))
+ return;
+
+ if (energy[sta_id])
+ mvmsta->deflink.avg_energy = energy[sta_id];
+
+}
+
+static void
+iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
+ __le32 *rx_bytes_le)
+{
+ int i;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 rx_bytes = le32_to_cpu(rx_bytes_le[i]);
+ u32 airtime = le32_to_cpu(air_time_le[i]);
+
+ mdata->rx.airtime += airtime;
+ mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+ if (airtime) {
+ /* re-init every time to store rate from FW */
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+ rx_bytes * 8 / airtime);
+ }
+ }
+ spin_unlock(&mvm->tcm.lock);
+}
+
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ mvm->phy_ctxts[i].channel_load_not_by_us =
+ le32_to_cpu(per_phy[i].channel_load_not_by_us);
+ }
+}
+
+static void
+iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
+ struct iwl_statistics_operational_ntfy *stats)
+{
+ struct iwl_mvm_stat_data_all_macs data = {
+ .mvm = mvm,
+ .flags = stats->flags,
+ .per_mac = stats->per_mac,
+ };
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator_all_macs,
+ &data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
+}
+
+static void
+iwl_mvm_stats_ver_14(struct iwl_mvm *mvm,
+ struct iwl_statistics_operational_ntfy_ver_14 *stats)
+{
+ struct iwl_mvm_stat_data data = {
+ .mvm = mvm,
+ };
+
+ u8 beacon_average_energy[MAC_INDEX_AUX];
+ __le32 flags;
+ int i;
+
+ flags = stats->flags;
+
+ data.mac_id = stats->mac_id;
+ data.beacon_filter_average_energy =
+ le32_to_cpu(stats->beacon_filter_average_energy);
+ data.flags = flags;
+ data.beacon_counter = stats->beacon_counter;
+
+ for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
+ beacon_average_energy[i] =
+ le32_to_cpu(stats->beacon_average_energy[i]);
+
+ data.beacon_average_energy = beacon_average_energy;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
+}
+
+static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt,
+ u32 expected_size)
+{
+ struct iwl_statistics_ntfy_hdr *hdr;
+
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
+ "received invalid statistics size (%d)!, expected_size: %d\n",
+ iwl_rx_packet_payload_len(pkt), expected_size))
+ return false;
+
+ hdr = (void *)&pkt->data;
+
+ if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL ||
+ hdr->version !=
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0),
+ "received unsupported hdr type %d, version %d\n",
+ hdr->type, hdr->version))
+ return false;
+
+ if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size,
+ "received invalid statistics size in header (%d)!, expected_size: %d\n",
+ le16_to_cpu(hdr->size), expected_size))
+ return false;
+
+ return true;
+}
+
+static void
+iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_link *per_link)
+{
+ u32 air_time[MAC_INDEX_AUX] = {};
+ u32 rx_bytes[MAC_INDEX_AUX] = {};
+ int fw_link_id;
+
+ /* driver uses link ID == MAC ID */
+ for (fw_link_id = 0; fw_link_id < ARRAY_SIZE(mvm->vif_id_to_mac);
+ fw_link_id++) {
+ struct iwl_stats_ntfy_per_link *link_stats;
+ struct iwl_mvm_vif_link_info *link_info;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+ int link_id;
+ int sig;
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, fw_link_id, false);
+ if (!vif)
+ continue;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ continue;
+
+ link_id = vif->bss_conf.link_id;
+ if (link_id >= ARRAY_SIZE(mvmvif->link))
+ continue;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
+ continue;
+
+ link_stats = &per_link[fw_link_id];
+
+ link_info->beacon_stats.num_beacons =
+ le32_to_cpu(link_stats->beacon_counter);
+
+ /* we basically just use the u8 to store 8 bits and then treat
+ * it as a s8 whenever we take it out to a different type.
+ */
+ link_info->beacon_stats.avg_signal =
+ -le32_to_cpu(link_stats->beacon_average_energy);
+
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (mvm->statistics_clear)
+ mvmvif->link[link_id]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[link_id]->beacon_stats.num_beacons;
+
+ sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
+ iwl_mvm_update_link_sig(vif, sig, link_info, &vif->bss_conf);
+
+ if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX,
+ "invalid mvmvif id: %d", mvmvif->id))
+ continue;
+
+ air_time[mvmvif->id] +=
+ le32_to_cpu(per_link[fw_link_id].air_time);
+ rx_bytes[mvmvif->id] +=
+ le32_to_cpu(per_link[fw_link_id].rx_bytes);
+ }
+
+ /* Don't update in case the statistics were not cleared, since
+ * we would end up counting the same airtime twice: once in the
+ * TCM request and once in the statistics notification.
+ */
+ if (mvm->statistics_clear) {
+ __le32 air_time_le[MAC_INDEX_AUX];
+ __le32 rx_bytes_le[MAC_INDEX_AUX];
+ int vif_id;
+
+ for (vif_id = 0; vif_id < ARRAY_SIZE(air_time_le); vif_id++) {
+ air_time_le[vif_id] = cpu_to_le32(air_time[vif_id]);
+ rx_bytes_le[vif_id] = cpu_to_le32(rx_bytes[vif_id]);
+ }
+
+ iwl_mvm_update_tcm_from_stats(mvm, air_time_le, rx_bytes_le);
+ }
+}
+
+#define SEC_LINK_MIN_PERC 10
+#define SEC_LINK_MIN_TX 3000
+#define SEC_LINK_MIN_RX 400
+
+/* Accept a window that is up to ~20% short, to avoid issues due to jitter */
+#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
+
+static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ u8 sec_link;
+ bool skip = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
+
+ if (!mvmvif->esr_active || !mvmvif->ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
+ /* We only count for the AP sta in a MLO connection */
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ /* Get the FW ID of the secondary link */
+ sec_link = iwl_mvm_get_other_link(bss_vif,
+ iwl_mvm_get_primary_link(bss_vif));
+ if (WARN_ON(!mvmvif->link[sec_link]))
+ return;
+ sec_link = mvmvif->link[sec_link]->fw_link_id;
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+
+ /* Link IDs that don't exist will contain 0 */
+ for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
+ total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
+ total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
+ }
+
+ sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
+ sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
+
+ /*
+ * In EMLSR we have statistics every 5 seconds, so we can reset
+ * the counters upon every statistics notification.
+ * The FW sends the notification regularly, but it will be
+ * misaligned at the start. Skipping the measurement if it is
+ * short will synchronize us.
+ */
+ if (jiffies - mvmsta->mpdu_counters[q].window_start <
+ IWL_MVM_TPT_MIN_COUNT_WINDOW)
+ skip = true;
+ mvmsta->mpdu_counters[q].window_start = jiffies;
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ if (skip) {
+ IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - block EMLSR */
+ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
+ total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
+ iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
+ iwl_mvm_get_primary_link(bss_vif));
+ return;
+ }
+
+ IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link, sec_link_tx, sec_link_rx);
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+ * The TX/RX percentage is checked only if the total MPDU count
+ * exceeds the required minimum. In addition, RX is checked only
+ * if the TX check failed.
+ */
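+ /*
+ * Worked example: total_tx == 5000 exceeds SEC_LINK_MIN_TX, and
+ * sec_link_tx == 300 gives sec_link_tx_perc == 6, which is below
+ * SEC_LINK_MIN_PERC (10), so EMLSR is exited below.
+ */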
+ if ((total_tx > SEC_LINK_MIN_TX &&
+ sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
+ (total_rx > SEC_LINK_MIN_RX &&
+ sec_link_rx_perc < SEC_LINK_MIN_PERC))
+ iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
+ iwl_mvm_get_primary_link(bss_vif));
+}
+
+void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ u8 average_energy[IWL_STATION_COUNT_MAX];
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_system_statistics_notif_oper *stats;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP,
+ STATISTICS_OPER_NOTIF, 0);
+
+ if (notif_ver != 3) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Oper stats notif ver %d is not supported\n",
+ notif_ver);
+ return;
+ }
+
+ stats = (void *)&pkt->data;
+ iwl_mvm_stat_iterator_all_links(mvm, stats->per_link);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] =
+ le32_to_cpu(stats->per_sta[i].average_energy);
+
+ ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
+ average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
+
+ iwl_mvm_update_esr_mode_tpt(mvm);
+}
+
+void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_system_statistics_part1_notif_oper *part1_stats;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP,
+ STATISTICS_OPER_PART1_NOTIF, 0);
+
+ if (notif_ver != 4) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Part1 stats notif ver %d is not supported\n",
+ notif_ver);
+ return;
+ }
+
+ part1_stats = (void *)&pkt->data;
+ mvm->radio_stats.rx_time = 0;
+ mvm->radio_stats.tx_time = 0;
+ for (i = 0; i < ARRAY_SIZE(part1_stats->per_link); i++) {
+ mvm->radio_stats.rx_time +=
+ le64_to_cpu(part1_stats->per_link[i].rx_time);
+ mvm->radio_stats.tx_time +=
+ le64_to_cpu(part1_stats->per_link[i].tx_time);
+ }
+}
+
+static void
+iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ u8 average_energy[IWL_STATION_COUNT_MAX];
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 rx_bytes[MAC_INDEX_AUX];
+ __le32 flags = 0;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0);
+
+ if (WARN_ONCE(notif_ver > 15,
+ "invalid statistics version id: %d\n", notif_ver))
+ return;
+
+ if (notif_ver == 14) {
+ struct iwl_statistics_operational_ntfy_ver_14 *stats =
+ (void *)pkt->data;
+
+ if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
+ return;
+
+ iwl_mvm_stats_ver_14(mvm, stats);
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->on_time_scan);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] = le32_to_cpu(stats->average_energy[i]);
+
+ for (i = 0; i < ARRAY_SIZE(air_time); i++) {
+ air_time[i] = stats->air_time[i];
+ rx_bytes[i] = stats->rx_bytes[i];
+ }
+ }
+
+ if (notif_ver == 15) {
+ struct iwl_statistics_operational_ntfy *stats =
+ (void *)pkt->data;
+
+ if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
+ return;
+
+ iwl_mvm_stats_ver_15(mvm, stats);
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->on_time_scan);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] =
+ le32_to_cpu(stats->per_sta[i].average_energy);
+
+ for (i = 0; i < ARRAY_SIZE(air_time); i++) {
+ air_time[i] = stats->per_mac[i].air_time;
+ rx_bytes[i] = stats->per_mac[i].rx_bytes;
+ }
+ }
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+ ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
+ average_energy);
+ /*
+ * Don't update if the statistics were not cleared, since we
+ * would end up counting the same airtime twice: once in the TCM
+ * request and once in the statistics notification.
+ */
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_stat_data data = {
+ .mvm = mvm,
+ };
+ __le32 *bytes, *air_time, flags;
+ int expected_size;
+ u8 *energy;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return;
+
+ /* From ver 14 and up we use TLV statistics format */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0) >= 14)
+ return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ if (iwl_mvm_has_new_rx_api(mvm))
+ expected_size = sizeof(struct iwl_notif_statistics_v11);
+ else
+ expected_size = sizeof(struct iwl_notif_statistics_v10);
+ } else {
+ expected_size = sizeof(struct iwl_notif_statistics);
+ }
+
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
+ "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.common.beacon_filter_average_energy;
+
+ mvm->rx_stats_v3 = stats->rx;
+
+ mvm->radio_stats.rx_time =
+ le64_to_cpu(stats->general.common.rx_time);
+ mvm->radio_stats.tx_time =
+ le64_to_cpu(stats->general.common.tx_time);
+ mvm->radio_stats.on_time_rf =
+ le64_to_cpu(stats->general.common.on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->general.common.on_time_scan);
+
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
+ flags = stats->flag;
+ } else {
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
+
+ data.mac_id = stats->rx.general.mac_id;
+ data.beacon_filter_average_energy =
+ stats->general.common.beacon_filter_average_energy;
+
+ mvm->rx_stats = stats->rx;
+
+ mvm->radio_stats.rx_time =
+ le64_to_cpu(stats->general.common.rx_time);
+ mvm->radio_stats.tx_time =
+ le64_to_cpu(stats->general.common.tx_time);
+ mvm->radio_stats.on_time_rf =
+ le64_to_cpu(stats->general.common.on_time_rf);
+ mvm->radio_stats.on_time_scan =
+ le64_to_cpu(stats->general.common.on_time_scan);
+
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
+ flags = stats->flag;
+ }
+ data.flags = flags;
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+ struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
+
+ energy = (void *)&v11->load_stats.avg_energy;
+ bytes = (void *)&v11->load_stats.byte_count;
+ air_time = (void *)&v11->load_stats.air_time;
+ } else {
+ struct iwl_notif_statistics *stats = (void *)&pkt->data;
+
+ energy = (void *)&stats->load_stats.avg_energy;
+ bytes = (void *)&stats->load_stats.byte_count;
+ air_time = (void *)&stats->load_stats.air_time;
+ }
+ ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
+ energy);
+
+ /*
+ * Don't update if the statistics were not cleared, since we
+ * would end up counting the same airtime twice: once in the TCM
+ * request and once in the statistics notification.
+ */
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, air_time, bytes);
+
+}
+
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
+}
+
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX);
+
+ rcu_read_lock();
+ for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+ struct ieee80211_sta *sta;
+ u8 sta_id, tid;
+ u64 bitmap;
+ u32 ssn;
+ u16 ratid;
+ u16 received_mpdu;
+
+ ratid = le16_to_cpu(notif->ra_tid[i]);
+ /* check that this TID is valid */
+ if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
+ continue;
+
+ received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+ if (received_mpdu == 0)
+ continue;
+
+ tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+ /* get the station */
+ sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
+ >> BA_WINDOW_STATUS_STA_ID_POS;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+ bitmap = le64_to_cpu(notif->bitmap[i]);
+ ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+ /* update mac80211 with the bitmap for the reordering buffer */
+ ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
+ received_mpdu);
+ }
+ rcu_read_unlock();
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/rxmq.c b/sys/contrib/dev/iwlwifi/mvm/rxmq.c
new file mode 100644
index 000000000000..71210b8b9edd
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/rxmq.c
@@ -0,0 +1,2658 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#if defined(__FreeBSD__)
+#include <net/ieee80211_radiotap.h>
+#endif
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+#include "time-sync.h"
+
+static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
+ int queue, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+ struct iwl_mvm_key_pn *ptk_pn;
+ int res;
+ u8 tid, keyidx;
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ u8 *extiv;
+
+ /* do PN checking */
+
+ /* multicast and non-data frames only arrive on the default queue */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return 0;
+
+ /* do not check PN for open AP */
+ if (!(stats->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ /*
+ * avoid checking for default queue - we don't want to replicate
+ * all the logic that's necessary for checking the PN on fragmented
+ * frames, leave that to mac80211
+ */
+ if (queue == 0)
+ return 0;
+
+ /* if we got here, this is certainly either CCMP or GCMP */
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_DEBUG_DROP(mvm,
+ "expected hw-decrypted unicast frame for station\n");
+ return -1;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ keyidx = extiv[3] >> 6;
+
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
+ if (!ptk_pn)
+ return -1;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = 0;
+
+ /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+ if (tid >= IWL_MAX_TID_COUNT)
+ return -1;
+
+ /* load pn */
+ pn[0] = extiv[7];
+ pn[1] = extiv[6];
+ pn[2] = extiv[5];
+ pn[3] = extiv[4];
+ pn[4] = extiv[1];
+ pn[5] = extiv[0];
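+ /*
+ * The CCMP/GCMP ext IV carries the PN as PN0, PN1, <reserved>,
+ * <key byte>, PN2..PN5; the copy above reorders it into
+ * big-endian form (pn[0] == PN5) suitable for comparing against
+ * the stored PN with memcmp() below.
+ */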
+
+ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+ if (res < 0)
+ return -1;
+ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+ return -1;
+
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ stats->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+/* iwl_mvm_create_skb - add the rxb contents to a new skb */
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ unsigned int headlen, fraglen, pad_len = 0;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ len -= 2;
+ pad_len = 2;
+ }
+
+ /*
+ * For a non-monitor interface, strip the bytes the RADA might not have
+ * removed (it might be disabled, e.g. for mgmt frames). Since a monitor
+ * interface cannot coexist with other interfaces, this removal is safe
+ * and sufficient; in monitor mode there's no decryption being done.
+ */
+ if (len > mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS))
+ len -= mic_crc_len;
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pull in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ headlen = (len <= skb_tailroom(skb)) ? len :
+ hdrlen + crypt_len + 8;
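+ /*
+ * E.g. a 3000-byte A-MSDU that doesn't fit in the tailroom only
+ * pulls hdrlen + crypt_len + 8 bytes into skb->head; the rest is
+ * attached below as a page fragment.
+ */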
+
+ /* The firmware may align the packet to DWORD.
+ * The padding is inserted after the IV.
+ * After copying the header + IV skip the padding if
+ * present before copying packet data.
+ */
+ hdrlen += crypt_len;
+
+ if (unlikely(headlen < hdrlen))
+ return -EINVAL;
+
+ /* skb_put_data() doesn't move already-queued data, and it is the only
+ * way we append data here, so skb->data + skb->len is exactly where
+ * the header will be placed
+ */
+ skb_set_mac_header(skb, skb->len);
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+ /*
+ * If we did CHECKSUM_COMPLETE, the hardware only does it right for
+ * certain cases and starts the checksum after the SNAP. Check if
+ * this is the case - it's easier to just bail out to CHECKSUM_NONE
+ * in the cases the hardware didn't handle, since it's rare to see
+ * such packets, even though the hardware did calculate the checksum
+ * in this case, just starting after the MAC header instead.
+ *
+ * Starting from Bz hardware, it calculates starting directly after
+ * the MAC header, so that matches mac80211's expectation.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ struct {
+ u8 hdr[6];
+ __be16 type;
+ } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+ if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+ !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+ (shdr->type != htons(ETH_P_IP) &&
+ shdr->type != htons(ETH_P_ARP) &&
+ shdr->type != htons(ETH_P_IPV6) &&
+ shdr->type != htons(ETH_P_8021Q) &&
+ shdr->type != htons(ETH_P_PAE) &&
+ shdr->type != htons(ETH_P_TDLS))))
+ skb->ip_summed = CHECKSUM_NONE;
+ else if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ /* mac80211 assumes full CSUM including SNAP header */
+ skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
+ }
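+ /*
+ * On pre-Bz devices the rcsum adjustment above folds the 8 SNAP
+ * bytes back into the sum, so the resulting CHECKSUM_COMPLETE
+ * value covers everything after the MAC header, as mac80211
+ * expects.
+ */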
+
+ fraglen = len - headlen;
+
+ if (fraglen) {
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ return 0;
+}
+
+/* put a TLV on the skb and return the data pointer
+ *
+ * Also pad the length to a multiple of 4 and zero out the data part
+ */
+static void *
+iwl_mvm_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+ struct ieee80211_radiotap_tlv *tlv;
+
+ tlv = skb_put(skb, sizeof(*tlv));
+ tlv->type = cpu_to_le16(type);
+ tlv->len = cpu_to_le16(len);
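+ /* e.g. len == 6 reserves ALIGN(6, 4) == 8 bytes, all zeroed below;
+ * the caller fills in the first 6
+ */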
+ return skb_put_zero(skb, ALIGN(len, 4));
+}
+
+static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_vendor_content *radiotap;
+ const u16 vendor_data_len = sizeof(mvm->cur_aid);
+
+ if (!mvm->cur_aid)
+ return;
+
+ radiotap = iwl_mvm_radiotap_put_tlv(skb,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
+ sizeof(*radiotap) + vendor_data_len);
+
+ /* Intel OUI */
+ radiotap->oui[0] = 0xf6;
+ radiotap->oui[1] = 0x54;
+ radiotap->oui[2] = 0x25;
+ /* radiotap sniffer config sub-namespace */
+ radiotap->oui_subtype = 1;
+ radiotap->vendor_type = 0;
+
+ /* fill the data now */
+ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
+
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+}
+
+/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
+ kfree_skb(skb);
+ return;
+ }
+
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+}
+
+static bool iwl_mvm_used_average_energy(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct iwl_mvm_vif *mvm_vif;
+ struct ieee80211_vif *vif;
+ u32 id;
+
+ if (unlikely(!hdr || !desc))
+ return false;
+
+ if (likely(!ieee80211_is_beacon(hdr->frame_control)))
+ return false;
+
+ /* for the link conf lookup */
+ guard(rcu)();
+
+ /* MAC or link ID depending on FW, but driver has them equal */
+ id = u8_get_bits(desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_MAC_MASK);
+
+ /* >= means AUX MAC/link ID, no energy correction needed then */
+ if (id >= ARRAY_SIZE(mvm->vif_id_to_mac))
+ return false;
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif)
+ return false;
+
+ mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+ * If we found the MAC by MAC or link ID, then the frame was
+ * received on that link; since it passed beacon filtering, it
+ * came from the AP the link is connected to.
+ */
+
+ /* skip also in case we don't have it (yet) */
+ if (!mvm_vif->deflink.average_beacon_energy)
+ return false;
+
+ IWL_DEBUG_STATS(mvm, "energy override by average %d\n",
+ mvm_vif->deflink.average_beacon_energy);
+ rx_status->signal = -mvm_vif->deflink.average_beacon_energy;
+ return true;
+}
+
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u32 rate_n_flags, int energy_a,
+ int energy_b)
+{
+ int max_energy;
+
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
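+ /*
+ * The firmware reports energy as a positive attenuation in dBm,
+ * e.g. energy_a == 40 means -40 dBm; 0 means "no measurement"
+ * and is mapped to S8_MIN above so it never wins the max().
+ */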
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ if (iwl_mvm_used_average_energy(mvm, desc, hdr, rx_status))
+ return;
+
+ rx_status->signal = max_energy;
+ rx_status->chains = u32_get_bits(rate_n_flags, RATE_MCS_ANT_AB_MSK);
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+}
+
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 status,
+ struct ieee80211_rx_status *stats)
+{
+ struct wireless_dev *wdev;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif;
+ u8 keyid;
+ struct ieee80211_key_conf *key;
+ u32 len = le16_to_cpu(desc->mpdu_len);
+ const u8 *frame = (void *)hdr;
+
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ /*
+ * For non-beacon frames, we don't really care. But beacons may
+ * be filtered out, and we thus need the firmware's replay
+ * detection, otherwise beacons the firmware previously
+ * filtered could be replayed without us noticing - and it
+ * can filter a lot, though usually only if nothing has
+ * changed.
+ */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ /* key mismatch - will also report !MIC_OK but we shouldn't count it */
+ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
+ goto report;
+
+ /* good cases */
+ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+ !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ }
+
+ /*
+ * both keys will have the same cipher and MIC length, use
+ * whichever one is available
+ */
+ key = rcu_dereference(mvmvif->bcn_prot.keys[0]);
+ if (!key) {
+ key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
+ if (!key)
+ goto report;
+ }
+
+ if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+ goto report;
+
+ /* get the real key ID */
+ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+ /* and if that's the other key, look it up */
+ if (keyid != key->keyidx) {
+ /*
+ * shouldn't happen since the firmware checked it, but be
+ * safe in case e.g. the MIC length is wrong too
+ */
+ if (keyid != 6 && keyid != 7)
+ return -1;
+ key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
+ if (!key)
+ goto report;
+ }
+
+ /* Report status to mac80211 */
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ ieee80211_key_mic_failure(key);
+ else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
+
+ return -1;
+}
+
+static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *stats, u16 phy_info,
+ struct iwl_rx_mpdu_desc *desc,
+ u32 pkt_flags, int queue, u8 *crypt_len)
+{
+ u32 status = le32_to_cpu(desc->status);
+
+ /*
+ * Drop UNKNOWN frames in aggregation, unless in monitor mode
+ * (where we don't have the keys).
+ * We limit this to aggregation because in TKIP this is a valid
+ * scenario, since we may not have the (correct) TTAK (phase 1
+ * key) in the firmware.
+ */
+ if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) {
+ IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n");
+ return -1;
+ }
+
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+ return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats);
+
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ /* TODO: handle packets encrypted with unknown alg */
+#if defined(__FreeBSD__)
+ /* XXX-BZ do similar to rx.c for now as these are plenty. */
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_ENC_ERR)
+ return (0);
+#endif
+
+ switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+ case IWL_RX_MPDU_STATUS_SEC_CCM:
+ case IWL_RX_MPDU_STATUS_SEC_GCM:
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+ /* alg is CCM: check MIC only */
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
+ return -1;
+ }
+
+ stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ *crypt_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_TKIP:
+ /* Don't drop the frame and decrypt it in SW */
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+ !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+ return 0;
+
+ if (mvm->trans->mac_cfg->gen2 &&
+ !(status & RX_MPDU_RES_STATUS_MIC_OK))
+ stats->flag |= RX_FLAG_MMIC_ERROR;
+
+ *crypt_len = IEEE80211_TKIP_IV_LEN;
+ fallthrough;
+ case IWL_RX_MPDU_STATUS_SEC_WEP:
+ if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+ return -1;
+
+ stats->flag |= RX_FLAG_DECRYPTED;
+ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_WEP)
+ *crypt_len = IEEE80211_WEP_IV_LEN;
+
+ if (pkt_flags & FH_RSCSR_RADA_EN) {
+ stats->flag |= RX_FLAG_ICV_STRIPPED;
+ if (mvm->trans->mac_cfg->gen2)
+ stats->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ return -1;
+ stats->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
+ break;
+ default:
+ /*
+ * Sometimes we can get frames that were not decrypted
+ * because the firmware didn't have the keys yet. This can
+ * happen after connection where we can get multicast frames
+ * before the GTK is installed.
+ * Silently drop those frames.
+ * Also drop un-decrypted frames in monitor mode.
+ */
+ if (!is_multicast_ether_addr(hdr->addr1) &&
+ !mvm->monitor_on && net_ratelimit())
+#if defined(__linux__)
+ IWL_WARN(mvm, "Unhandled alg: 0x%x\n", status);
+#elif defined(__FreeBSD__)
+ IWL_WARN(mvm, "%s: Unhandled alg: 0x%x\n", __func__, status);
+#endif
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+ u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
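+ /*
+ * raw_xsum is the folded ones'-complement sum computed by
+ * the hardware; inverting and unfolding it yields the
+ * __wsum value CHECKSUM_COMPLETE expects.
+ */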
+ }
+ } else {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif;
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+/*
+ * Returns true if a packet is a duplicate or has an invalid TID and
+ * should be dropped. Also updates the A-MSDU PN tracking info.
+ */
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_rxq_dup_data *dup_data;
+ u8 tid, sub_frame_idx;
+
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
+ dup_data = &mvm_sta->dup_data[queue];
+
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ /* frame has qos control */
+ tid = ieee80211_get_tid(hdr);
+ if (tid >= IWL_MAX_TID_COUNT)
+ return true;
+ } else {
+ tid = IWL_MAX_TID_COUNT;
+ }
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ dup_data->last_sub_frame[tid] >= sub_frame_idx))
+ return true;
+
+ /* Allow the same PN as the first sub-frame for following sub-frames */
+ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ sub_frame_idx > dup_data->last_sub_frame[tid] &&
+ desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
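+ /*
+ * E.g. sub-frames 0 and 1 of one A-MSDU share both seq_ctrl and
+ * PN: sub-frame 1 arrives with a higher index, so it is not a
+ * duplicate and may legitimately repeat the PN of sub-frame 0.
+ */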
+
+ dup_data->last_seq[tid] = hdr->seq_ctrl;
+ dup_data->last_sub_frame[tid] = sub_frame_idx;
+
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+ return false;
+}
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mvm_baid_data *baid_data,
+ struct iwl_mvm_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
+ u16 ssn = reorder_buf->head_sn;
+
+ lockdep_assert_held(&reorder_buf->lock);
+
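+ /*
+ * Example: head_sn == 100 and nssn == 103 walks SN 100..102,
+ * flushing whatever is stored in those slots (possibly nothing),
+ * and leaves head_sn == 103.
+ */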
+ while (ieee80211_sn_less(ssn, nssn)) {
+ int index = ssn % baid_data->buf_size;
+ struct sk_buff_head *skb_list = &entries[index].frames;
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /*
+ * Empty the list. It may hold more than one frame for an
+ * A-MSDU. An empty list is valid as well, since the nssn
+ * indicates the frames were received.
+ */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+}
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+ struct iwl_mvm_delba_data *data)
+{
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ u8 baid = data->baid;
+ u32 sta_id;
+
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ ba_data->buf_size));
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ u8 baid, u16 nssn, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+ u32 sta_id;
+
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+ baid, nssn);
+
+ if (IWL_FW_CHECK(mvm,
+ baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map),
+ "invalid BAID from FW: %d\n", baid))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (!ba_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID %d but not allocated, invalid frame release!\n",
+ baid);
+ goto out;
+ }
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data,
+ reorder_buf, nssn);
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rxq_sync_notification *notif;
+ struct iwl_mvm_internal_rxq_notif *internal_notif;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+
+ notif = (void *)pkt->data;
+ internal_notif = (void *)notif->payload;
+
+ if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif),
+ "invalid notification size %d (%d)",
+ len, (int)(sizeof(*notif) + sizeof(*internal_notif))))
+ return;
+ len -= sizeof(*notif) + sizeof(*internal_notif);
+
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
+ return;
+
+ switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
+ WARN_ONCE(len, "invalid empty notification size %d", len);
+ break;
+ case IWL_MVM_RXQ_NOTIF_DEL_BA:
+ if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data),
+ "invalid delba notification size %d (%d)",
+ len, (int)sizeof(struct iwl_mvm_delba_data)))
+ break;
+ iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
+ }
+
+ if (internal_notif->sync) {
+ WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state),
+ "queue sync: queue %d responded a second time!\n",
+ queue);
+ if (READ_ONCE(mvm->queue_sync_state) == 0)
+ wake_up(&mvm->rx_sync_waitq);
+ }
+}
+
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be
+ * passed to the upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ int queue,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct iwl_mvm_baid_data *baid_data;
+ struct iwl_mvm_reorder_buffer *buffer;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ bool last_subframe =
+ desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+#if defined(__linux__)
+ u8 tid = ieee80211_get_tid(hdr);
+#elif defined(__FreeBSD__)
+ u8 tid;
+#endif
+ struct iwl_mvm_reorder_buf_entry *entries;
+ u32 sta_mask;
+ int index;
+ u16 nssn, sn;
+ u8 baid;
+
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000)
+ return false;
+
+ /*
+ * This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ * This also covers pure monitor mode, in which case we won't
+ * have any BA sessions.
+ */
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ /* no sta yet */
+ if (WARN_ONCE(IS_ERR_OR_NULL(sta),
+ "Got valid BAID without a valid station assigned\n"))
+ return false;
+
+ /* not a data packet nor a BAR */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (!baid_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder);
+ return false;
+ }
+
+#if defined(__FreeBSD__)
+ tid = ieee80211_get_tid(hdr);
+#endif
+ sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+
+ if (IWL_FW_CHECK(mvm,
+ tid != baid_data->tid ||
+ !(sta_mask & baid_data->sta_mask),
+ "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid,
+ sta_mask, tid))
+ return false;
+
+ nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+ spin_lock_bh(&buffer->lock);
+
+ if (!buffer->valid) {
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+ buffer->valid = true;
+ }
+
+ /* drop any duplicated packets */
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE))
+ goto drop;
+
+ /* drop any outdated packets */
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)
+ goto drop;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = nssn;
+
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ /*
+ * Release immediately if there are no stored frames and the SN
+ * equals the head.
+ * This can happen due to the reorder timer, where the NSSN is
+ * behind head_sn. Once we have released everything and the next
+ * in-sequence frame arrives, the NSSN says we can't release
+ * immediately, even though there is no hole and we can move
+ * forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ /* put in reorder buffer */
+ index = sn % baid_data->buf_size;
+ __skb_queue_tail(&entries[index].frames, skb);
+ buffer->num_stored++;
+
+ /*
+ * We cannot trust the NSSN for A-MSDU sub-frames that are not the
+ * last. The reason is that the NSSN advances on the first sub-frame,
+ * and may cause the reorder buffer to advance before all the
+ * sub-frames arrive.
+ * Example: the reorder buffer contains SN 0 & 2, and we receive an
+ * A-MSDU with SN 1. The NSSN for the first sub-frame will be 3, with
+ * the result of the driver releasing SN 0, 1, 2. When sub-frame 1
+ * arrives, the reorder buffer is already ahead and it will be dropped.
+ * If the last sub-frame is not on this queue, we will get a frame
+ * release notification with an up-to-date NSSN.
+ * If this is the first frame stored in the buffer, the head_sn may
+ * be outdated. Update it based on the last NSSN to make sure it
+ * will be released when the frame release notification arrives.
+ */
+ if (!amsdu || last_subframe)
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ buffer, nssn);
+ else if (buffer->num_stored == 1)
+ buffer->head_sn = nssn;
+
+ spin_unlock_bh(&buffer->lock);
+ return true;
+
+drop:
+ kfree_skb(skb);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+}
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+ u32 reorder_data, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mvm_baid_data *data;
+
+ rcu_read_lock();
+
+ data = rcu_dereference(mvm->baid_map[baid]);
+ if (!data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+ baid, reorder_data);
+ goto out;
+ }
+
+ if (!data->timeout)
+ goto out;
+
+ timeout = data->timeout;
+ /*
+ * Do not update last_rx on every frame, to avoid cache-line
+ * bouncing between the RX queues; update it once per timeout
+ * period. Worst case, the session expires after ~2 * timeout,
+ * which doesn't matter much.
+ */
+ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+ /* Update is atomic */
+ data->last_rx = now;
+
+out:
+ rcu_read_unlock();
+}
+
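+/*
+ * Reverse the byte order of a MAC address in place, e.g.
+ * 00:11:22:33:44:55 becomes 55:44:33:22:11:00.
+ */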
+static void iwl_mvm_flip_address(u8 *addr)
+{
+ int i;
+ u8 mac_addr[ETH_ALEN];
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = addr[ETH_ALEN - i - 1];
+ ether_addr_copy(addr, mac_addr);
+}
+
+struct iwl_mvm_rx_phy_data {
+ enum iwl_rx_phy_info_type info_type;
+ __le32 d0, d1, d2, d3, eht_d4, d5;
+ __le16 d4;
+ bool with_data;
+ bool first_subframe;
+ __le32 rx_vec[4];
+
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
+};
+
+static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 phy_data2 = le32_to_cpu(phy_data->d2);
+ u32 phy_data3 = le32_to_cpu(phy_data->d3);
+ u16 phy_data4 = le16_to_cpu(phy_data->d4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+ he_mu->flags1 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
+ phy_data4),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+ he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
+ phy_data2);
+ he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
+ phy_data3);
+ he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
+ phy_data2);
+ he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
+ phy_data3);
+ }
+
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
+ phy_data4),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+ he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
+ phy_data2);
+ he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
+ phy_data3);
+ he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
+ phy_data2);
+ he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
+ phy_data3);
+ }
+}
+
+static void
+iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /*
+ * Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen though, as management frames where we need
+ * the TSF/timers are not transmitted in HE-MU.
+ */
+ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
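+ /* e.g. ru == 40 falls in 37..52: a 52-tone RU at offset 40 - 37 == 3 */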
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+
+ if (he_mu)
+ he_mu->flags2 |=
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ rate_n_flags),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ he->data6 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ rate_n_flags),
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+}
+
+static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status,
+ int queue)
+{
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_NONE:
+ case IWL_RX_PHY_INFO_TYPE_CCK:
+ case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ return;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ /* HE common */
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
+ phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_UPLINK),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ }
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_DOPPLER),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
+ break;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+}
+
+#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define IWL_MVM_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
+ typeof(enc_bits) _enc_bits = enc_bits; \
+ typeof(usig) _usig = usig; \
+ (_usig)->mask |= cpu_to_le32(_enc_bits); \
+ (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
+} while (0)
+
+#define __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ eht->data[(rt_data)] |= \
+ (cpu_to_le32 \
+ (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
+ LE32_DEC_ENC(data ## fw_data, \
+ IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
+ IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))
+
+#define _IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)
+
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4
+
+#define IWL_RX_RU_DATA_A1 2
+#define IWL_RX_RU_DATA_A2 2
+#define IWL_RX_RU_DATA_B1 2
+#define IWL_RX_RU_DATA_B2 4
+#define IWL_RX_RU_DATA_C1 3
+#define IWL_RX_RU_DATA_C2 3
+#define IWL_RX_RU_DATA_D1 4
+#define IWL_RX_RU_DATA_D2 4
+
+#define IWL_MVM_ENC_EHT_RU(rt_ru, fw_ru) \
+ _IWL_MVM_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \
+ rt_ru, \
+ IWL_RX_RU_DATA_ ## fw_ru, \
+ fw_ru)
+
+static void iwl_mvm_decode_eht_ext_mu(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data1 = phy_data->d1;
+ __le32 data2 = phy_data->d2;
+ __le32 data3 = phy_data->d3;
+ __le32 data4 = phy_data->eht_d4;
+ __le32 data5 = phy_data->d5;
+ u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data4,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MVM_ENC_USIG_VALUE_MASK
+ (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
+ LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
+ eht->data[7] |= LE32_DEC_ENC
+ (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+
+ /*
+ * Hardware labels the content channels/RU allocation values
+ * as follows:
+ * Content Channel 1 Content Channel 2
+ * 20 MHz: A1
+ * 40 MHz: A1 B1
+ * 80 MHz: A1 C1 B1 D1
+ * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2
+ * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4
+ *
+ * However firmware can only give us A1-D2, so the higher
+ * frequencies are missing.
+ */
+
+ switch (phy_bw) {
+ case RATE_MCS_CHAN_WIDTH_320:
+ /* additional values are missing in RX metadata */
+ case RATE_MCS_CHAN_WIDTH_160:
+ /* content channel 1 */
+ IWL_MVM_ENC_EHT_RU(1_2_1, A2);
+ IWL_MVM_ENC_EHT_RU(1_2_2, C2);
+ /* content channel 2 */
+ IWL_MVM_ENC_EHT_RU(2_2_1, B2);
+ IWL_MVM_ENC_EHT_RU(2_2_2, D2);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_80:
+ /* content channel 1 */
+ IWL_MVM_ENC_EHT_RU(1_1_2, C1);
+ /* content channel 2 */
+ IWL_MVM_ENC_EHT_RU(2_1_2, D1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_40:
+ /* content channel 2 */
+ IWL_MVM_ENC_EHT_RU(2_1_1, B1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_20:
+ IWL_MVM_ENC_EHT_RU(1_1_1, A1);
+ break;
+ }
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_SIG_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MVM_ENC_USIG_VALUE_MASK
+ (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
+ }
+}
+
+static void iwl_mvm_decode_eht_ext_tb(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data5 = phy_data->d5;
+
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
+ IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
+ }
+}
+
+static void iwl_mvm_decode_eht_ru(struct iwl_mvm *mvm,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht)
+{
+ u32 ru = le32_get_bits(eht->data[8],
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+ enum nl80211_eht_ru_alloc nl_ru;
+
+ /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields
+ * in an EHT variant User Info field
+ */
+
+ switch (ru) {
+ case 0 ... 36:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ break;
+ case 37 ... 52:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ break;
+ case 53 ... 60:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ break;
+ case 61 ... 64:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ break;
+ case 65 ... 66:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ break;
+ case 67:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ break;
+ case 68:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ break;
+ case 69:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ break;
+ case 70 ... 81:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ break;
+ case 82 ... 89:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ break;
+ case 90 ... 93:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ break;
+ case 94 ... 95:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ break;
+ case 96 ... 99:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ break;
+ case 100 ... 103:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ break;
+ case 104:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ break;
+ case 105 ... 106:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ break;
+ default:
+ return;
+ }
+
+ rx_status->bw = RATE_INFO_BW_EHT_RU;
+ rx_status->eht.ru = nl_ru;
+}
+
+static void iwl_mvm_decode_eht_phy_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+
+{
+ __le32 data0 = phy_data->d0;
+ __le32 data1 = phy_data->d1;
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ u8 info_type = phy_data->info_type;
+
+ /* Not in EHT range */
+ if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
+ info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
+ return;
+
+ usig->common |= cpu_to_le32
+ (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
+ if (phy_data->with_data) {
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_UPLINK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ } else {
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_UL_FLAG,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT)) {
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
+ usig->common |=
+ LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
+ }
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
+ eht->data[0] |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+	/* All RU allocation size/index info is reported in TB format */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
+ eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+
+ iwl_mvm_decode_eht_ru(mvm, rx_status, eht);
+
+	/* We only get here if IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, which
+	 * only happens in monitor mode, so there is no need to check for
+	 * monitor mode here
+	 */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
+ eht->data[1] |=
+ le32_encode_bits(mvm->monitor_p80,
+ IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
+ if (phy_data->with_data)
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ else
+ usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */
+
+ if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);
+
+ /*
+ * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
+ * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
+ */
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
+ eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
+ iwl_mvm_decode_eht_ext_tb(mvm, phy_data, rx_status, eht, usig);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
+ iwl_mvm_decode_eht_ext_mu(mvm, phy_data, rx_status, eht, usig);
+}
+
+static void iwl_mvm_rx_eht(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+ struct ieee80211_radiotap_eht *eht;
+ struct ieee80211_radiotap_eht_usig *usig;
+ size_t eht_len = sizeof(*eht);
+
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+	/* EHT and HE have the same values for LTF */
+ u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ u16 phy_info = phy_data->phy_info;
+ u32 bw;
+
+ /* u32 for 1 user_info */
+ if (phy_data->with_data)
+ eht_len += sizeof(u32);
+
+ eht = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
+
+ usig = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+ sizeof(*usig));
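+	/* both the EHT and USIG data are emitted as radiotap TLVs appended
+	 * at the end of the skb, so tell mac80211 to look for them there
+	 */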
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);
+
+	/* specific handling for 320MHz: the BW320_SLOT bit selects between
+	 * the 320MHz-1 and 320MHz-2 channelization slots
+	 */
+ bw = FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags);
+ if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
+ bw += FIELD_GET(IWL_RX_PHY_DATA0_EHT_BW320_SLOT,
+ le32_to_cpu(phy_data->d0));
+
+ usig->common |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+	/* update aggregation data for monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mvm_decode_eht_phy_data(mvm, phy_data, rx_status, eht, usig);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
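+	/* decode the combined GI/LTF field of rate_n_flags; the mapping to
+	 * GI and LTF size differs between triggered (TB) and other PPDUs
+	 */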
+ switch (FIELD_GET(RATE_MCS_HE_GI_LTF_MSK, rate_n_flags)) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ } else {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 1:
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ else
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ break;
+ case 3:
+ if (he_type != RATE_MCS_HE_TYPE_TRIG) {
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ }
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
+ eht->data[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
+ ltf) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
+ rx_status->eht.gi));
+ }
+
+
+ if (!phy_data->with_data) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
+ eht->data[7] |=
+ le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->data[7] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ } else {
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
+
+ eht->user_info[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
+ FIELD_GET(RATE_VHT_MCS_RATE_CODE_MSK,
+ rate_n_flags)) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
+ FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags)));
+ }
+}
+
+static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 ltf;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ u16 phy_info = phy_data->phy_info;
+
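+	/* start from templates with the always-known fields pre-set and
+	 * fill in the rest below
+	 */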
+ he = skb_put_data(skb, &known, sizeof(known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
+ phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
+ queue);
+
+	/* update aggregation data for monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+	/* the actual BW/RU allocation data is filled in by mac80211 */
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
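+	/* the BUILD_BUG_ON checks above guarantee that the FW HE type bits
+	 * can be copied into the radiotap format field with a plain shift
+	 */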
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ if (he_type == RATE_MCS_HE_TYPE_MU)
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ else
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ break;
+ case 1:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ } else {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 3:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ case 4:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ default:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ }
+
+ he->data5 |= le16_encode_bits(ltf,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+}
+
+static void iwl_mvm_decode_lsig(struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_lsig *lsig;
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ lsig = skb_put(skb, sizeof(*lsig));
+ lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
+ lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
+ rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
+ break;
+ default:
+ break;
+ }
+}
+
+struct iwl_rx_sta_csa {
+ bool all_sta_unblocked;
+ struct ieee80211_vif *vif;
+};
+
+static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_rx_sta_csa *rx_sta_csa = data;
+
+ if (mvmsta->vif != rx_sta_csa->vif)
+ return;
+
+ if (mvmsta->disable_tx)
+ rx_sta_csa->all_sta_unblocked = false;
+}
+
+/*
+ * Note: this also requires rx_status->band to be prefilled, as well
+ * as phy_data (apart from phy_data->info_type)
+ * Note: desc/hdr may be NULL
+ */
+static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
+ struct iwl_rx_mpdu_desc *desc,
+ struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ bool is_sgi;
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ phy_data->info_type =
+ le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rx_status->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_MOD_TYPE_HE)
+ iwl_mvm_rx_he(mvm, skb, phy_data, queue);
+
+ iwl_mvm_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
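+	/* with PTP-based RX timestamping in monitor mode, report the
+	 * PTP-adjusted time (in usec) as a 64-bit radiotap timestamp
+	 */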
+ if (mvm->rx_ts_ptp && mvm->monitor_on) {
+ u64 adj_time =
+ iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, desc, hdr, rx_status, rate_n_flags,
+ phy_data->energy_a, phy_data->energy_b);
+
+	/* EHT is in TLV format and must come after all fixed-length fields */
+ if (format == RATE_MCS_MOD_TYPE_EHT)
+ iwl_mvm_rx_eht(mvm, skb, phy_data, queue);
+
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
+ is_sgi = format == RATE_MCS_MOD_TYPE_HE ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_MOD_TYPE_CCK) && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_VHT:
+ rx_status->encoding = RX_ENC_VHT;
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_EHT:
+ rx_status->encoding = RX_ENC_EHT;
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_HT:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
+ rx_status->nss =
+ u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ rx_status->rate_idx = rate;
+
+		if (rate < 0 || rate > 0xFF) {
+			rx_status->rate_idx = 0;
+			if (net_ratelimit())
+				IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d\n",
+					rate_n_flags, rx_status->band);
+ }
+
+ break;
+ }
+ }
+}
+
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ struct ieee80211_hdr *hdr;
+ u32 len;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ u8 crypt_len = 0;
+ u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
+ size_t desc_size;
+ struct iwl_mvm_rx_phy_data phy_data = {};
+ u32 format;
+
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return;
+
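+	/* the RX descriptor layout, and thus its size, depends on the
+	 * device family
+	 */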
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ desc_size = sizeof(*desc);
+ else
+ desc_size = IWL_RX_DESC_SIZE_V1;
+
+ if (unlikely(pkt_len < desc_size)) {
+ IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n");
+ return;
+ }
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v3.rate_n_flags,
+ mvm->fw_rates_ver);
+ phy_data.channel = desc->v3.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data.energy_a = desc->v3.energy_a;
+ phy_data.energy_b = desc->v3.energy_b;
+
+ phy_data.d0 = desc->v3.phy_data0;
+ phy_data.d1 = desc->v3.phy_data1;
+ phy_data.d2 = desc->v3.phy_data2;
+ phy_data.d3 = desc->v3.phy_data3;
+ phy_data.eht_d4 = desc->phy_eht_data4;
+ phy_data.d5 = desc->v3.phy_data5;
+ } else {
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags,
+ mvm->fw_rates_ver);
+ phy_data.channel = desc->v1.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ phy_data.energy_a = desc->v1.energy_a;
+ phy_data.energy_b = desc->v1.energy_b;
+
+ phy_data.d0 = desc->v1.phy_data0;
+ phy_data.d1 = desc->v1.phy_data1;
+ phy_data.d2 = desc->v1.phy_data2;
+ phy_data.d3 = desc->v1.phy_data3;
+ }
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ len = le16_to_cpu(desc->mpdu_len);
+
+ if (unlikely(len + desc_size > pkt_len)) {
+ IWL_DEBUG_DROP(mvm, "FW lied about packet len\n");
+ return;
+ }
+
+ phy_data.with_data = true;
+ phy_data.phy_info = le16_to_cpu(desc->phy_info);
+ phy_data.d4 = desc->phy_data4;
+
+ hdr = (void *)(pkt->data + desc_size);
+	/* Don't use dev_alloc_skb(), we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ /*
+ * If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /*
+ * Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ /* set the preamble flag if appropriate */
+ if (format == RATE_MCS_MOD_TYPE_CCK &&
+ phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ u64 tsf_on_air_rise;
+
+ if (mvm->trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210)
+ tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+ else
+ tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+ rx_status->mactime = tsf_on_air_rise;
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ }
+
+ if (iwl_mvm_is_band_in_rx_supported(mvm)) {
+ u8 band = u8_get_bits(desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
+
+ rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
+ } else {
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ }
+
+	/* update aggregation data for monitor's sake on the default queue */
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit;
+
+ toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ /*
+ * Toggle is switched whenever new aggregation starts. Make
+ * sure ampdu_reference is never 0 so we can later use it to
+ * see if the frame was really part of an A-MPDU or not.
+ */
+ if (toggle_bit != mvm->ampdu_toggle) {
+ mvm->ampdu_ref++;
+ if (mvm->ampdu_ref == 0)
+ mvm->ampdu_ref++;
+ mvm->ampdu_toggle = toggle_bit;
+ phy_data.first_subframe = true;
+ }
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ }
+
+ rcu_read_lock();
+
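+	/* if the firmware already matched the frame to a station, look it
+	 * up by the firmware station ID, otherwise fall back to a lookup
+	 * by the transmitter address
+	 */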
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
+ struct ieee80211_link_sta *link_sta;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR(sta))
+ sta = NULL;
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
+ }
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /*
+ * This is fine since we prevent two stations with the same
+ * address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+ }
+
+ if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
+ le32_to_cpu(pkt->len_n_flags), queue,
+ &crypt_len)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ iwl_mvm_rx_fill_status(mvm, desc, hdr, skb, &phy_data, queue);
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
+ u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT);
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct ieee80211_vif *vif = mvmsta->vif;
+
+ if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ /*
+ * We have tx blocked stations (with CS bit). If we heard
+ * frames from a blocked station on a new channel we can
+ * TX to it again.
+ */
+ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+ struct iwl_rx_sta_csa rx_sta_csa = {
+ .all_sta_unblocked = true,
+ .vif = tx_blocked_vif,
+ };
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_rx_get_sta_block_tx,
+ &rx_sta_csa);
+
+ if (rx_sta_csa.all_sta_unblocked) {
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+ /* Unblock BCAST / MCAST station */
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
+ }
+ }
+
+ rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_RSSI);
+
+ if (trig && ieee80211_is_beacon(hdr->frame_control)) {
+ struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+ s32 rssi;
+
+ rssi_trig = (void *)trig->data;
+ rssi = le32_to_cpu(rssi_trig->rssi);
+
+ if (rx_status->signal < rssi)
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+#if defined(__linux__)
+ NULL);
+#elif defined(__FreeBSD__)
+ "");
+#endif
+ }
+
+ if (ieee80211_is_data(hdr->frame_control))
+ iwl_mvm_rx_csum(mvm, sta, skb, pkt);
+
+ if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
+ IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n",
+ le16_to_cpu(hdr->seq_ctrl));
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /*
+		 * Our hardware de-aggregates AMSDUs but copies the mac header
+		 * as is to the de-aggregated MPDUs. We need to turn off the
+		 * AMSDU bit in the QoS control ourselves.
+		 * In addition, HW reverses addr3 and addr4 - reverse them back.
+ */
+ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ if (mvm->trans->mac_cfg->device_family ==
+ IWL_DEVICE_FAMILY_9000) {
+ iwl_mvm_flip_address(hdr->addr3);
+
+ if (ieee80211_has_a4(hdr->frame_control))
+ iwl_mvm_flip_address(hdr->addr4);
+ }
+ }
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+ u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+ iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
+ }
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
+ queue);
+ }
+ }
+
+ /* management stuff on default queue */
+ if (!queue) {
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all ==
+ SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+ }
+
+ if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
+ likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
+ likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ }
+out:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
+ u32 rssi;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ struct iwl_mvm_rx_phy_data phy_data;
+ u32 format;
+
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return;
+
+ if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(struct iwl_rx_no_data)))
+ return;
+
+ rssi = le32_to_cpu(desc->rssi);
+ phy_data.d0 = desc->phy_info[0];
+ phy_data.d1 = desc->phy_info[1];
+ phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
+ phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
+ phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
+ phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+ phy_data.with_data = false;
+ phy_data.rx_vec[0] = desc->rx_vec[0];
+ phy_data.rx_vec[1] = desc->rx_vec[1];
+
+ phy_data.rate_n_flags = iwl_mvm_v3_rate_from_fw(desc->rate,
+ mvm->fw_rates_ver);
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 3) {
+ if (unlikely(iwl_rx_packet_payload_len(pkt) <
+ sizeof(struct iwl_rx_no_data_ver_3)))
+ /* invalid len for ver 3 */
+ return;
+ phy_data.rx_vec[2] = desc->rx_vec[2];
+ phy_data.rx_vec[3] = desc->rx_vec[3];
+ } else {
+ if (format == RATE_MCS_MOD_TYPE_EHT)
+ /* no support for EHT before version 3 API */
+ return;
+ }
+
+	/* Don't use dev_alloc_skb(), we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* 0-length PSDU */
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
+ case RX_NO_DATA_INFO_TYPE_NDP:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+ break;
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+ case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+ break;
+ default:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+ break;
+ }
+
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+
+ iwl_mvm_rx_fill_status(mvm, NULL, NULL, skb, &phy_data, queue);
+
+	/* No more radiotap info should be put on the skb after this point.
+	 *
+	 * Mark the current end of the skb as the mac header, so that upper
+	 * layers know where all the radiotap headers end.
+	 *
+	 * Since skb_put() doesn't move existing data and is the only way we
+	 * add data here, skb->data + skb->len is where a header would be put
+ */
+ skb_set_mac_header(skb, skb->len);
+
+ /*
+	 * Override the NSS from the rx_vec, since rate_n_flags has only
+	 * 2 bits for the NSS (a maximum of 4 spatial streams), but there
+	 * may be up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_VHT:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_MOD_TYPE_EHT:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[2],
+				      RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
+		break;
+ }
+
+ rcu_read_lock();
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_frame_release *release = (void *)pkt->data;
+
+ if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+ return;
+
+ iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
+ le16_to_cpu(release->nssn),
+ queue);
+}
+
+void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_bar_frame_release *release = (void *)pkt->data;
+ struct iwl_mvm_baid_data *baid_data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ unsigned int baid, nssn, sta_id, tid;
+
+ if (IWL_FW_CHECK(mvm, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %d (expected %zu)\n",
+ pkt_len, sizeof(*release)))
+ return;
+
+ baid = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
+ nssn = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+ sta_id = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
+ tid = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map)))
+ return;
+
+ rcu_read_lock();
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (!baid_data) {
+ IWL_DEBUG_RX(mvm,
+ "Got valid BAID %d but not allocated, invalid BAR release!\n",
+ baid);
+ goto out;
+ }
+
+ if (WARN(tid != baid_data->tid || sta_id > IWL_STATION_COUNT_MAX ||
+ !(baid_data->sta_mask & BIT(sta_id)),
+ "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid, sta_id,
+ tid))
+ goto out;
+
+ IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n",
+ nssn);
+
+ iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue);
+out:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_beacon_filter_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ /* MAC or link ID in v1/v2, but driver has the IDs equal */
+ struct iwl_beacon_filter_notif *notif = (void *)pkt->data;
+ u32 id = le32_to_cpu(notif->link_id);
+ struct iwl_mvm_vif *mvm_vif;
+ struct ieee80211_vif *vif;
+
+ /* >= means AUX MAC/link ID, no energy correction needed then */
+ if (IWL_FW_CHECK(mvm, id >= ARRAY_SIZE(mvm->vif_id_to_mac),
+ "invalid link ID %d\n", id))
+ return;
+
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
+ if (!vif)
+ return;
+
+ mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_vif->deflink.average_beacon_energy =
+ le32_to_cpu(notif->average_energy);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/scan.c b/sys/contrib/dev/iwlwifi/mvm/scan.c
new file mode 100644
index 000000000000..9ce1ce0dab34
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/scan.c
@@ -0,0 +1,3748 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include <linux/crc32.h>
+
+#include "mvm.h"
+#include "fw/api/scan.h"
+#include "iwl-io.h"
+#include "iwl-utils.h"
+
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
+
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_DWELL_FRAGMENTED 44
+#define IWL_SCAN_DWELL_EXTENDED 90
+#define IWL_SCAN_NUM_OF_FRAGS 3
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default high band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+/* adaptive dwell default low band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* number of scan channels */
+#define IWL_SCAN_NUM_CHANNELS 112
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+/* adaptive dwell number of APs override for p2p friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+/* adaptive dwell number of APs override for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+/* minimal number of 2GHz and 5GHz channels in the regular scan request */
+#define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
+
+/* Number of iterations on the channel for mei filtered scan */
+#define IWL_MEI_SCAN_NUM_ITER 5U
+
+#define WFA_TPC_IE_LEN 9
+
+struct iwl_mvm_scan_timing_params {
+ u32 suspend_time;
+ u32 max_out_time;
+};
+
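+/* out-of-channel timing per scan type: max_out_time bounds how long the
+ * device may stay away from the operating channel, suspend_time how long
+ * to pause between scan iterations
+ */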
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
+};
+
+struct iwl_mvm_scan_params {
+ /* For CDB this is low band scan type, for non-CDB - type. */
+ enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
+ int n_scan_plans;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ bool iter_notif;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ u32 n_6ghz_params;
+ bool scan_6ghz;
+ bool enable_6ghz_passive;
+ bool respect_p2p_go, respect_p2p_go_hb;
+ s8 tsf_report_link_id;
+ u8 bssid[ETH_ALEN] __aligned(2);
+};
+
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ return (void *)&cmd->v8.data;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
+static inline struct iwl_scan_umac_chan_param *
+iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ return &cmd->v8.channel;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return &cmd->v7.channel;
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ return &cmd->v6.channel;
+
+ return &cmd->v1.channel;
+}
+
+static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
+{
+ if (mvm->scan_rx_ant != ANT_NONE)
+ return mvm->scan_rx_ant;
+ return iwl_mvm_get_valid_rx_ant(mvm);
+}
+
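+/* build the PHY RX chain selection for scanning: mark the chosen RX
+ * antennas as valid and force them for both SISO and MIMO reception,
+ * with the driver override bit set
+ */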
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+ u16 rx_chain;
+ u8 rx_ant;
+
+ rx_ant = iwl_mvm_scan_rx_ant(mvm);
+ rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+ return cpu_to_le16(rx_chain);
+}
+
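+/* pick the TX rate for scan probes: 1 Mbps CCK on 2.4 GHz unless CCK is
+ * disallowed, 6 Mbps OFDM otherwise; the TX antenna is toggled on each
+ * call to spread the load across the available antennas
+ */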
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
+ bool no_cck)
+{
+ u32 tx_ant;
+
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
+ tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+ if (band == NL80211_BAND_2GHZ && !no_cck)
+ return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 |
+ tx_ant);
+ else
+ return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
+}
+
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+ return mvm->tcm.result.global_load;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ return mvm->tcm.result.band_load[band];
+}
+
+struct iwl_mvm_scan_iter_data {
+ u32 global_cnt;
+ struct ieee80211_vif *current_vif;
+ bool is_dcm_with_p2p_go;
+};
+
+static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_scan_iter_data *data = _data;
+ struct iwl_mvm_vif *curr_mvmvif;
+
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ mvmvif->deflink.phy_ctxt &&
+ mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX)
+ data->global_cnt += 1;
+
+ if (!data->current_vif || vif == data->current_vif)
+ return;
+
+ curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+ mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
+ mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
+ data->is_dcm_with_p2p_go = true;
+}
+
+static enum
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_mvm_traffic_load load,
+ bool low_latency)
+{
+ struct iwl_mvm_scan_iter_data data = {
+ .current_vif = vif,
+ .is_dcm_with_p2p_go = false,
+ .global_cnt = 0,
+ };
+
+ /*
+ * A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_iterator,
+ &data);
+
+ if (!data.global_cnt)
+ return IWL_SCAN_TYPE_UNASSOC;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+ (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /*
+		 * in case of DCM with a P2P GO, set all scan requests to
+		 * fast-balance scan
+ */
+ if (vif && vif->type == NL80211_IFTYPE_STATION &&
+ data.is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+ }
+
+ if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+ return IWL_SCAN_TYPE_MILD;
+
+ return IWL_SCAN_TYPE_WILD;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load(mvm);
+ low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ enum iwl_mvm_traffic_load load;
+ bool low_latency;
+
+ load = iwl_mvm_get_traffic_load_band(mvm, band);
+ low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
+}
+
+static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
+{
+ /* require rrm scan whenever the fw supports it */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
+}
+
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
+{
+ int max_probe_len;
+
+ max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+	/* we create the 802.11 header (24 bytes), an empty SSID element
+	 * (2 bytes) and the WFA TPC element
+	 */
+ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
+
+	/* a DS parameter set element (3 bytes) is added on the 2.4 GHz band
+	 * if required
+	 */
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ max_probe_len -= 3;
+
+ return max_probe_len;
+}
+
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+ int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
+
+ /* TODO: [BUG] This function should return the maximum allowed size of
+	 * scan IEs, however the LMAC scan API contains both 2.4 GHz and 5 GHz IEs
+ * in the same command. So the correct implementation of this function
+ * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
+ * command has only 512 bytes and it would leave us with about 240
+ * bytes for scan IEs, which is clearly not enough. So meanwhile
+ * we will report an incorrect value. This may result in a failure to
+ * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
+	 * functions with -ENOBUFS, if a large enough probe is provided.
+ */
+ return max_ie_len;
+}
+
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
+}
+
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mvm->hw);
+}
+
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
+
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+ bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+
+ /* If this happens, the firmware has mistakenly sent an LMAC
+ * notification during UMAC scans -- warn and ignore it.
+ */
+ if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
+ return;
+
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
+ /* We first check if we were stopping a scan, in which case we
+ * just clear the stopping flag. Then we check if it was a
+ * firmware initiated stop, in which case we need to inform
+ * mac80211.
+ * Note that we can have a stopping and a running scan
+ * simultaneously, but we can't have two different types of
+ * scans stopping or running at the same time (since LMAC
+ * doesn't support it).
+ */
+
+ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ IWL_DEBUG_SCAN(mvm,
+ "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+ scan_notif->last_schedule_line,
+ scan_notif->last_schedule_iteration,
+ __le32_to_cpu(scan_notif->time_after_last_iter));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
+ ieee80211_scan_completed(mvm->hw, &info);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
+ } else {
+ IWL_ERR(mvm,
+ "got scan complete notification but no scan is running\n");
+ }
+
+ mvm->last_ebs_successful =
+ scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+ scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ int i;
+
+ for (i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ break;
+ if (ssid_list[i].len == ssid_len &&
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+ return -1;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert them back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+ struct iwl_ssid_ie *ssids,
+ u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+ u32 tmp_bitmap = 0;
+
+ /*
+	 * copy SSIDs from the match list.
+	 * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs
+	 * to configure the match list.
+ */
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ /* skip empty SSID matchsets */
+ if (!params->match_sets[j].ssid.ssid_len)
+ continue;
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
+ if (index < 0) {
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ tmp_bitmap |= BIT(i);
+ } else {
+ tmp_bitmap |= BIT(index);
+ }
+ }
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
+}
+
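+/* send the scheduled scan profile configuration as a two-fragment host
+ * command: fragment 0 carries the (all-zero) blocklist, fragment 1 the
+ * profile configuration; both fragments are passed by reference (NOCOPY)
+ */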
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
+ struct iwl_scan_offload_blocklist *blocklist;
+ struct iwl_scan_offload_profile_cfg_data *data;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
+ int profile_cfg_size = sizeof(*data) +
+ sizeof(*profile) * max_profiles;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .len[1] = profile_cfg_size,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int blocklist_len;
+ int i;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > max_profiles))
+ return -EIO;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+ blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+ else
+ blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+ blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL);
+ if (!blocklist)
+ return -ENOMEM;
+
+ profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
+ if (!profile_cfg_v1) {
+ ret = -ENOMEM;
+ goto free_blocklist;
+ }
+
+ cmd.data[0] = blocklist;
+ cmd.len[0] = sizeof(*blocklist) * blocklist_len;
+ cmd.data[1] = profile_cfg_v1;
+
+ /* if max_profile is MAX_PROFILES_V2, we have the new API */
+ if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
+ struct iwl_scan_offload_profile_cfg *profile_cfg =
+ (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
+
+ data = &profile_cfg->data;
+ } else {
+ data = &profile_cfg_v1->data;
+ }
+
+ /* No blocklist configuration */
+ data->num_profiles = req->n_match_sets;
+ data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ data->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg_v1->profiles[i];
+ profile->ssid_index = i;
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
+ IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
+ IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(profile_cfg_v1);
+free_blocklist:
+ kfree(blocklist);
+
+ return ret;
+}
+
+static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ return true;
+}
+
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = SCAN_OFFLOAD_ABORT_CMD,
+ };
+ u32 status = CAN_ABORT_STATUS;
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+ if (ret)
+ return ret;
+
+ if (status != CAN_ABORT_STATUS) {
+ /*
+ * The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is completed.
+ */
+ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+ ret = -ENOENT;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_tx_cmd *tx_cmd,
+ bool no_cck)
+{
+ tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
+ tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+ NL80211_BAND_2GHZ,
+ no_cck);
+
+ if (!iwl_mvm_has_new_station_api(mvm->fw)) {
+ tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+ tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+
+ /*
+		 * The FW doesn't use this sta anymore; its deprecation is
+		 * pending via a host API change
+ */
+ } else {
+ tx_cmd[0].sta_id = 0xff;
+ tx_cmd[1].sta_id = 0xff;
+ }
+
+ tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
+
+ tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+ NL80211_BAND_5GHZ,
+ no_cck);
+}
+
+static void
+iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 ssid_bitmap,
+ struct iwl_scan_req_lmac *cmd)
+{
+ struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].channel_num =
+ cpu_to_le16(channels[i]->hw_value);
+ channel_cfg[i].iter_count = cpu_to_le16(1);
+ channel_cfg[i].iter_interval = 0;
+ channel_cfg[i].flags =
+ cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
+ ssid_bitmap);
+ }
+}
+
+static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ if (!iwl_mvm_rrm_scan_needed(mvm)) {
+ memcpy(newpos, ies, len);
+ return newpos + len;
+ }
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
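+
+/*
+ * Editor's note: a minimal standalone sketch of the split-and-insert idea
+ * above, with a simplified stand-in for ieee80211_ie_split() (illustrative
+ * only, not the driver's code; the real helper also enforces IE ordering).
+ * The caller must reserve three extra bytes for the DS Parameter Set
+ * placeholder (EID, length, channel).
+ */
+#include <stddef.h>
+#include <string.h>
+
+/* Return the offset of the first IE whose EID is not in ids[]. */
+static size_t ie_split_sketch(const unsigned char *ies, size_t len,
+			      const unsigned char *ids, size_t n_ids)
+{
+	size_t pos = 0;
+
+	while (pos + 2 <= len && memchr(ids, ies[pos], n_ids) &&
+	       pos + 2 + ies[pos + 1] <= len)
+		pos += 2 + ies[pos + 1];	/* EID, length, payload */
+	return pos;
+}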
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
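+
+/*
+ * Editor's sketch (not part of the driver): the nine bytes built above,
+ * spelled out, assuming WFA_TPC_IE_LEN == 9, the Microsoft/WFA OUI
+ * 00:50:f2 and vendor type 8 for the TPC report. The firmware overwrites
+ * the TX power byte at offset 7.
+ */
+static const unsigned char wfa_tpc_ie_sketch[9] = {
+	0xdd,			/* WLAN_EID_VENDOR_SPECIFIC */
+	7,			/* length: OUI (3) + type + subtype + report (2) */
+	0x00, 0x50, 0xf2,	/* Microsoft/WFA OUI */
+	8,			/* WLAN_OUI_TYPE_MICROSOFT_TPC (assumed value) */
+	0,			/* OUI subtype */
+	0, 0,			/* TX power (FW-inserted), link margin */
+};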
+
+static void
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mvm_scan_params *params)
+{
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+ u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
+
+	/*
+	 * Unfortunately, the offload scan doesn't yet support address
+	 * randomisation within the firmware, so until the firmware API is
+	 * ready we implement it in the driver. This means the random address
+	 * only changes when the scan is restarted, not between iterations,
+	 * but at least that helps a bit.
+	 */
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ ether_addr_copy(frame->bssid, params->bssid);
+ frame->seq_ctrl = 0;
+
+ pos = frame->u.probe_req.variable;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+ /* Insert ds parameter set element on 2.4 GHz band */
+ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
+ pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
+
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
+
+ memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
+ ies->len[NL80211_BAND_6GHZ]);
+ params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[2].len =
+ cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
+ pos += ies->len[NL80211_BAND_6GHZ];
+ memcpy(pos, ies->common_ies, ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ !fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+ iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+ } else {
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+ }
+}
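+
+/*
+ * Editor's note: the probe-request template assembled above ends up laid
+ * out in params->preq.buf as follows (each region's offset/length is the
+ * little-endian field set alongside the copy):
+ *
+ *	mac_header:    24-byte management header + empty SSID IE (2 bytes)
+ *	band_data[0]:  2.4 GHz IEs, with the DS Parameter Set placeholder
+ *	band_data[1]:  5 GHz IEs
+ *	band_data[2]:  6 GHz IEs
+ *	common_data:   common IEs, plus the WFA TPC report IE when the
+ *	               driver rather than the firmware has to add it
+ */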
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_lmac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+ cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
+ cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+}
+
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ struct ieee80211_scan_ies *ies,
+ int n_channels)
+{
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+ (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len +
+ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+ ies->len[NL80211_BAND_6GHZ] <=
+ iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
+
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
+ bool low_latency;
+
+ if (iwl_mvm_is_cdb_supported(mvm))
+ low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
+ else
+ low_latency = iwl_mvm_low_latency(mvm);
+
+	/* We can only use EBS if:
+	 *	1. the feature is supported;
+	 *	2. the last EBS was successful;
+	 *	3. if only single scan, the single scan EBS API is supported;
+	 *	4. it's not a p2p find operation;
+	 *	5. we are not in low latency mode,
+	 *	   or fragmented EBS is supported by the FW;
+	 *	6. the VIF is not an AP interface (scan wants survey results).
+	 */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
+}
+
+static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
+{
+ return params->n_scan_plans == 1 &&
+ params->scan_plans[0].iterations == 1;
+}
+
+static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ int flags = 0;
+
+ if (params->n_ssids == 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
+
+ if (params->pass_all)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+
+ if (iwl_mvm_is_regular_scan(params) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ !iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
+
+ return flags;
+}
+
+static void
+iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req,
+ struct iwl_scan_probe_req *src_p_req)
+{
+ int i;
+
+ p_req->mac_header = src_p_req->mac_header;
+ for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++)
+ p_req->band_data[i] = src_p_req->band_data[i];
+ p_req->common_data = src_p_req->common_data;
+ memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf));
+}
+
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req_v1 *preq =
+ (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+ u32 ssid_bitmap = 0;
+ int i;
+ u8 band;
+
+ if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
+
+ cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+ cmd->iter_num = cpu_to_le32(1);
+ cmd->n_channels = (u8)params->n_channels;
+
+ cmd->delay = cpu_to_le32(params->delay);
+
+ cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
+ vif));
+
+ band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
+ cmd->flags = cpu_to_le32(band);
+ cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+ MAC_FILTER_IN_BEACON);
+ iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+ iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
+
+ /* this API uses bits 1-20 instead of 0-19 */
+ ssid_bitmap <<= 1;
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ cmd->schedule[i].delay =
+ cpu_to_le16(scan_plan->interval);
+ cmd->schedule[i].iterations = scan_plan->iterations;
+ cmd->schedule[i].full_scan_mul = 1;
+ }
+
+	/*
+	 * A zero iteration count in the last scan plan means "run forever";
+	 * the driver substitutes 0xff, the largest value the schedule field
+	 * can carry. Note this is not always hit: a regular scan, for
+	 * example, uses a single plan with a single iteration.
+	 */
+ if (!cmd->schedule[i - 1].iterations)
+ cmd->schedule[i - 1].iterations = 0xff;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif)) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
+ iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
+
+ iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq);
+
+ return 0;
+}
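+
+/*
+ * Editor's note: a worked example of the ssid_bitmap shift above. With
+ * two direct SSIDs, iwl_scan_build_ssids() yields 0b11 (bits 0 and 1);
+ * after "ssid_bitmap <<= 1" the LMAC command carries 0b110 (bits 1 and
+ * 2), matching the 1-based SSID numbering of this API.
+ */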
+
+static int rate_to_scan_rate_flag(unsigned int rate)
+{
+ static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
+ [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
+ [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
+ [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
+ [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
+ [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
+ [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
+ [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
+ [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
+ [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
+ [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
+ [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
+ [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
+ };
+
+ return rate_to_scan_rate[rate];
+}
+
+static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
+{
+ struct ieee80211_supported_band *band;
+ unsigned int rates = 0;
+ int i;
+
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+ for (i = 0; i < band->n_bitrates; i++)
+ rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+
+ /* Set both basic rates and supported rates */
+ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
+
+ return cpu_to_le32(rates);
+}
+
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_dwell *dwell)
+{
+ dwell->active = IWL_SCAN_DWELL_ACTIVE;
+ dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+ dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+ dwell->extended = IWL_SCAN_DWELL_EXTENDED;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
+ u32 max_channels)
+{
+ struct ieee80211_supported_band *band;
+ int i, j = 0;
+
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
+ channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
+{
+ enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
+ struct iwl_scan_config_v1 *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+ cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ /* This function should not be called when using ADD_STA ver >=12 */
+ WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
+}
+
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
+{
+ struct iwl_scan_config_v2 *cfg = config;
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+ cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ enum iwl_mvm_scan_type lb_type, hb_type;
+
+ lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
+ NL80211_BAND_5GHZ);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[lb_type].suspend_time);
+
+ cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].max_out_time);
+ cfg->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[hb_type].suspend_time);
+ } else {
+ enum iwl_mvm_scan_type type =
+ iwl_mvm_get_scan_type(mvm, NULL);
+
+ cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[type].max_out_time);
+ cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(scan_timing[type].suspend_time);
+ }
+
+ iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+ memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+ /* This function should not be called when using ADD_STA ver >=12 */
+ WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
+
+ cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+ cfg->channel_flags = channel_flags;
+
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
+}
+
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
+{
+ void *cfg;
+ int ret, cmd_size;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
+ };
+ enum iwl_mvm_scan_type type;
+ enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
+ int num_channels =
+ mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+ u32 flags;
+ u8 channel_flags;
+
+ if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
+ num_channels = mvm->fw->ucode_capa.n_scan_channels;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ type = iwl_mvm_get_scan_type_band(mvm, NULL,
+ NL80211_BAND_2GHZ);
+ hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
+ NL80211_BAND_5GHZ);
+ if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
+ return 0;
+ } else {
+ type = iwl_mvm_get_scan_type(mvm, NULL);
+ if (type == mvm->scan_type)
+ return 0;
+ }
+
+ if (iwl_mvm_cdb_scan_api(mvm))
+ cmd_size = sizeof(struct iwl_scan_config_v2);
+ else
+ cmd_size = sizeof(struct iwl_scan_config_v1);
+ cmd_size += mvm->fw->ucode_capa.n_scan_channels;
+
+ cfg = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ flags = SCAN_CONFIG_FLAG_ACTIVATE |
+ SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+ SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+ SCAN_CONFIG_N_CHANNELS(num_channels) |
+ (iwl_mvm_is_scan_fragmented(type) ?
+ SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+ channel_flags = IWL_CHANNEL_FLAG_EBS |
+ IWL_CHANNEL_FLAG_ACCURATE_EBS |
+ IWL_CHANNEL_FLAG_EBS_ADD |
+ IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ /*
+ * Check for fragmented scan on LMAC2 - high band.
+ * LMAC1 - low band is checked above.
+ */
+ if (iwl_mvm_cdb_scan_api(mvm)) {
+ if (iwl_mvm_is_cdb_supported(mvm))
+ flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
+ SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+ SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+ iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+ num_channels);
+ } else {
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
+ num_channels);
+ }
+
+ cmd.data[0] = cfg;
+ cmd.len[0] = cmd_size;
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (!ret) {
+ mvm->scan_type = type;
+ mvm->hb_scan_type = hb_type;
+ }
+
+ kfree(cfg);
+ return ret;
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_config cfg;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
+ .len[0] = sizeof(cfg),
+ .data[0] = &cfg,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+ return iwl_mvm_legacy_config_scan(mvm);
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ if (!iwl_mvm_has_new_station_api(mvm->fw)) {
+ cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
+ /*
+ * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
+ * version 5.
+ */
+ cfg.bcast_sta_id = 0xff;
+ }
+
+ cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+ cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+ IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
+{
+ int i;
+
+ for (i = 0; i < mvm->max_scans; i++)
+ if (mvm->scan_uid_status[i] == status)
+ return i;
+
+ return -ENOENT;
+}
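+
+/*
+ * Editor's sketch (hypothetical helper, not in the driver): the lookup
+ * above doubles as a free-slot allocator when searching for the "unused"
+ * status value 0.
+ */
+static int iwl_mvm_scan_alloc_uid_sketch(struct iwl_mvm *mvm)
+{
+	return iwl_mvm_scan_uid_by_status(mvm, 0); /* -ENOENT when full */
+}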
+
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_umac *cmd,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.adwell_default_n_aps_social =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ cmd->v7.adwell_default_n_aps =
+ IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+
+ if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
+ cmd->v9.adwell_default_hb_n_aps =
+ IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->n_ssids && params->ssids[0].ssid_len)
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ cmd->v7.adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ hb_timing = &scan_timing[params->hb_type];
+
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+ }
+
+ if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+ cmd->v7.active_dwell = active_dwell;
+ cmd->v7.passive_dwell = passive_dwell;
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ } else {
+ cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
+ active_dwell;
+ cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
+ passive_dwell;
+ }
+ }
+ } else {
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = active_dwell;
+ cmd->v1.passive_dwell = passive_dwell;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ hb_timing = &scan_timing[params->hb_type];
+
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+ }
+
+ if (iwl_mvm_cdb_scan_api(mvm)) {
+ cmd->v6.scan_priority =
+ cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ } else {
+ cmd->v1.scan_priority =
+ cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v1.max_out_time =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v1.suspend_time =
+ cpu_to_le32(timing->suspend_time);
+ }
+ }
+
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+}
+
+static u32 iwl_mvm_scan_umac_ooc_priority(int type)
+{
+ if (type == IWL_MVM_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (type == IWL_MVM_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static void
+iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
+ struct iwl_scan_general_params_v11 *general_params,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+ u8 active_dwell, passive_dwell;
+
+ timing = &scan_timing[params->type];
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+
+ general_params->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ /* if custom max budget was configured with debugfs */
+ if (IWL_MVM_ADWELL_MAX_BUDGET)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+ else if (params->n_ssids && params->ssids[0].ssid_len)
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ general_params->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ general_params->suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+
+ hb_timing = &scan_timing[params->hb_type];
+
+ general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->max_out_time);
+ general_params->suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(hb_timing->suspend_time);
+
+ general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+ general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+struct iwl_mvm_scan_channel_segment {
+ u8 start_idx;
+ u8 end_idx;
+ u8 first_channel_id;
+ u8 last_channel_id;
+ u8 channel_spacing_shift;
+ u8 band;
+};
+
+static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
+ {
+ .start_idx = 0,
+ .end_idx = 13,
+ .first_channel_id = 1,
+ .last_channel_id = 14,
+ .channel_spacing_shift = 0,
+ .band = PHY_BAND_24
+ },
+ {
+ .start_idx = 14,
+ .end_idx = 41,
+ .first_channel_id = 36,
+ .last_channel_id = 144,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+ {
+ .start_idx = 42,
+ .end_idx = 50,
+ .first_channel_id = 149,
+ .last_channel_id = 181,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_5
+ },
+ {
+ .start_idx = 51,
+ .end_idx = 111,
+ .first_channel_id = 1,
+ .last_channel_id = 241,
+ .channel_spacing_shift = 2,
+ .band = PHY_BAND_6
+ },
+};
+
+static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
+{
+ int i, index;
+
+ if (!channel_id)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
+ const struct iwl_mvm_scan_channel_segment *ch_segment =
+ &scan_channel_segments[i];
+ u32 ch_offset;
+
+ if (ch_segment->band != band ||
+ ch_segment->first_channel_id > channel_id ||
+ ch_segment->last_channel_id < channel_id)
+ continue;
+
+ ch_offset = (channel_id - ch_segment->first_channel_id) >>
+ ch_segment->channel_spacing_shift;
+
+ index = scan_channel_segments[i].start_idx + ch_offset;
+ if (index < IWL_SCAN_NUM_CHANNELS)
+ return index;
+
+ break;
+ }
+
+ return -EINVAL;
+}
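+
+/*
+ * Editor's note: a worked example of the segment math above. 5 GHz
+ * channel 40 lands in the segment with start_idx 14, first_channel_id 36
+ * and channel_spacing_shift 2, so:
+ *
+ *	index = 14 + ((40 - 36) >> 2) = 15
+ *
+ * Likewise 6 GHz channel 5 gives 51 + ((5 - 1) >> 2) = 52.
+ */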
+
+static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+static const u8 social_chs[] = {
+ 1, 6, 11
+};
+
+static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
+ u8 ch_id, u8 band, u8 *ch_bitmap,
+ size_t bitmap_n_entries)
+{
+ int i;
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ int ch_idx, bitmap_idx;
+
+ ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
+ if (ch_idx < 0)
+ return;
+
+ bitmap_idx = ch_idx / 8;
+ if (bitmap_idx >= bitmap_n_entries)
+ return;
+
+ ch_idx = ch_idx % 8;
+ ch_bitmap[bitmap_idx] |= BIT(ch_idx);
+
+ return;
+ }
+ }
+}
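+
+/*
+ * Editor's note: continuing the worked example for the override bitmap
+ * above. P2P-GO-friendly channel 149 (5 GHz) maps to scan index 42, so
+ * the helper sets bit 42 % 8 = 2 in byte 42 / 8 = 5 of
+ * adwell_ch_override_bitmap.
+ */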
+
+static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+ int i;
+ u32 flags = 0;
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+ break;
+ }
+ }
+
+ if (flags)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
+ if (social_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+ break;
+ }
+ }
+
+out:
+ return flags;
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ int n_channels, u32 flags,
+ struct iwl_scan_channel_cfg_umac *channel_cfg)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ channel_cfg[i].flags = cpu_to_le32(flags);
+ channel_cfg[i].channel_num = channels[i]->hw_value;
+ if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
+ enum nl80211_band band = channels[i]->band;
+
+ channel_cfg[i].v2.band =
+ iwl_mvm_phy_band_from_nl80211(band);
+ channel_cfg[i].v2.iter_count = 1;
+ channel_cfg[i].v2.iter_interval = 0;
+ } else {
+ channel_cfg[i].v1.iter_count = 1;
+ channel_cfg[i].v1.iter_interval = 0;
+ }
+ }
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v4 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ u8 *bitmap = cp->adwell_ch_override_bitmap;
+ size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[i];
+
+ cfg->flags = cpu_to_le32(flags);
+ cfg->channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+
+ iwl_mvm_scan_ch_add_n_aps_override(vif_type,
+ cfg->channel_num,
+ cfg->v2.band, bitmap,
+ bitmap_n_entries);
+ }
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v7 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type, u32 version)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+ u32 n_aps_flag =
+ iwl_mvm_scan_ch_n_aps_flag(vif_type,
+ channels[i]->hw_value);
+ u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
+
+ cfg->flags = cpu_to_le32(flags | n_aps_flag);
+ cfg->channel_num = channels[i]->hw_value;
+ if (cfg80211_channel_is_psc(channels[i]))
+ cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ if (version < 17)
+ cfg->v2.band = iwl_band;
+ else
+ cfg->flags |= cpu_to_le32((iwl_band <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS));
+ }
+}
+
+static void
+iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp)
+{
+ int j, idex_s = 0, idex_b = 0;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+ bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN);
+
+ for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
+ j++) {
+ if (!params->ssids[j].ssid_len)
+ continue;
+
+ pp->short_ssid[idex_s] =
+ cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
+ params->ssids[j].ssid_len));
+
+ if (hidden_supported) {
+ pp->direct_scan[idex_s].id = WLAN_EID_SSID;
+ pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
+ memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
+ params->ssids[j].ssid_len);
+ }
+ idex_s++;
+ }
+
+ /*
+ * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
+ * collocated parameters. This might not be optimal, as this processing
+ * does not (yet) correspond to the actual channels, so it is possible
+ * that some entries would be left out.
+ *
+ * TODO: improve this logic.
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ int k;
+
+ /* First, try to place the short SSID */
+ if (scan_6ghz_params[j].short_ssid_valid) {
+ for (k = 0; k < idex_s; k++) {
+ if (pp->short_ssid[k] ==
+ cpu_to_le32(scan_6ghz_params[j].short_ssid))
+ break;
+ }
+
+ if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
+ pp->short_ssid[idex_s++] =
+ cpu_to_le32(scan_6ghz_params[j].short_ssid);
+ }
+ }
+
+ /* try to place BSSID for the same entry */
+ for (k = 0; k < idex_b; k++) {
+ if (!memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid, ETH_ALEN))
+ break;
+ }
+
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
+ memcpy(&pp->bssid_array[idex_b++],
+ scan_6ghz_params[j].bssid, ETH_ALEN);
+ }
+ }
+
+ pp->short_ssid_num = idex_s;
+ pp->bssid_num = idex_b;
+}
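+
+/*
+ * Editor's sketch (not part of the driver): the "short SSID" above is the
+ * standard CRC-32 of the SSID bytes, which the kernel spells as
+ * ~crc32_le(~0, ssid, len). A userspace equivalent using zlib's crc32()
+ * (same polynomial, initial value and final XOR), shown here only to make
+ * the encoding concrete:
+ */
+#include <zlib.h>
+
+static unsigned long short_ssid_sketch(const unsigned char *ssid,
+				       unsigned int len)
+{
+	return crc32(0L, ssid, len);	/* == ~crc32_le(~0, ssid, len) */
+}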
+
+/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */
+static u32
+iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ u32 n_channels,
+ struct iwl_scan_probe_params_v4 *pp,
+ struct iwl_scan_channel_params_v7 *cp,
+ enum nl80211_iftype vif_type,
+ u32 version)
+{
+ int i;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+ u32 ch_cnt;
+
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[ch_cnt];
+
+ u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
+ u8 k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
+ unsolicited_probe_on_chan = false, psc_no_listen = false;
+ s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ /*
+ * Avoid performing passive scan on non PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
+ cfg->channel_num = params->channels[i]->hw_value;
+ if (version < 17)
+ cfg->v2.band = PHY_BAND_6;
+ else
+ cfg->flags |= cpu_to_le32(PHY_BAND_6 <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+ s8 tmp_psd_20;
+
+			if (scan_6ghz_params[j].channel_idx != i)
+ continue;
+
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
+ /* Use the highest PSD value allowed as advertised by
+ * APs for this channel
+ */
+ tmp_psd_20 = scan_6ghz_params[j].psd_20;
+ if (tmp_psd_20 !=
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
+ (psd_20 ==
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
+ psd_20 < tmp_psd_20))
+ psd_20 = tmp_psd_20;
+
+ psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
+
+ /*
+ * In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+		 * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mvm_is_scan_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ psc_no_listen) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+		/*
+		 * To optimize the scan time, i.e., reduce the dwell time on
+		 * each channel, the logic below tries to set three direct
+		 * BSSID probe requests for each broadcast probe request with
+		 * a short SSID.
+		 * TODO: improve this logic
+		 */
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+			if (scan_6ghz_params[j].channel_idx != i)
+ continue;
+
+ found = false;
+
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
+ if (!scan_6ghz_params[j].unsolicited_probe &&
+ le32_to_cpu(pp->short_ssid[k]) ==
+ scan_6ghz_params[j].short_ssid) {
+ /* Relevant short SSID bit set */
+ if (s_ssid_bitmap & BIT(k)) {
+ found = true;
+ break;
+ }
+
+ /*
+ * Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
+ */
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
+ break;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
+ allow_passive = false;
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ for (k = 0; k < pp->bssid_num; k++) {
+ if (!memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid,
+ ETH_ALEN)) {
+ if (!(bssid_bitmap & BIT(k))) {
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+							force_passive = true;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ if (cfg80211_channel_is_psc(params->channels[i]) &&
+ psc_no_listen)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+ if (unsolicited_probe_on_chan)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+ if ((allow_passive && force_passive) ||
+ (!(bssid_bitmap | s_ssid_bitmap) &&
+ !cfg80211_channel_is_psc(params->channels[i])))
+ flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+ else
+ flags |= bssid_bitmap | (s_ssid_bitmap << 16);
+
+ cfg->flags |= cpu_to_le32(flags);
+ if (version >= 17)
+ cfg->v5.psd_20 = psd_20;
+
+ ch_cnt++;
+ }
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mvm,
+			       "6GHz: reducing number of channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
+}
+
+static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan on HB channels */
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type)) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type)))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+	/*
+	 * Force EBS in case the scan is fragmented and P2P GO operation
+	 * needs to be taken into consideration during the scan.
+	 */
+ if ((!iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
+ (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type) &&
+ params->respect_p2p_go_hb)) {
+ IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
+ return flags;
+}
+
+static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 n_disabled, i;
+
+ params->enable_6ghz_passive = false;
+
+ if (params->scan_6ghz)
+ return;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: Not supported by FW\n");
+ return;
+ }
+
+ /* 6GHz passive scan allowed only on station interface */
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: not station interface\n");
+ return;
+ }
+
+ /*
+ * 6GHz passive scan is allowed in a defined time interval following HW
+ * reset or resume flow, or while not associated and a large interval
+ * has passed since the last 6GHz passive scan.
+ */
+ if ((vif->cfg.assoc ||
+ time_after(mvm->last_6ghz_passive_scan_jiffies +
+ (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
+ (time_before(mvm->last_reset_or_resume_time_jiffies +
+ (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+ jiffies))) {
+ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
+ vif->cfg.assoc ? "associated" :
+ "timeout did not expire");
+ return;
+ }
+
+ /* not enough channels in the regular scan request */
+ if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: not enough channels\n");
+ return;
+ }
+
+ for (i = 0; i < params->n_ssids; i++) {
+ if (!params->ssids[i].ssid_len)
+ break;
+ }
+
+ /* not a wildcard scan, so cannot enable passive 6GHz scan */
+ if (i == params->n_ssids) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: no wildcard SSID\n");
+ return;
+ }
+
+ if (!sband || !sband->n_channels) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: no 6GHz channels\n");
+ return;
+ }
+
+ for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
+ n_disabled++;
+ }
+
+ /*
+ * Not all the 6GHz channels are disabled, so no need for 6GHz passive
+ * scan
+ */
+ if (n_disabled != sband->n_channels) {
+ IWL_DEBUG_SCAN(mvm,
+ "6GHz passive scan: 6GHz channels enabled\n");
+ return;
+ }
+
+ /* all conditions to enable 6ghz passive scan are satisfied */
+ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n");
+ params->enable_6ghz_passive = true;
+}
+
+static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ int type)
+{
+ u16 flags = 0;
+
+ /*
+ * If no direct SSIDs are provided perform a passive scan. Otherwise,
+ * if there is a single SSID which is not the broadcast SSID, assume
+ * that the scan is intended for roaming purposes and thus enable Rx on
+ * all chains to improve chances of hearing the beacons/probe responses.
+ */
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+ else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->iter_notif ||
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (IWL_MVM_ADWELL_ENABLE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
+ params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+
+ if (params->enable_6ghz_passive)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+
+ if (iwl_mvm_is_oce_supported(mvm) &&
+ (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
+
+ return flags;
+}
+
+static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif, int type,
+ u16 gen_flags)
+{
+ u8 flags = 0;
+
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
+ if (params->respect_p2p_go_hb)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ } else {
+ if (params->respect_p2p_go)
+ flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+ }
+
+ if (params->scan_6ghz &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+
+ /* Passive and AP interface -> ACS (automatic channel selection) */
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
+ ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ 0) >= 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
+
+ return flags;
+}
+
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ u16 flags = 0;
+
+ if (params->n_ssids == 0)
+ flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
+
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
+
+ if (iwl_mvm_is_scan_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+
+ if (iwl_mvm_is_cdb_supported(mvm) &&
+ iwl_mvm_is_scan_fragmented(params->hb_type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+
+ if (iwl_mvm_rrm_scan_needed(mvm) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ if (!iwl_mvm_is_regular_scan(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+ if (params->iter_notif)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
+
+	/*
+	 * Extended dwell is relevant only for the low band to start with, as
+	 * it is used for the social channels only (1, 6, 11), so even with
+	 * CDB it is enough to check the scan type on the low band.
+	 */
+ if (iwl_mvm_is_regular_scan(params) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ !iwl_mvm_is_scan_fragmented(params->type) &&
+ !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
+ !iwl_mvm_is_oce_supported(mvm))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
+
+ if (iwl_mvm_is_oce_supported(mvm)) {
+ if ((params->flags &
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
+		/* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
+		 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
+		 * the same bit, we need to make sure that we use this bit
+		 * here only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL
+		 * cannot be used.
+		 */
+ if ((params->flags &
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
+ !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
+ if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
+ }
+
+ return flags;
+}
+
+static int
+iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+	int i;
+
+	if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+	/*
+	 * A zero iteration count in the last scan plan means "run forever";
+	 * the driver substitutes 0xff, the largest value the schedule field
+	 * can carry. Note this is not always hit: a regular scan, for
+	 * example, uses a single plan with a single iteration.
+	 */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
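+
+/*
+ * Editor's sketch: a "run forever" plan as cfg80211 would hand it down;
+ * the helper above rewrites the trailing zero iteration count to 0xff,
+ * the largest value the schedule field can carry.
+ */
+static const struct cfg80211_sched_scan_plan forever_plan_sketch = {
+	.interval = 60,		/* seconds between iterations */
+	.iterations = 0,	/* 0 = infinite; becomes 0xff in the schedule */
+};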
+
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type, int uid)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+ struct iwl_scan_umac_chan_param *chan_param;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
+ void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels;
+ struct iwl_scan_req_umac_tail_v2 *tail_v2 =
+ (struct iwl_scan_req_umac_tail_v2 *)sec_part;
+ struct iwl_scan_req_umac_tail_v1 *tail_v1;
+ struct iwl_ssid_ie *direct_scan;
+ int ret = 0;
+ u32 ssid_bitmap = 0;
+ u8 channel_flags = 0;
+ u16 gen_flags;
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
+
+ iwl_mvm_scan_umac_dwell(mvm, cmd, params);
+
+ cmd->uid = cpu_to_le32(uid);
+ gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
+ cmd->general_flags = cpu_to_le16(gen_flags);
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
+ cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
+ IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
+ cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
+ IWL_SCAN_NUM_OF_FRAGS;
+
+ cmd->v8.general_flags2 =
+ IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
+ }
+
+ cmd->scan_start_mac_id = scan_vif->id;
+
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+ cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
+ if (iwl_mvm_scan_use_ebs(mvm, vif)) {
+ channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan on HB channels */
+ if (iwl_mvm_is_frag_ebs_supported(mvm)) {
+ if (gen_flags &
+ IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
+ (!iwl_mvm_is_cdb_supported(mvm) &&
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
+ channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+ }
+ }
+
+ chan_param->flags = channel_flags;
+ chan_param->count = params->n_channels;
+
+ ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
+ &tail_v2->delay);
+ if (ret)
+ return ret;
+
+ if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
+ tail_v2->preq = params->preq;
+ direct_scan = tail_v2->direct_scan;
+ } else {
+ tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part;
+ iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq,
+ &params->preq);
+ direct_scan = tail_v1->direct_scan;
+ }
+ iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap);
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap,
+ cmd_data);
+ return 0;
+}
+
+static void
+iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v11 *gp,
+ u16 gen_flags, u8 gen_flags2,
+ u32 version)
+{
+ struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
+
+ IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
+ gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ mvm->scan_link_id = 0;
+
+ if (version < 16) {
+ gp->scan_start_mac_or_link_id = scan_vif->id;
+ } else {
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[params->tsf_report_link_id];
+
+ mvm->scan_link_id = params->tsf_report_link_id;
+ if (!WARN_ON(!link_info))
+ gp->scan_start_mac_or_link_id = link_info->fw_link_id;
+ }
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v3 *pp)
+{
+ pp->preq = params->preq;
+ pp->ssid_num = params->n_ssids;
+ iwl_scan_build_ssids(params, pp->direct_scan, NULL);
+}
+
+static void
+iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp,
+ u32 *bitmap_ssid)
+{
+ pp->preq = params->preq;
+ iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v4 *cp,
+ u32 channel_cfg_flags)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+
+ iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type);
+}
+
+static void
+iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v7 *cp,
+ u32 channel_cfg_flags,
+ u32 version)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type, version);
+
+ if (params->enable_6ghz_passive) {
+ struct ieee80211_supported_band *sband =
+ &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 i;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *channel =
+ &sband->channels[i];
+
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[cp->count];
+
+ if (!cfg80211_channel_is_psc(channel))
+ continue;
+
+ cfg->channel_num = channel->hw_value;
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+
+ if (version < 17) {
+ cfg->flags = 0;
+ cfg->v2.band = PHY_BAND_6;
+ } else {
+ cfg->flags = cpu_to_le32(PHY_BAND_6 <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+ cfg->v5.psd_20 =
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+ }
+ cp->count++;
+ }
+ }
+}
+
+static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags, 0, 12);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
+ iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
+ &scan_p->channel_params, 0);
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type, int uid, u32 version)
+{
+ struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
+ struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
+ struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
+ int ret;
+ u16 gen_flags;
+ u8 gen_flags2;
+ u32 bitmap_ssid = 0;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+
+ if (version >= 15)
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type,
+ gen_flags);
+ else
+ gen_flags2 = 0;
+
+ iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags, gen_flags2, version);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ if (!params->scan_6ghz) {
+ iwl_mvm_scan_umac_fill_probe_p_v4(params,
+ &scan_p->probe_params,
+ &bitmap_ssid);
+ iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif,
+ &scan_p->channel_params,
+ bitmap_ssid,
+ version);
+ return 0;
+ } else {
+ pb->preq = params->preq;
+ }
+
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
+
+ cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params,
+ params->n_channels,
+ pb, cp, vif->type,
+ version);
+ if (!cp->count)
+ return -EINVAL;
+
+ if (!params->n_ssids ||
+ (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+ cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
+
+ return 0;
+}
+
+static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
+}
+
+static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
+}
+
+static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16);
+}
+
+static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17);
+}
+
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+ return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
+
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and the userspace is
+ * trying to run a scan type that is already running, we
+ * return -EBUSY. But if the userspace wants to start a
+ * different type of scan, we stop the opposite type to make
+ * space for the new request. The reason is backwards
+ * compatibility with old wpa_supplicant that wouldn't stop a
+ * scheduled scan before starting a normal scan.
+ */
+
+ /* FW supports only a single periodic scan */
+ if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
+ mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
+ return -EBUSY;
+
+ if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+ return 0;
+
+	/* Use a switch, even though this is a bitmask, so that more
+	 * than one bit set will fall into the default case and we
+	 * will warn.
+	 */
+ switch (type) {
+ case IWL_MVM_SCAN_REGULAR:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ case IWL_MVM_SCAN_SCHED:
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+ /* For non-unified images, there's no need to stop
+ * anything for net-detect since the firmware is
+ * restarted anyway. This way, any sched scans that
+ * were running will be restarted when we resume.
+ */
+ if (!unified_image)
+ return 0;
+
+ /* If this is a unified image and we ran out of scans,
+ * we need to stop something. Prefer stopping regular
+ * scans, because the results are useless at this
+ * point, and we should be able to keep running
+ * another scheduled scan while suspended.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+ true);
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+ true);
+ /* Something is wrong if no scan was running but we
+ * ran out of scans.
+ */
+ fallthrough;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -EIO;
+}
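+
+/*
+ * Illustrative sketch (not part of the driver): the preemption policy
+ * above reduced to its essence. Each IWL_MVM_SCAN_* type is a single
+ * bit in mvm->scan_status, so "is this type running" is a mask test
+ * and "how many scans run" is a popcount. The names are hypothetical.
+ */
+#if 0
+static int ex_check_running(u32 status, int max_scans, u32 requested)
+{
+	if (hweight32(status) < max_scans)
+		return 0;	/* room left, just start it */
+	if (status & requested)
+		return -EBUSY;	/* same type already running */
+	return 1;		/* caller stops the opposite type first */
+}
+#endif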
+
+#define SCAN_TIMEOUT 30000
+
+void iwl_mvm_scan_timeout_wk(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ scan_timeout_dwork);
+
+ IWL_ERR(mvm, "regular scan timed out\n");
+
+ iwl_force_nmi(mvm->trans);
+}
+
+static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->type =
+ iwl_mvm_get_scan_type_band(mvm, vif,
+ NL80211_BAND_2GHZ);
+ params->hb_type =
+ iwl_mvm_get_scan_type_band(mvm, vif,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->type = iwl_mvm_get_scan_type(mvm, vif);
+ }
+}
+
+struct iwl_scan_umac_handler {
+ u8 version;
+ int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type, int uid);
+};
+
+#define IWL_SCAN_UMAC_HANDLER(_ver) { \
+ .version = _ver, \
+ .handler = iwl_mvm_scan_umac_v##_ver, \
+}
+
+static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
+ /* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(17),
+ IWL_SCAN_UMAC_HANDLER(16),
+ IWL_SCAN_UMAC_HANDLER(15),
+ IWL_SCAN_UMAC_HANDLER(14),
+ IWL_SCAN_UMAC_HANDLER(12),
+};
+
+static void iwl_mvm_mei_scan_work(struct work_struct *wk)
+{
+ struct iwl_mei_scan_filter *scan_filter =
+ container_of(wk, struct iwl_mei_scan_filter, scan_work);
+ struct iwl_mvm *mvm =
+ container_of(scan_filter, struct iwl_mvm, mei_scan_filter);
+ struct iwl_mvm_csme_conn_info *info;
+ struct sk_buff *skb;
+ u8 bssid[ETH_ALEN];
+
+ mutex_lock(&mvm->mutex);
+ info = iwl_mvm_get_csme_conn_info(mvm);
+ memcpy(bssid, info->conn_info.bssid, ETH_ALEN);
+ mutex_unlock(&mvm->mutex);
+
+ while ((skb = skb_dequeue(&scan_filter->scan_res))) {
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!memcmp(mgmt->bssid, bssid, ETH_ALEN))
+ ieee80211_rx_irqsafe(mvm->hw, skb);
+ else
+ kfree_skb(skb);
+ }
+}
+
+void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter)
+{
+ skb_queue_head_init(&mei_scan_filter->scan_res);
+ INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work);
+}
+
+/* In case CSME is connected and has link protection set, this function will
+ * override the scan request to scan only the associated channel and only for
+ * the associated SSID.
+ */
+static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm);
+ struct iwl_mei_conn_info *conn_info;
+ struct ieee80211_channel *chan;
+ int scan_iters, i;
+
+ if (!info) {
+ IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n");
+ return;
+ }
+
+ conn_info = &info->conn_info;
+ if (!info->conn_info.lp_state || !info->conn_info.ssid_len)
+ return;
+
+ if (!params->n_channels || !params->n_ssids)
+ return;
+
+ mvm->mei_scan_filter.is_mei_limited_scan = true;
+
+ chan = ieee80211_get_channel(mvm->hw->wiphy,
+ ieee80211_channel_to_frequency(conn_info->channel,
+ conn_info->band));
+ if (!chan) {
+ IWL_DEBUG_SCAN(mvm,
+ "Failed to get CSME channel (chan=%u band=%u)\n",
+ conn_info->channel, conn_info->band);
+ return;
+ }
+
+	/* The MEI-filtered scan must find the AP, otherwise CSME will
+	 * take ownership of the NIC. Add several iterations on the channel
+	 * to make the scan more robust.
+	 */
+ scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels);
+ params->n_channels = scan_iters;
+ for (i = 0; i < scan_iters; i++)
+ params->channels[i] = chan;
+
+ IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters);
+
+ params->n_ssids = 1;
+ params->ssids[0].ssid_len = conn_info->ssid_len;
+ memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len);
+}
+
+static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_host_cmd *hcmd,
+ struct iwl_mvm_scan_params *params,
+ int type)
+{
+ int uid, i, err;
+ u8 scan_ver;
+
+ lockdep_assert_held(&mvm->mutex);
+ memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
+
+ iwl_mvm_mei_limited_scan(mvm, params);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
+
+ return iwl_mvm_scan_lmac(mvm, vif, params);
+ }
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
+
+ hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
+
+ scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
+ const struct iwl_scan_umac_handler *ver_handler =
+ &iwl_scan_umac_handlers[i];
+
+ if (ver_handler->version != scan_ver)
+ continue;
+
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
+ }
+
+ err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+ if (err)
+ return err;
+
+ return uid;
+}
+
+struct iwl_mvm_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+ enum nl80211_band band;
+};
+
+static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
+ u32 link_id;
+
+ for (link_id = 0;
+ link_id < ARRAY_SIZE(mvmvif->link);
+ link_id++) {
+ struct iwl_mvm_vif_link_info *link =
+ mvmvif->link[link_id];
+
+ if (link && link->phy_ctxt->id < NUM_PHY_CTX &&
+ (data->band == NUM_NL80211_BANDS ||
+ link->phy_ctxt->channel->band == data->band)) {
+ data->p2p_go = true;
+ break;
+ }
+ }
+ }
+}
+
+static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum nl80211_band band)
+{
+ struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ .band = band,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ bool low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
+}
+
+static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ bool low_latency = iwl_mvm_low_latency(mvm);
+
+ return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
+ NUM_NL80211_BANDS);
+}
+
+static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ params->respect_p2p_go =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_2GHZ);
+ params->respect_p2p_go_hb =
+ iwl_mvm_get_respect_p2p_go_band(mvm, vif,
+ NL80211_BAND_5GHZ);
+ } else {
+ params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
+ }
+}
+
+static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret, uid;
+ struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
+
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
+
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+ ether_addr_copy(params.bssid, req->bssid);
+
+ params.scan_plans = &scan_plan;
+ params.n_scan_plans = 1;
+
+ params.n_6ghz_params = req->n_6ghz_params;
+ params.scan_6ghz_params = req->scan_6ghz_params;
+ params.scan_6ghz = req->scan_6ghz;
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
+
+ if (req->duration)
+ params.iter_notif = true;
+
+ params.tsf_report_link_id = req->tsf_report_link_id;
+ if (params.tsf_report_link_id < 0) {
+ if (vif->active_links)
+ params.tsf_report_link_id = __ffs(vif->active_links);
+ else
+ params.tsf_report_link_id = 0;
+ }
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
+
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
+
+ if (uid < 0)
+ return uid;
+
+ iwl_mvm_pause_tcm(mvm, false);
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (ret) {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ iwl_mvm_resume_tcm(mvm);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n",
+ type, uid);
+
+ mvm->scan_uid_status[uid] = type;
+ mvm->scan_status |= type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
+ }
+
+ if (params.enable_6ghz_passive)
+ mvm->last_6ghz_passive_scan_jiffies = jiffies;
+
+ return 0;
+}
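+
+/*
+ * Worked example for the tsf_report_link_id fallback above: if the
+ * caller passes -1 and vif->active_links == 0x6 (links 1 and 2
+ * active), __ffs(0x6) == 1, so TSF reporting defaults to the lowest
+ * active link; with no active links at all, link 0 is used.
+ */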
+
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mvm_single_scan_start(mvm, vif, req, ies,
+ IWL_MVM_SCAN_REGULAR);
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret, uid;
+ int i, j;
+ bool non_psc_included = false;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
+
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+ eth_broadcast_addr(params.bssid);
+ if (!req->n_scan_plans)
+ return -EINVAL;
+
+ params.n_scan_plans = req->n_scan_plans;
+ params.scan_plans = req->scan_plans;
+
+ iwl_mvm_fill_scan_type(mvm, &params, vif);
+ iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
+
+	/* In theory, LMAC scans can handle a 32-bit delay, but since
+	 * waiting for over 18 hours to start the scan is a bit silly
+	 * and to keep it aligned with UMAC scans (which only support
+	 * 16-bit delays), trim it down to 16 bits.
+	 */
+ if (req->delay > U16_MAX) {
+ IWL_DEBUG_SCAN(mvm,
+ "delay value is > 16-bits, set to max possible\n");
+ params.delay = U16_MAX;
+ } else {
+ params.delay = req->delay;
+ }
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ /* for 6 GHZ band only PSC channels need to be added */
+ for (i = 0; i < params.n_channels; i++) {
+ struct ieee80211_channel *channel = params.channels[i];
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(channel)) {
+ non_psc_included = true;
+ break;
+ }
+ }
+
+ if (non_psc_included) {
+ params.channels = kmemdup(params.channels,
+ sizeof(params.channels[0]) *
+ params.n_channels,
+ GFP_KERNEL);
+ if (!params.channels)
+ return -ENOMEM;
+
+ for (i = j = 0; i < params.n_channels; i++) {
+ if (params.channels[i]->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(params.channels[i]))
+ continue;
+ params.channels[j++] = params.channels[i];
+ }
+ params.n_channels = j;
+ }
+
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm,
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mvm->scan_uid_status[uid] = type;
+ mvm->scan_status |= type;
+ } else {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+
+out:
+ if (non_psc_included)
+ kfree(params.channels);
+ return ret;
+}
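+
+/*
+ * Illustrative sketch (not part of the driver): the 6 GHz PSC
+ * filtering above is the classic two-index in-place compaction,
+ * applied to a private kmemdup() copy so the caller's channel array
+ * is left untouched. In generic form (hypothetical helper):
+ */
+#if 0
+static size_t ex_compact(int *v, size_t n, bool (*keep)(int))
+{
+	size_t i, j;
+
+	for (i = j = 0; i < n; i++)
+		if (keep(v[i]))
+			v[j++] = v[i];	/* j trails i, keeping survivors */
+	return j;			/* new logical length */
+}
+#endif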
+
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ bool select_links = false;
+
+ mvm->mei_scan_filter.is_mei_limited_scan = false;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mvm->scan_uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mvm_ebs_status_str(notif->ebs_status));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n",
+ mvm->scan_status);
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
+ if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
+ return;
+
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ .scan_start_tsf = mvm->scan_start,
+ };
+ struct iwl_mvm_vif *scan_vif = mvm->scan_vif;
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[mvm->scan_link_id];
+
+ /* It is possible that by the time the scan is complete the link
+ * was already removed and is not valid.
+ */
+ if (link_info)
+ memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
+ else
+ IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_vif = NULL;
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ iwl_mvm_resume_tcm(mvm);
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
+		/*
+		 * Other scan types won't necessarily scan the MLD links'
+		 * channels. Therefore, only select links after a successful
+		 * internal scan.
+		 */
+ select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
+ }
+
+ mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n",
+ mvm->scan_status);
+
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+ mvm->last_ebs_successful = false;
+
+ mvm->scan_uid_status[uid] = 0;
+
+ if (select_links)
+ wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
+}
+
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+
+ mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
+
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ mvm->scan_start);
+}
+
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait)
+{
+ struct iwl_umac_scan_abort abort_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
+ .len = { sizeof(abort_cmd), },
+ .data = { &abort_cmd, },
+ .flags = CMD_SEND_IN_RFKILL,
+ };
+
+ int uid, ret;
+ u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ *wait = true;
+
+ /* We should always get a valid index here, because we already
+ * checked that this type of scan was running in the generic
+ * code.
+ */
+ uid = iwl_mvm_scan_uid_by_status(mvm, type);
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ abort_cmd.uid = cpu_to_le32(uid);
+
+ IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+ ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status);
+ if (!ret)
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+	/* Handle the case that the FW is no longer familiar with the scan
+	 * that is to be stopped. In that case, the scan complete notification
+	 * is expected to have already been received but not yet processed,
+	 * so there is no need to wait for it and the flow should continue
+	 * as if the scan was really aborted.
+	 */
+ if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) {
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ *wait = false;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ SCAN_OFFLOAD_COMPLETE, };
+ int ret;
+ bool wait = true;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+
+ IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
+ else
+ ret = iwl_mvm_lmac_scan_abort(mvm);
+
+ if (ret) {
+ IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return ret;
+ } else if (!wait) {
+ IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type);
+ iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+ return 0;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
+ 1 * HZ);
+}
+
+static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
+{
+ switch (scan_ver) {
+ case 12:
+ return sizeof(struct iwl_scan_req_umac_v12);
+ case 14:
+ case 15:
+ case 16:
+ case 17:
+ return sizeof(struct iwl_scan_req_umac_v17);
+ }
+
+ return 0;
+}
+
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
+{
+ int base_size, tail_size;
+ u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ base_size = iwl_scan_req_umac_get_size(scan_ver);
+ if (base_size)
+ return base_size;
+
+ if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
+ else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_cdb_scan_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+ else
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ if (iwl_mvm_is_scan_ext_chan_supported(mvm))
+ tail_size = sizeof(struct iwl_scan_req_umac_tail_v2);
+ else
+ tail_size = sizeof(struct iwl_scan_req_umac_tail_v1);
+
+ return base_size +
+ sizeof(struct iwl_scan_channel_cfg_umac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ tail_size;
+ }
+ return sizeof(struct iwl_scan_req_lmac) +
+ sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels +
+ sizeof(struct iwl_scan_probe_req_v1);
+}
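+
+/*
+ * Worked example (hypothetical numbers): for a UMAC firmware without
+ * a versioned command size, a base of IWL_SCAN_REQ_UMAC_SIZE_V8, an
+ * 8-byte per-channel config, n_scan_channels == 52 and a 24-byte tail
+ * would give base + 8 * 52 + 24 = base + 440 bytes; the real figures
+ * come from the struct sizes and mvm->fw->ucode_capa.n_scan_channels.
+ */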
+
+/*
+ * This function is used in the NIC restart flow to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int uid, i;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+ if (uid >= 0) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ mvm->scan_uid_status[uid] = 0;
+ }
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+ if (uid >= 0) {
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if (!iwlwifi_mod_params.fw_restart)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ mvm->scan_uid_status[uid] = 0;
+ }
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
+ mvm->scan_uid_status[uid] = 0;
+ }
+
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_REGULAR);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_SCHED);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_INT_MLO);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
+ /* We shouldn't have any UIDs still set. Loop over all the
+ * UIDs to make sure there's nothing left there and warn if
+ * any is found.
+ */
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
+ }
+ } else {
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
+
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
+ !iwlwifi_mod_params.fw_restart) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+ }
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+ int ret;
+
+ IWL_DEBUG_SCAN(mvm,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mvm->scan_status);
+
+ if (!(mvm->scan_status & type))
+ return 0;
+
+ if (!iwl_trans_device_enabled(mvm->trans)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iwl_mvm_scan_stop_wait(mvm, type);
+ if (!ret)
+ mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ else
+ IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n");
+
+out:
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above.
+ */
+ mvm->scan_status &= ~type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ if (notify) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size, i;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
+ hweight16(vif->valid_links) == 1)
+ return -EINVAL;
+
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* set the requested channels */
+ for (i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mvm->hw->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mvm->hw->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
+ IWL_MVM_SCAN_INT_MLO);
+ kfree(req);
+
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
+ return ret;
+}
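+
+/*
+ * Worked example for the rate mask built above: a band advertising
+ * n_bitrates == 12 yields (1 << 12) - 1 == 0xfff, i.e. every bitrate
+ * index 0..11 enabled for the internal scan's probe requests.
+ */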
+
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
+ return -EBUSY;
+ }
+
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ rcu_read_unlock();
+
+ if (!n_channels)
+ return -EINVAL;
+
+ return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
+}
+
+static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
+ enum nl80211_band band,
+ u16 phy_chan_num)
+{
+ struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band];
+ int chan_idx;
+
+ if (WARN_ON_ONCE(!sband))
+ return -EINVAL;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel = &sband->channels[chan_idx];
+
+ if (channel->hw_value == phy_chan_num)
+ return chan_idx;
+ }
+
+ return -EINVAL;
+}
+
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_umac_scan_channel_survey_notif *notif =
+ (void *)pkt->data;
+ struct iwl_mvm_acs_survey_channel *info;
+ enum nl80211_band band;
+ int chan_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ size_t n_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+
+ mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey,
+ channels, n_channels),
+ GFP_KERNEL);
+
+ if (!mvm->acs_survey)
+ return;
+
+ mvm->acs_survey->n_channels = n_channels;
+ n_channels = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ mvm->acs_survey->bands[band] =
+ &mvm->acs_survey->channels[n_channels];
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+ }
+
+ band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band));
+ chan_idx = iwl_mvm_chanidx_from_phy(mvm, band,
+ le32_to_cpu(notif->channel));
+ if (WARN_ON_ONCE(chan_idx < 0))
+ return;
+
+ IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n",
+ mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq);
+
+ info = &mvm->acs_survey->bands[band][chan_idx];
+
+ /* Times are all in ms */
+ info->time = le32_to_cpu(notif->active_time);
+ info->time_busy = le32_to_cpu(notif->busy_time);
+ info->time_rx = le32_to_cpu(notif->rx_time);
+ info->time_tx = le32_to_cpu(notif->tx_time);
+ info->noise =
+ iwl_average_neg_dbm(notif->noise, ARRAY_SIZE(notif->noise));
+}
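+
+/*
+ * Worked example for the lazy allocation above (hypothetical channel
+ * counts): with 14 channels on 2.4 GHz and 32 on 5 GHz, the flat
+ * channels[] array holds 46 entries and the band slices point at
+ * bands[NL80211_BAND_2GHZ] = &channels[0] and
+ * bands[NL80211_BAND_5GHZ] = &channels[14], so a (band, chan_idx)
+ * pair indexes directly into its own band's run.
+ */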
diff --git a/sys/contrib/dev/iwlwifi/mvm/sf.c b/sys/contrib/dev/iwlwifi/mvm/sf.c
new file mode 100644
index 000000000000..16285ae7cae9
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/sf.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ */
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+ struct ieee80211_vif *ignore_vif;
+ struct ieee80211_sta *sta_vif_ap_sta;
+ enum iwl_sf_state sta_vif_state;
+ u32 num_active_macs;
+};
+
+/*
+ * Count bound interfaces which are not P2P, excluding data->ignore_vif.
+ * If a bound station vif exists, data->sta_vif_ap_sta and
+ * data->sta_vif_state are filled in from it.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_active_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif == data->ignore_vif || !mvmvif->deflink.phy_ctxt ||
+ vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return;
+
+ data->num_active_macs++;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ data->sta_vif_ap_sta = mvmvif->ap_sta;
+ if (vif->cfg.assoc)
+ data->sta_vif_state = SF_FULL_ON;
+ else
+ data->sta_vif_state = SF_INIT_OFF;
+ }
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+ cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+ {
+ cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+ cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_MCAST_AGING_TIMER),
+ cpu_to_le32(SF_MCAST_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_BA_AGING_TIMER),
+ cpu_to_le32(SF_BA_IDLE_TIMER)
+ },
+ {
+ cpu_to_le32(SF_TX_RE_AGING_TIMER),
+ cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+ struct iwl_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_sta *sta)
+{
+ int i, j, watermark;
+ u8 max_rx_nss = 0;
+ bool is_legacy = true;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+	/*
+	 * If we are in the association flow, check the antenna configuration
+	 * capabilities of the AP station and choose the watermark accordingly.
+	 */
+ if (sta) {
+ /* find the maximal NSS number among all links (if relevant) */
+ rcu_read_lock();
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ if (link_sta->ht_cap.ht_supported ||
+ link_sta->vht_cap.vht_supported ||
+ link_sta->eht_cap.has_eht ||
+ link_sta->he_cap.has_he) {
+ is_legacy = false;
+ max_rx_nss = max(max_rx_nss, link_sta->rx_nss);
+ }
+ }
+ rcu_read_unlock();
+
+ if (!is_legacy) {
+ switch (max_rx_nss) {
+ case 1:
+ watermark = SF_W_MARK_SISO;
+ break;
+ case 2:
+ watermark = SF_W_MARK_MIMO2;
+ break;
+ default:
+ watermark = SF_W_MARK_MIMO3;
+ break;
+ }
+ } else {
+ watermark = SF_W_MARK_LEGACY;
+ }
+ /* default watermark value for unassociated mode. */
+ } else {
+ watermark = SF_W_MARK_MIMO2;
+ }
+ sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+ for (i = 0; i < SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+
+ if (sta) {
+ BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+ sizeof(sf_full_timeout));
+ } else {
+ BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+ sizeof(__le32) * SF_NUM_SCENARIO *
+ SF_NUM_TIMEOUT_TYPES);
+
+ memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+ sizeof(sf_full_timeout_def));
+ }
+}
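+
+/*
+ * Worked example for the watermark selection above: an MLD station
+ * with two HE-capable links reporting rx_nss 1 and 2 yields
+ * max_rx_nss == 2, so SF_W_MARK_MIMO2 is used; a pure legacy (11a/g)
+ * peer keeps is_legacy true and falls back to SF_W_MARK_LEGACY.
+ */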
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum iwl_sf_state new_state)
+{
+ struct iwl_sf_cfg_cmd sf_cmd = {
+ .state = cpu_to_le32(new_state),
+ };
+ int ret = 0;
+
+ /*
+ * If an associated AP sta changed its antenna configuration, the state
+ * will remain FULL_ON but SF parameters need to be reconsidered.
+ */
+ if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+ return 0;
+
+ switch (new_state) {
+ case SF_UNINIT:
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ break;
+ case SF_FULL_ON:
+ if (!sta) {
+ IWL_ERR(mvm,
+ "No station: Cannot switch SF to FULL_ON\n");
+ return -EINVAL;
+ }
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
+ break;
+ case SF_INIT_OFF:
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+ new_state);
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ if (!ret)
+ mvm->sf_state = new_state;
+
+ return ret;
+}
+
+/*
+ * Update the Smart FIFO:
+ * Count bound interfaces that are not to be removed, ignoring P2P devices,
+ * and set the new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+ bool remove_vif)
+{
+ enum iwl_sf_state new_state;
+ struct iwl_mvm_vif *mvmvif = NULL;
+ struct iwl_mvm_active_iface_iterator_data data = {
+ .ignore_vif = changed_vif,
+ .sta_vif_state = SF_UNINIT,
+ };
+ struct ieee80211_sta *sta = NULL;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
+	/*
+	 * Ignore the call if we are in the HW restart flow, or if the
+	 * handled vif is a P2P device.
+	 */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+ return 0;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bound_iface_iterator,
+ &data);
+
+ /* If changed_vif exists and is not to be removed, add to the count */
+ if (changed_vif && !remove_vif)
+ data.num_active_macs++;
+
+ switch (data.num_active_macs) {
+ case 0:
+ /* If there are no active macs - change state to SF_INIT_OFF */
+ new_state = SF_INIT_OFF;
+ break;
+ case 1:
+ if (remove_vif) {
+ /* The one active mac left is of type station
+ * and we filled the relevant data during iteration
+ */
+ new_state = data.sta_vif_state;
+ sta = data.sta_vif_ap_sta;
+ } else {
+ if (WARN_ON(!changed_vif))
+ return -EINVAL;
+ if (changed_vif->type != NL80211_IFTYPE_STATION) {
+ new_state = SF_UNINIT;
+ } else if (changed_vif->cfg.assoc &&
+ changed_vif->bss_conf.dtim_period) {
+ mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+ sta = mvmvif->ap_sta;
+ new_state = SF_FULL_ON;
+ } else {
+ new_state = SF_INIT_OFF;
+ }
+ }
+ break;
+ default:
+ /* If there are multiple active macs - change to SF_UNINIT */
+ new_state = SF_UNINIT;
+ }
+
+ return iwl_mvm_sf_config(mvm, sta, new_state);
+}
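+
+/*
+ * The resulting state machine, roughly (glossing over the remove_vif
+ * vs. changed_vif distinction in the single-MAC case):
+ *
+ *   active MACs  remaining/changed vif              new state
+ *   -----------  ---------------------------------  -----------
+ *   0            -                                  SF_INIT_OFF
+ *   1            station, associated (DTIM known)   SF_FULL_ON
+ *   1            station, not associated            SF_INIT_OFF
+ *   1            not a station                      SF_UNINIT
+ *   >1           -                                  SF_UNINIT
+ */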
diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.c b/sys/contrib/dev/iwlwifi/mvm/sta.c
new file mode 100644
index 000000000000..3fc774c2ca39
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/sta.c
@@ -0,0 +1,4415 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2015, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <net/mac80211.h>
+#if defined(__FreeBSD__)
+#include <linux/cache.h>
+#endif
+
+#include "mvm.h"
+#include "sta.h"
+#include "rs.h"
+
+/*
+ * Newer versions of the ADD_STA command added new fields at the end of the
+ * structure, so sending the size of the relevant API version's structure is
+ * enough to support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+ if (iwl_mvm_has_new_rx_api(mvm) ||
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return sizeof(struct iwl_mvm_add_sta_cmd);
+ else
+ return sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
+int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
+{
+ int sta_id;
+ u32 reserved_ids = 0;
+
+ BUILD_BUG_ON(IWL_STATION_COUNT_MAX > 32);
+ WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
+ /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
+ if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex)))
+ return sta_id;
+ }
+ return IWL_INVALID_STA;
+}
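+
+/*
+ * Worked example: on a non-station interface, sta_id 0 is reserved,
+ * so with fw_id_to_mac_id[1] already in use the loop skips slot 0,
+ * sees slot 1 taken, and returns 2; if every slot up to num_stations
+ * is in use, IWL_INVALID_STA is returned.
+ */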
+
+/* Calculate the ampdu density and max size */
+u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_bss_conf *link_conf,
+ u32 *_agg_size)
+{
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ if (WARN_ON(!link_sta))
+ return 0;
+
+	/* Note that we always use only legacy and the highest supported PPDUs,
+	 * so of Draft P802.11be D.30 Table 10-12a ("Fields used for calculating
+	 * the maximum A-MPDU size of various PPDU types in different bands"),
+	 * we only need to worry about the highest supported PPDU type here.
+	 */
+
+ if (link_sta->ht_cap.ht_supported) {
+ agg_size = link_sta->ht_cap.ampdu_factor;
+ mpdu_dens = link_sta->ht_cap.ampdu_density;
+ }
+
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+ /* overwrite HT values on 6 GHz */
+ mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ } else if (link_sta->vht_cap.vht_supported) {
+ /* if VHT supported overwrite HT value */
+ agg_size = u32_get_bits(link_sta->vht_cap.cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ }
+
+ /* D6.0 10.12.2 A-MPDU length limit rules
+ * A STA indicates the maximum length of the A-MPDU preEOF padding
+ * that it can receive in an HE PPDU in the Maximum A-MPDU Length
+ * Exponent field in its HT Capabilities, VHT Capabilities,
+ * and HE 6 GHz Band Capabilities elements (if present) and the
+ * Maximum AMPDU Length Exponent Extension field in its HE
+ * Capabilities element
+ */
+ if (link_sta->he_cap.has_he)
+ agg_size +=
+ u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (link_sta->eht_cap.has_eht)
+ agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
+ IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
+
+ /* Limit to max A-MPDU supported by FW */
+ agg_size = min_t(u32, agg_size,
+ STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
+
+ *_agg_size = agg_size;
+ return mpdu_dens;
+}
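+
+/*
+ * Worked example for the exponent accumulation above (hypothetical
+ * capabilities): on a 2.4/5 GHz link without VHT, an HT ampdu_factor
+ * of 3 plus an HE Maximum A-MPDU Length Exponent Extension of 2
+ * gives agg_size = 5 before the final min_t() clamp against the
+ * firmware's 4M limit; the caller then shifts the result into the
+ * STA_FLG_MAX_AGG_SIZE_SHIFT position of the station flags.
+ */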
+
+u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
+{
+ u8 uapsd_acs = 0;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd_acs |= BIT(AC_BK);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd_acs |= BIT(AC_BE);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd_acs |= BIT(AC_VI);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd_acs |= BIT(AC_VO);
+
+ return uapsd_acs | uapsd_acs << 4;
+}
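+
+/*
+ * Worked example: assuming AC_BK..AC_VO map to bits 0..3, a station
+ * with U-APSD enabled on BK and VO only sets uapsd_acs = 0x9, and
+ * uapsd_acs | uapsd_acs << 4 == 0x99 mirrors the same four ACs into
+ * both nibbles of the firmware field.
+ */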
+
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update, unsigned int flags)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd add_sta_cmd = {
+ .sta_id = mvm_sta->deflink.sta_id,
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .add_modify = update ? 1 : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
+ STA_FLG_MIMO_EN_MSK |
+ STA_FLG_RTS_MIMO_PROT),
+ .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
+ };
+ int ret;
+ u32 status;
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ add_sta_cmd.station_type = mvm_sta->sta_type;
+
+ if (!update || (flags & STA_MODIFY_QUEUES)) {
+ memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ add_sta_cmd.tfd_queue_msk =
+ cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+ } else {
+ WARN_ON(flags & STA_MODIFY_QUEUES);
+ }
+ }
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
+ case IEEE80211_STA_RX_BW_160:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+ fallthrough;
+ case IEEE80211_STA_RX_BW_80:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+ fallthrough;
+ case IEEE80211_STA_RX_BW_40:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+ fallthrough;
+ case IEEE80211_STA_RX_BW_20:
+ if (sta->deflink.ht_cap.ht_supported)
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+ break;
+ }
+
+ switch (sta->deflink.rx_nss) {
+ case 1:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case 2:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+ break;
+ case 3 ... 8:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+ break;
+ }
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
+
+ if (sta->deflink.ht_cap.ht_supported ||
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
+ add_sta_cmd.station_flags_msk |=
+ cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+ STA_FLG_AGG_MPDU_DENS_MSK);
+
+ mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
+ &mvm_sta->vif->bss_conf,
+ &agg_size);
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+
+ if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
+ add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+
+ if (sta->wme) {
+ add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+ add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
+ add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &add_sta_cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "ADD_STA failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
+{
+ struct iwl_mvm_baid_data *data =
+ timer_container_of(data, t, session_timer);
+ struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ unsigned long timeout;
+ unsigned int sta_id;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (!ba_data->timeout)
+ goto unlock;
+
+ timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* Timer expired */
+ sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
+ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);
+
+	/*
+	 * sta should be valid unless the following happens:
+	 * The firmware asserts, which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta into
+	 * the fw_id_to_mac_id pointer table. mac80211 can't stop the
+	 * A-MPDU session and hence the timer continues to run. Then
+	 * the timer expires and sta is NULL.
+	 */
+ if (IS_ERR_OR_NULL(sta))
+ goto unlock;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+ sta->addr, ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
+
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+ unsigned long disable_agg_tids,
+ bool remove_queue)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 status;
+ u8 sta_id;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ mvmsta->tid_disable_agg |= disable_agg_tids;
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->deflink.sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ if (disable_agg_tids)
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+ if (remove_queue)
+ cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ rcu_read_unlock();
+
+ /* Notify FW of queue removal from the STA queues */
+ status = ADD_STA_SUCCESS;
+ return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+}
+
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int sta_id, u16 *queueptr, u8 tid)
+{
+ int queue = *queueptr;
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (mvm->sta_remove_requires_queue_remove) {
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD);
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
+ };
+
+ if (tid == IWL_MAX_TID_COUNT)
+ tid = IWL_MGMT_TID;
+
+ remove_cmd.u.remove.tid = cpu_to_le32(tid);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
+ sizeof(remove_cmd),
+ &remove_cmd);
+ } else {
+ ret = 0;
+ }
+
+ iwl_trans_txq_free(mvm->trans, queue);
+ *queueptr = IWL_MVM_INVALID_QUEUE;
+
+ return ret;
+ }
+
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
+ return 0;
+
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ cmd.action = mvm->queue_info[queue].tid_bitmap ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Disabling TXQ #%d tids=0x%x\n",
+ queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /* If the queue is still enabled - nothing left to do in this func */
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE)
+ return 0;
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+
+ /* Make sure queue info is correct even though we overwrite it */
+ WARN(mvm->queue_info[queue].tid_bitmap,
+ "TXQ #%d info out-of-sync - tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
+
+ /* If we are here - the queue is freed and we can zero out these vals */
+ mvm->queue_info[queue].tid_bitmap = 0;
+
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ spin_lock_bh(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
+ }
+
+	/* Whether or not this is a reserved TXQ for a STA, mark it as unreserved */
+ mvm->queue_info[queue].reserved = false;
+
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
+ sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+ if (ret)
+ IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+ queue, ret);
+ return ret;
+}
+
+static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long agg_tids = 0;
+ u8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return -EINVAL;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ agg_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ return agg_tids;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks the queue as free. It DOESN'T delete a BA
+ * agreement, and doesn't disable the queue.
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long disable_agg_tids = 0;
+ u8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ /* Unmap MAC queues and TIDs from this queue */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ disable_agg_tids |= BIT(tid);
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+
+ spin_lock_bh(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ spin_unlock_bh(&mvm->add_stream_lock);
+ }
+
+ mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+ spin_unlock_bh(&mvmsta->lock);
+
+ rcu_read_unlock();
+
+ /*
+ * The TX path may have been using this TXQ_ID from the tid_data,
+ * so make sure it's no longer running so that we can safely reuse
+ * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
+ * above, but nothing guarantees we've stopped using them. Thus,
+ * without this, we could get to iwl_mvm_disable_txq() and remove
+ * the queue while still sending frames to it.
+ */
+ synchronize_net();
+
+ return disable_agg_tids;
+}
+
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+ struct ieee80211_sta *old_sta,
+ u8 new_sta_id)
+{
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id, tid;
+ unsigned long disable_agg_tids = 0;
+ bool same_sta;
+ u16 queue_tmp = queue;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid = mvm->queue_info[queue].txq_tid;
+
+ same_sta = sta_id == new_sta_id;
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (WARN_ON(!mvmsta))
+ return -EINVAL;
+
+ disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+ /* Disable the queue */
+ if (disable_agg_tids)
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+
+ ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Failed to free inactive queue %d (ret=%d)\n",
+ queue, ret);
+
+ return ret;
+ }
+
+ /* If TXQ is allocated to another STA, update removal in FW */
+ if (!same_sta)
+ iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+ return 0;
+}
+
+static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
+ unsigned long tfd_queue_mask, u8 ac)
+{
+ int queue = 0;
+ u8 ac_to_queue[IEEE80211_NUM_ACS];
+ int i;
+
+ /*
+ * This protects us against grabbing a queue that's being reconfigured
+ * by the inactivity checker.
+ */
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+
+ /* See what ACs the existing queues for this STA have */
+ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+ /* Only DATA queues can be shared */
+ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
+ continue;
+
+ ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+ }
+
+ /*
+ * The queue to share is chosen only from DATA queues as follows (in
+ * descending priority):
+ * 1. An AC_BE queue
+ * 2. Same AC queue
+ * 3. Highest AC queue that is lower than new AC
+ * 4. Any existing AC (there always is at least 1 DATA queue)
+ */
+
+ /* Priority 1: An AC_BE queue */
+ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BE];
+ /* Priority 2: Same AC queue */
+ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[ac];
+ /* Priority 3a: If new AC is VO and VI exists - use VI */
+ else if (ac == IEEE80211_AC_VO &&
+ ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+ /* Priority 3b: No BE so only AC less than the new one is BK */
+ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BK];
+ /* Priority 4a: No BE nor BK - use VI if exists */
+ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+ /* Priority 4b: No BE, BK nor VI - use VO if exists */
+ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VO];
+
+ /* Make sure queue found (or not) is legal */
+ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+ !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
+ IWL_ERR(mvm, "No DATA queues available to share\n");
+ return -ENOSPC;
+ }
+
+ return queue;
+}
+
+/* Re-configure the SCD for a queue that has already been configured */
+static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
+ "Trying to reconfig unallocated queue %d\n", queue))
+ return -ENXIO;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
+/*
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does the
+ * redirection in such a case; otherwise - if no redirection is required - it
+ * does nothing, unless the %force param is true.
+ */
+static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force, struct iwl_mvm_txq *txq)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_DISABLE_QUEUE,
+ };
+ bool shared_queue;
+ int ret;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -EINVAL;
+
+ /*
+ * If the AC is lower than current one - FIFO needs to be redirected to
+ * the lowest one of the streams in the queue. Check if this is needed
+ * here.
+ * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
+ * value 3 and VO with value 0, so to check if ac X is lower than ac Y
+ * we need to check if the numerical value of X is LARGER than of Y.
+ */
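+ /* e.g. a BE (2) TID joining a queue on VI (1): 2 > 1, so redirect */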
+ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "No redirection needed on TXQ #%d\n",
+ queue);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
+ shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
+ queue, iwl_mvm_ac_to_tx_fifo[ac]);
+
+ /* Stop the queue and wait for it to empty */
+ set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
+ queue);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Before redirecting the queue we need to de-activate it */
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
+ ret);
+
+ /* Make sure the SCD wrptr is correctly set before reconfiguring */
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+
+ /* Update the TID "owner" of the queue */
+ mvm->queue_info[queue].txq_tid = tid;
+
+ /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+
+ /* Redirect to lower AC */
+ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
+ cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
+
+ /* Update AC marking of the queue */
+ mvm->queue_info[queue].mac80211_ac = ac;
+
+ /*
+ * Mark the queue as shared in the transport if it is shared.
+ * Note this has to be done after queue enablement because enablement
+ * can also set this value, and there is no indication there of shared
+ * queues.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+out:
+ /* Continue using the queue */
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+
+ return ret;
+}
+
+static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
+ u8 minq, u8 maxq)
+{
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues,
+ "max queue %d >= num_of_queues (%d)", maxq,
+ mvm->trans->mac_cfg->base->num_of_queues))
+ maxq = mvm->trans->mac_cfg->base->num_of_queues - 1;
+
+ /* This should not be hit with new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return -ENOSPC;
+
+ /* Start by looking for a free queue */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].tid_bitmap == 0 &&
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+ return i;
+
+ return -ENOSPC;
+}
+
+static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
+{
+ int max_size = IWL_DEFAULT_QUEUE_SIZE;
+ unsigned int link_id;
+
+ /* this queue isn't used for traffic (cab_queue) */
+ if (!sta)
+ return IWL_MGMT_QUEUE_SIZE;
+
+ rcu_read_lock();
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
+ struct ieee80211_link_sta *link =
+ rcu_dereference(sta->link[link_id]);
+
+ if (!link)
+ continue;
+
+ /* support for 512 ba size */
+ if (link->eht_cap.has_eht &&
+ max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
+ max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
+
+ /* support for 256 ba size */
+ if (link->he_cap.has_he &&
+ max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
+ max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ }
+
+ rcu_read_unlock();
+ return max_size;
+}
+
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ u8 sta_id, u8 tid, unsigned int timeout)
+{
+ int queue, size;
+ u32 sta_mask = 0;
+
+ if (tid == IWL_MAX_TID_COUNT) {
+ tid = IWL_MGMT_TID;
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mvm->trans->mac_cfg->base->min_txq_size);
+ } else {
+ size = iwl_mvm_get_queue_size(sta);
+ }
+
+ if (sta) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *link =
+ rcu_dereference_protected(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (!link)
+ continue;
+
+ sta_mask |= BIT(link->sta_id);
+ }
+ rcu_read_unlock();
+ } else {
+ sta_mask |= BIT(sta_id);
+ }
+
+ if (!sta_mask)
+ return -EINVAL;
+
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
+ tid, size, timeout);
+
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
+
+ return queue;
+}
+
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac,
+ int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
+ int queue = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating queue for sta %d on tid %d\n",
+ mvmsta->deflink.sta_id, tid);
+ queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
+ tid, wdg_timeout);
+ if (queue < 0)
+ return queue;
+
+ mvmtxq->txq_id = queue;
+ mvm->tvqm_info[queue].txq_tid = tid;
+ mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ spin_unlock_bh(&mvmsta->lock);
+
+ return 0;
+}
+
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ int queue, u8 sta_id, u8 tid)
+{
+ bool enable_queue = true;
+
+ /* Make sure this TID isn't already enabled */
+ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+ IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+ queue, tid);
+ return false;
+ }
+
+ /* Update mappings and refcounts */
+ if (mvm->queue_info[queue].tid_bitmap)
+ enable_queue = false;
+
+ mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+ mvm->queue_info[queue].ra_sta_id = sta_id;
+
+ if (enable_queue) {
+ if (tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+ mvm->queue_info[queue].txq_tid = tid;
+ }
+
+ if (sta) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_tid(sta, tid);
+
+ mvmtxq->txq_id = queue;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d tids=0x%x\n",
+ queue, mvm->queue_info[queue].tid_bitmap);
+
+ return enable_queue;
+}
+
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int wdg_timeout)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_ENABLE_QUEUE,
+ .window = cfg->frame_limit,
+ .sta_id = cfg->sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = cfg->fifo,
+ .aggregate = cfg->aggregate,
+ .tid = cfg->tid,
+ };
+ bool inc_ssn;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Send the enabling command if we need to */
+ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
+ return false;
+
+ inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+ NULL, wdg_timeout);
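+ /*
+ * The transport may have bumped the SSN by one (SCD hardware
+ * quirk); keep the FW command in sync with what was programmed.
+ */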
+ if (inc_ssn)
+ le16_add_cpu(&cmd.ssn, 1);
+
+ WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+ "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+ return inc_ssn;
+}
+
+static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ return;
+ }
+
+ mvm->queue_info[queue].txq_tid = tid;
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret = true;
+
+ /* queue sharing is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true,
+ iwl_mvm_txq_from_tid(sta, tid));
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->deflink.sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive.
+ * If only some of the queue TIDs are inactive - unmap them from the queue.
+ *
+ * Returns %true if all TIDs were removed and the queue could be reused.
+ */
+static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap,
+ unsigned long *unshare_queues,
+ unsigned long *changetid_queues)
+{
+ unsigned int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+
+ /* Don't mark as inactive any TID that has an active BA */
+ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - return that it can be reused */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
+ return true;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all TIDs timed-out.
+ * Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ u16 q_tid_bitmap;
+
+ mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+ q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ *
+ * Mark this queue in the right bitmap, we'll send the command
+ * to the firmware later.
+ */
+ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
+ set_bit(queue, changetid_queues);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /*
+ * There may be different TIDs with the same mac queues, so make
+ * sure all TIDs have existing corresponding mac queues enabled
+ */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+
+ /* If the queue is marked as shared - "unshare" it */
+ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ set_bit(queue, unshare_queues);
+ }
+
+ return false;
+}
+
+/*
+ * Check for inactivity - this includes checking if any queue
+ * can be unshared and finding one (and only one) that can be
+ * reused.
+ * This function is also invoked as a sort of clean-up task,
+ * in which case @alloc_for_sta is IWL_INVALID_STA.
+ *
+ * Returns the queue number, or -ENOSPC.
+ */
+static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
+{
+ unsigned long now = jiffies;
+ unsigned long unshare_queues = 0;
+ unsigned long changetid_queues = 0;
+ int i, ret, free_queue = -ENOSPC;
+ struct ieee80211_sta *queue_owner = NULL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -ENOSPC;
+
+ rcu_read_lock();
+
+ /* we skip the CMD queue below by starting at 1 */
+ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
+
+ for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+ if (!queue_tid_bitmap)
+ continue;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
+ continue;
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here - the queue hadn't been served recently and is
+ * in use
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could
+ * be that it was removed since getting the queues, and in this
+ * case it should've inactivated its queues anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap,
+ &unshare_queues,
+ &changetid_queues);
+ if (ret && free_queue < 0) {
+ queue_owner = sta;
+ free_queue = i;
+ }
+ /* only unlock sta lock - we still need the queue info lock */
+ spin_unlock_bh(&mvmsta->lock);
+ }
+
+ /* Reconfigure queues requiring reconfiguration */
+ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_unshare_queue(mvm, i);
+ for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
+ iwl_mvm_change_queue_tid(mvm, i);
+
+ rcu_read_unlock();
+
+ if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) {
+ ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
+ alloc_for_sta);
+ if (ret)
+ return ret;
+ }
+
+ return free_queue;
+}
+
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac, int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
+ .sta_id = mvmsta->deflink.sta_id,
+ .tid = tid,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif);
+ int queue = -1;
+ u16 queue_tmp;
+ unsigned long disable_agg_tids = 0;
+ enum iwl_mvm_agg_state queue_state;
+ bool shared_queue = false, inc_ssn;
+ int ssn;
+ unsigned long tfd_queue_mask;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
+ spin_lock_bh(&mvmsta->lock);
+ tfd_queue_mask = mvmsta->tfd_queue_msk;
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (tid == IWL_MAX_TID_COUNT) {
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+ queue);
+
+ /* If no such queue is found, we'll use a DATA queue instead */
+ }
+
+ if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+ (mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_RESERVED)) {
+ queue = mvmsta->reserved_queue;
+ mvm->queue_info[queue].reserved = true;
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+ }
+
+ if (queue < 0)
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ /* try harder - perhaps kill an inactive queue */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
+ }
+
+ /* No free queue - we'll have to share */
+ if (queue <= 0) {
+ queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+ if (queue > 0) {
+ shared_queue = true;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+ }
+ }
+
+ /*
+ * Mark TXQ as ready, even though it hasn't been fully configured yet,
+ * to make sure no one else takes it.
+ * This allows us to avoid re-acquiring the lock at the end of the
+ * configuration. On error we'll mark it back as free.
+ */
+ if (queue > 0 && !shared_queue)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+ /* This shouldn't happen - out of queues */
+ if (WARN_ON(queue <= 0)) {
+ IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+ tid, cfg.sta_id);
+ return queue;
+ }
+
+ /*
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
+ * as aggregatable.
+ * Mark all DATA queues as allowing to be aggregated at some point
+ */
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating %squeue #%d to sta %d on tid %d\n",
+ shared_queue ? "shared " : "", queue,
+ mvmsta->deflink.sta_id, tid);
+
+ if (shared_queue) {
+ /* Disable any open aggs on this queue */
+ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+ if (disable_agg_tids) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+ queue);
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+ }
+ }
+
+ inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
+
+ /*
+ * Mark the queue as shared in the transport if it is shared.
+ * Note this has to be done after queue enablement because enablement
+ * can also set this value, and there is no indication there of shared
+ * queues.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+ spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn) {
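+ /*
+ * seq_number holds the full IEEE sequence control field; the
+ * SN occupies bits 4-15, so += 0x10 advances the SN by one.
+ */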
+ mvmsta->tid_data[tid].seq_number += 0x10;
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ }
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+ queue_state = mvmsta->tid_data[tid].state;
+
+ if (mvmsta->reserved_queue == queue)
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (!shared_queue) {
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ /* If we need to re-enable aggregations... */
+ if (queue_state == IWL_AGG_ON) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ goto out_err;
+ }
+ } else {
+ /* Redirect queue, if needed */
+ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false,
+ iwl_mvm_txq_from_tid(sta, tid));
+ if (ret)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ queue_tmp = queue;
+ iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);
+
+ return ret;
+}
+
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+ add_stream_wk);
+
+ guard(mvm)(mvm);
+
+ /* will reschedule to run after restart */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ||
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
+
+ while (!list_empty(&mvm->add_stream_txqs)) {
+ struct iwl_mvm_txq *mvmtxq;
+ struct ieee80211_txq *txq;
+ u8 tid;
+
+ mvmtxq = list_first_entry(&mvm->add_stream_txqs,
+ struct iwl_mvm_txq, list);
+
+ txq = container_of((void *)mvmtxq, struct ieee80211_txq,
+ drv_priv);
+ tid = txq->tid;
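+ /*
+ * mac80211 uses tid == IEEE80211_NUM_TIDS for the management
+ * TXQ; the driver tracks it as IWL_MAX_TID_COUNT instead
+ */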
+ if (tid == IEEE80211_NUM_TIDS)
+ tid = IWL_MAX_TID_COUNT;
+
+ /*
+ * We can't really do much here, but if this fails we can't
+ * transmit anyway - so just don't transmit the frame etc.
+ * and let them back up ... we've tried our best to allocate
+ * a queue in the function itself.
+ */
+ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+ spin_lock_bh(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ spin_unlock_bh(&mvm->add_stream_lock);
+ continue;
+ }
+
+ /* now we're ready, any remaining races/concurrency will be
+ * handled in iwl_mvm_mac_itxq_xmit()
+ */
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+
+ iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
+ }
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum nl80211_iftype vif_type)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int queue;
+
+ /* queue reserving is disabled on new TX path */
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return 0;
+
+ /* run the general cleanup/unsharing of queues */
+ iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA);
+
+ /* Make sure we have free resources for this STA */
+ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
+ (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+ IWL_MVM_QUEUE_FREE))
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+ else
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ /* try again - this time kick out a queue if needed */
+ queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
+ if (queue < 0) {
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ }
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+ mvmsta->reserved_queue = queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+ queue, mvmsta->deflink.sta_id);
+
+ return 0;
+}
+
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ unsigned int wdg =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif);
+ int i;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvm_sta->deflink.sta_id,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ /* Make sure reserved queue is still marked as such (if allocated) */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+ int txq_id = tid_data->txq_id;
+ int ac;
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ ac = tid_to_mac80211_ac[i];
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d\n",
+ mvm_sta->deflink.sta_id, i);
+ txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
+ mvm_sta->deflink.sta_id,
+ i, wdg);
+ /*
+ * on failure, just set it to IWL_MVM_INVALID_QUEUE
+ * to try again later; we have no other good way of
+ * failing here
+ */
+ if (txq_id < 0)
+ txq_id = IWL_MVM_INVALID_QUEUE;
+ tid_data->txq_id = txq_id;
+
+ /*
+ * Since we don't set the seq number after reset, and HW
+ * sets it now, an FW reset will cause the seq num to start
+ * at 0 again. The driver then needs to update it
+ * internally as well, so it stays in sync with the real value.
+ */
+ tid_data->seq_number = 0;
+ } else {
+ u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id ==
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->deflink.sta_id, i,
+ txq_id);
+
+ iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
+ }
+}
+
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr,
+ u16 mac_id, u16 color)
+{
+ struct iwl_mvm_add_sta_cmd cmd;
+ int ret;
+ u32 status = ADD_STA_SUCCESS;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.sta_id = sta->sta_id;
+
+ if (iwl_mvm_has_new_station_api(mvm->fw) &&
+ sta->type == IWL_STA_AUX_ACTIVITY)
+ cmd.mac_id_n_color = cpu_to_le32(mac_id);
+ else
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ cmd.station_type = sta->type;
+
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
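+ /* keep TX aggregation disabled on all TIDs for this internal station */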
+ cmd.tid_disable_tx = cpu_to_le16(0xffff);
+
+ if (addr)
+ memcpy(cmd.addr, addr, ETH_ALEN);
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+ return 0;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+ status);
+ break;
+ }
+ return ret;
+}
+
+/* Initialize driver data of a new sta */
+int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, int sta_id, u8 sta_type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_rxq_dup_data *dup_data;
+ int i, ret = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color);
+ mvm_sta->vif = vif;
+
+ /* for MLD, sta_id(s) should be allocated for each link before calling
+ * this function
+ */
+ if (!mvm->mld_api_is_used) {
+ if (WARN_ON(sta_id == IWL_INVALID_STA))
+ return -EINVAL;
+
+ mvm_sta->deflink.sta_id = sta_id;
+ rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);
+
+ if (!mvm->trans->mac_cfg->gen2)
+ mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ else
+ mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+ }
+
+ mvm_sta->tt_tx_protection = false;
+ mvm_sta->sta_type = sta_type;
+
+ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ /*
+ * Mark all queues for this STA as unallocated and defer TX
+ * frames until the queue is allocated
+ */
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ INIT_LIST_HEAD(&mvmtxq->list);
+ atomic_set(&mvmtxq->tx_request, 0);
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ int q;
+
+ dup_data = kcalloc(mvm->trans->info.num_rxqs,
+ sizeof(*dup_data), GFP_KERNEL);
+ if (!dup_data)
+ return -ENOMEM;
+ /*
+ * Initialize all the last_seq values to 0xffff which can never
+ * compare equal to the frame's seq_ctrl in the check in
+ * iwl_mvm_is_dup() since the lower 4 bits are the fragment
+ * number and fragmented packets don't reach that function.
+ *
+ * This thus allows receiving a packet with seqno 0 and the
+ * retry bit set as the very first packet on a new TID.
+ */
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
+ memset(dup_data[q].last_seq, 0xff,
+ sizeof(dup_data[q].last_seq));
+ mvm_sta->dup_data = dup_data;
+ }
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+ ieee80211_vif_type_p2p(vif));
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * if rs is registered with mac80211, then "add station" will be handled
+ * via the corresponding ops, otherwise need to notify rate scaling here
+ */
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_add_sta(mvm, mvm_sta);
+ else
+ spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);
+
+ iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+
+ /* MPDUs are counted only when EMLSR is possible */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !sta->tdls && ieee80211_vif_is_mld(vif)) {
+ mvm_sta->mpdu_counters =
+ kcalloc(mvm->trans->info.num_rxqs,
+ sizeof(*mvm_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (mvm_sta->mpdu_counters)
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
+ spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
+ }
+
+ return 0;
+}
+
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int ret, sta_id;
+ bool sta_update = false;
+ unsigned int sta_flags = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
+ else
+ sta_id = mvm_sta->deflink.sta_id;
+
+ if (sta_id == IWL_INVALID_STA)
+ return -ENOSPC;
+
+ spin_lock_init(&mvm_sta->lock);
+
+ /* if this is a HW restart re-alloc existing queues */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_int_sta tmp_sta = {
+ .sta_id = sta_id,
+ .type = mvm_sta->sta_type,
+ };
+
+ /* First add an empty station since allocating
+ * a queue requires a valid station
+ */
+ ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ goto err;
+
+ iwl_mvm_realloc_queues_after_restart(mvm, sta);
+ sta_update = true;
+ sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
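+ /*
+ * with the new TX API the FW allocates queues via TVQM and no
+ * tfd_queue_msk is sent, so STA_MODIFY_QUEUES does not apply
+ */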
+ goto update_fw;
+ }
+
+ ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
+ sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
+ if (ret)
+ goto err;
+
+update_fw:
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
+ if (ret)
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (!sta->tdls) {
+ WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA);
+ mvmvif->deflink.ap_sta_id = sta_id;
+ } else {
+ WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA);
+ }
+ }
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+ return 0;
+
+err:
+ return ret;
+}
+
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->deflink.sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
+ mvmsta->deflink.sta_id);
+ break;
+ default:
+ ret = -EIO;
+#if defined(__linux__)
+ IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+ mvmsta->deflink.sta_id);
+#elif defined(__FreeBSD__)
+ IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",
+ mvmsta->deflink.sta_id, status);
+#endif
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station validate that the station is indeed known to the driver (sanity
+ * only).
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+ .sta_id = sta_id,
+ };
+ int ret;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* Note: internal stations are marked as error values */
+ if (!sta) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
+ sizeof(rm_sta_cmd), &rm_sta_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
+ &mvm_sta->tid_data[i].txq_id, i);
+ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ spin_lock_bh(&mvm->add_stream_lock);
+ mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ list_del_init(&mvmtxq->list);
+ clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ spin_unlock_bh(&mvm->add_stream_lock);
+ }
+}
+
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ u16 txq_id;
+ int ret;
+
+ spin_lock_bh(&mvm_sta->lock);
+ txq_id = mvm_sta->tid_data[i].txq_id;
+ spin_unlock_bh(&mvm_sta->lock);
+
+ if (txq_id == IWL_MVM_INVALID_QUEUE)
+ continue;
+
+ ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Execute the common part of station removal for both MLD and
+ * non-MLD modes.
+ */
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link =
+ mvmvif->link[link_sta->link_id];
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ u8 sta_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
+ lockdep_is_held(&mvm->mutex));
+ sta_id = mvm_link_sta->sta_id;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvm_link->ap_sta_id == sta_id) {
+ /* first remove remaining keys */
+ iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link,
+ link_sta->link_id);
+
+ mvm_link->ap_sta_id = IWL_INVALID_STA;
+ }
+
+ /*
+ * This shouldn't happen - the TDLS channel switch should be canceled
+ * before the STA is removed.
+ */
+ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
+ mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
+ cancel_delayed_work(&mvm->tdls_cs.dwork);
+ }
+}
+
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+ if (ret)
+ return ret;
+
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
+ mvm_sta->tfd_queue_msk);
+ if (ret)
+ return ret;
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+ } else {
+ u32 q_mask = mvm_sta->tfd_queue_msk;
+
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ q_mask);
+ }
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+
+ iwl_mvm_disable_sta_queues(mvm, vif, sta);
+
+ /* If there is a TXQ still marked as reserved - free it */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->deflink.sta_id, reserved_txq, *status))
+ return -EINVAL;
+
+ *status = IWL_MVM_QUEUE_FREE;
+ }
+
+ iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);
+
+ return ret;
+}
+
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id)
+{
+ int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+ return ret;
+}
+
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ u32 qmask, enum nl80211_iftype iftype,
+ u8 type)
+{
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+ sta->sta_id == IWL_INVALID_STA) {
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
+ if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
+ return -ENOSPC;
+ }
+
+ sta->tfd_queue_msk = qmask;
+ sta->type = type;
+
+ /* put a non-NULL value so iterating over the stations won't stop */
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+ return 0;
+}
+
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+ RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+ memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+ sta->sta_id = IWL_INVALID_STA;
+}
+
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
+ u8 sta_id, u8 fifo)
+{
+ unsigned int wdg_timeout =
+ mvm->trans->mac_cfg->base->wd_timeout;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = fifo,
+ .sta_id = sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+ unsigned int wdg_timeout =
+ mvm->trans->mac_cfg->base->wd_timeout;
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
+ wdg_timeout);
+}
+
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+ int maccolor, u8 *addr,
+ struct iwl_mvm_int_sta *sta,
+ u16 *queue, int fifo)
+{
+ int ret;
+
+ /* Map queue to fifo - needs to happen before adding station */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
+ if (ret) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
+ IWL_MAX_TID_COUNT);
+ return ret;
+ }
+
+ /*
+ * For 22000 firmware and onwards we cannot add a queue to a station
+ * unknown to the firmware, so enable the queue here - after the
+ * station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int txq;
+
+ txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+ if (txq < 0) {
+ iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+ return txq;
+ }
+
+ *queue = txq;
+ }
+
+ return 0;
+}
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
+{
+ int ret;
+ u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
+ BIT(mvm->aux_queue);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Allocate aux station and assign to it the aux queue */
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_AUX_ACTIVITY);
+ if (ret)
+ return ret;
+
+ /*
+ * In CDB NICs we need to specify which lmac to use for aux activity;
+ * use the mac_id argument's place to pass lmac_id to the function
+ */
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
+ &mvm->aux_sta, &mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST);
+ if (ret) {
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+ NULL, &mvm->snif_sta,
+ &mvm->snif_queue,
+ IWL_MVM_TX_FIFO_BE);
+}
+
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA))
+ return -EINVAL;
+
+ iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
+ &mvm->snif_queue, IWL_MAX_TID_COUNT);
+ ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA))
+ return -EINVAL;
+
+ iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
+ &mvm->aux_queue, IWL_MAX_TID_COUNT);
+ ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+
+ return ret;
+}
+
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+{
+ iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
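+ /* all-ones is the IEEE 802.11 broadcast address */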
+ static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ const u8 *baddr = _baddr;
+ int queue;
+ int ret;
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->deflink.bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ queue = mvm->probe_queue;
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ queue = mvm->p2p_dev_queue;
+ } else {
+ WARN(1, "Missing required TXQ for adding bcast STA\n");
+ return -EINVAL;
+ }
+
+ bsta->tfd_queue_msk |= BIT(queue);
+
+ iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+ }
+
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ baddr = vif->bss_conf.bssid;
+
+ if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA))
+ return -ENOSPC;
+
+ ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ return ret;
+
+ /*
+ * For 22000 firmware and onwards we cannot add a queue to a station
+ * unknown to the firmware, so enable the queue here - after the
+ * station was added
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
+ IWL_MAX_TID_COUNT,
+ wdg_timeout);
+ if (queue < 0) {
+ iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+ return queue;
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /* for queue management */
+ mvm->probe_queue = queue;
+ /* for use in TX */
+ mvmvif->deflink.mgmt_queue = queue;
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mvm->p2p_dev_queue = queue;
+ }
+ } else if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /* set it for use in TX */
+ mvmvif->deflink.mgmt_queue = mvm->probe_queue;
+ }
+
+ return 0;
+}
+
+void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 *queueptr, queue;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ queueptr = &mvm->probe_queue;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ queueptr = &mvm->p2p_dev_queue;
+ break;
+ default:
+ WARN(1, "Can't free bcast queue on vif type %d\n",
+ vif->type);
+ return;
+ }
+
+ queue = *queueptr;
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
+ queueptr, IWL_MAX_TID_COUNT);
+
+ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
+ mvmvif->deflink.mgmt_queue = mvm->probe_queue;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
+ mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+ return ret;
+}
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
+ ieee80211_vif_type_p2p(vif),
+ IWL_STA_GENERAL_PURPOSE);
+}
+
+/* Allocate a new station entry for the broadcast station to the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, bsta);
+
+ return ret;
+}
+
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
+
+ iwl_mvm_dealloc_bcast_sta(mvm, vif);
+
+ return ret;
+}
+
+/*
+ * Allocate a new station entry for the multicast station to the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
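+ /* a locally administered multicast address (I/G and U/L bits set) */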
+ static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+ const u8 *maddr = _maddr;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = vif->type == NL80211_IFTYPE_AP ?
+ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
+ .sta_id = msta->sta_id,
+ .tid = 0,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EOPNOTSUPP;
+
+ /*
+ * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+ * invalid, so make sure we use the queue we want.
+ * Note that this is done here as we want to avoid making DQA
+ * changes in mac80211 layer.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+
+ /*
+ * While in previous FWs we had to exclude the cab queue from the TFD
+ * queue mask, now it is needed like any other queue.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) &&
+ fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
+ &cfg,
+ timeout);
+ msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
+ }
+ ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+ mvmvif->id, mvmvif->color);
+ if (ret)
+ goto err;
+
+ /*
+ * Enable cab queue after the ADD_STA command is sent.
+ * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
+ * command with unknown station id, and for FW that doesn't support
+ * station API since the cab queue is not included in the
+ * tfd_queue_mask.
+ */
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
+ 0, timeout);
+ if (queue < 0) {
+ ret = queue;
+ goto err;
+ }
+ mvmvif->deflink.cab_queue = queue;
+ } else if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_STA_TYPE))
+ iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
+ &cfg,
+ timeout);
+
+ return 0;
+err:
+ iwl_mvm_dealloc_int_sta(mvm, msta);
+ return ret;
+}
+
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+ struct ieee80211_key_conf *keyconf,
+ bool mcast)
+{
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+ __le16 key_flags;
+ int ret, size;
+ u32 status;
+
+ /* This is a valid situation for GTK removal */
+ if (sta_id == IWL_INVALID_STA)
+ return 0;
+
+ key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+ key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+ /*
+ * The fields assigned here are in the same location at the start
+ * of the command, so we can do this union trick.
+ */
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.key_offset = keyconf->hw_key_idx;
+ u.cmd.common.sta_id = sta_id;
+
+ size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+ &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
+ mvmvif->deflink.mcast_sta.tfd_queue_msk);
+
+ iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
+ &mvmvif->deflink.cab_queue, 0);
+
+ ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
+ if (ret)
+ IWL_WARN(mvm, "Failed sending remove station\n");
+
+ return ret;
+}
+
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+ struct iwl_mvm_delba_data notif = {
+ .baid = baid,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
+ &notif, sizeof(notif));
+};
+
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data)
+{
+ int i;
+
+ iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
+ int j;
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ if (likely(!reorder_buf->num_stored)) {
+ spin_unlock_bh(&reorder_buf->lock);
+ continue;
+ }
+
+ /*
+ * This shouldn't happen in regular DELBA since the internal
+ * delBA notification should trigger a release of all frames in
+ * the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (j = 0; j < data->buf_size; j++)
+ __skb_queue_purge(&entries[j].frames);
+
+ spin_unlock_bh(&reorder_buf->lock);
+ }
+}
+
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data,
+ u16 ssn)
+{
+ int i;
+
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+ int j;
+
+ reorder_buf->num_stored = 0;
+ reorder_buf->head_sn = ssn;
+ spin_lock_init(&reorder_buf->lock);
+ reorder_buf->queue = i;
+ reorder_buf->valid = false;
+ for (j = 0; j < data->buf_size; j++)
+ __skb_queue_head_init(&entries[j].frames);
+ }
+}
+
+static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+ .sta_id = mvm_sta->deflink.sta_id,
+ .add_modify = STA_MODE_MODIFY,
+ };
+ u32 status;
+ int ret;
+
+ if (start) {
+ cmd.add_immediate_ba_tid = tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16(buf_size);
+ cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
+ } else {
+ cmd.remove_immediate_ba_tid = tid;
+ cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
+ }
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+ if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
+ !(status & IWL_ADD_STA_BAID_VALID_MASK)))
+ return -EINVAL;
+ return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
+ case ADD_STA_IMMEDIATE_BA_FAILURE:
+ IWL_WARN(mvm, "RX BA Session refused by fw\n");
+ return -ENOSPC;
+ default:
+ IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ return -EIO;
+ }
+}
+
+static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool start, int tid, u16 ssn,
+ u16 buf_size, int baid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
+ cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_SEND_IN_RFKILL,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ if (start) {
+ cmd.alloc.sta_id_mask =
+ cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
+ cmd.alloc.tid = tid;
+ cmd.alloc.ssn = cpu_to_le16(ssn);
+ cmd.alloc.win_size = cpu_to_le16(buf_size);
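+		/* preset to an error value; on success the fw response
+		 * below overwrites it with the allocated BAID
+		 */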
+ baid = -EIO;
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
+ cmd.remove_v1.baid = cpu_to_le32(baid);
+ BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
+ } else {
+ cmd.remove.sta_id_mask =
+ cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
+ cmd.remove.tid = cpu_to_le32(tid);
+ }
+
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ /* ignore firmware baid on remove */
+ baid = 0;
+ }
+
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
+
+ if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
+ return -EINVAL;
+
+ return baid;
+}
+
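+/*
+ * Dispatch between the dedicated BAID allocation/config command (on FW
+ * with BAID ML support) and the legacy ADD_STA based flow.
+ */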
+static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool start, int tid, u16 ssn, u16 buf_size,
+ int baid)
+{
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
+ return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
+ tid, ssn, buf_size, baid);
+
+ return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
+ tid, ssn, buf_size);
+}
+
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_baid_data *baid_data = NULL;
+ int ret, baid;
+ u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
+ IWL_MAX_BAID_OLD;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
+ IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+ return -ENOSPC;
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /*
+ * The division below will be OK if either the cache line size
+ * can be divided by the entry size (ALIGN will round up) or if
+ * the entry size can be divided by the cache line size, in
+ * which case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /*
+ * Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+ /*
+ * Allocate here so if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mvm->trans->info.num_rxqs *
+ reorder_buf_size,
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+
+ /*
+ * This division is why we need the above BUILD_BUG_ON(),
+ * if that doesn't hold then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
+ }
+
+ if (iwl_mvm_has_new_rx_api(mvm) && !start) {
+ baid = mvm_sta->tid_to_baid[tid];
+ } else {
+ /* we don't really need it in this case */
+ baid = -1;
+ }
+
+ /* Don't send command to remove (start=0) BAID during restart */
+ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
+ baid);
+
+ if (baid < 0) {
+ ret = baid;
+ goto out_free;
+ }
+
+ if (start) {
+ mvm->rx_ba_sessions++;
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ baid_data->baid = baid;
+ baid_data->timeout = timeout;
+ baid_data->last_rx = jiffies;
+ baid_data->rcu_ptr = &mvm->baid_map[baid];
+ timer_setup(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired, 0);
+ baid_data->mvm = mvm;
+ baid_data->tid = tid;
+ baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+ baid_data->buf_size = buf_size;
+
+ mvm_sta->tid_to_baid[tid] = baid;
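+		/*
+		 * Arm the inactivity timer at twice the negotiated BA
+		 * timeout (in TUs); iwl_mvm_rx_agg_session_expired() will
+		 * tear the session down if no frames were received in time.
+		 */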
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
+
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
+ /*
+ * protect the BA data with RCU to cover a case where our
+ * internal RX sync mechanism will timeout (not that it's
+ * supposed to happen) and we will free the session data while
+ * RX is being processed in parallel
+ */
+ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
+ mvm_sta->deflink.sta_id, tid, baid);
+ WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+ rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+ } else {
+ baid = mvm_sta->tid_to_baid[tid];
+
+ if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return -EINVAL;
+
+ baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ /* synchronize all rx queues so we can safely delete */
+ iwl_mvm_free_reorder(mvm, baid_data);
+ timer_shutdown_sync(&baid_data->session_timer);
+ RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
+ IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+ }
+ return 0;
+
+out_free:
+ kfree(baid_data);
+ return ret;
+}
+
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (start) {
+ mvm_sta->tfd_queue_msk |= BIT(queue);
+ mvm_sta->tid_disable_agg &= ~BIT(tid);
+ } else {
+ /* In DQA-mode the queue isn't removed on agg termination */
+ mvm_sta->tid_disable_agg |= BIT(tid);
+ }
+
+ cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+ cmd.sta_id = mvm_sta->deflink.sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+ if (ret)
+ return ret;
+
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
+ case ADD_STA_SUCCESS:
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+ start ? "start" : "stopp", status);
+ break;
+ }
+
+ return ret;
+}
+
+const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
+};
+
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data;
+ u16 normalized_ssn;
+ u16 txq_id;
+ int ret;
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+ mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+ IWL_ERR(mvm,
+ "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
+ mvmsta->tid_data[tid].state);
+ return -ENXIO;
+ }
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
+ iwl_mvm_has_new_tx_api(mvm)) {
+ u8 ac = tid_to_mac80211_ac[tid];
+
+ ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ /*
+ * Note the possible cases:
+ * 1. An enabled TXQ - TXQ needs to become agg'ed
+ * 2. The TXQ hasn't yet been enabled, so find a free one and mark
+ * it as reserved
+ */
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+ ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (ret < 0) {
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ goto out;
+ }
+
+ txq_id = ret;
+
+ /* TXQ hasn't yet been enabled, so mark it only as reserved */
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+ } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
+ ret = -ENXIO;
+		IWL_ERR(mvm, "txq_id %d (tid %d) out of range (0, %d)!\n",
+			txq_id, tid, IWL_MAX_HW_QUEUES - 1);
+ goto out;
+
+ } else if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can't start tid %d agg on shared queue!\n",
+ tid);
+ goto out;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "AGG for tid %d will be on queue #%d\n",
+ tid, txq_id);
+
+ tid_data = &mvmsta->tid_data[tid];
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+ tid_data->txq_id = txq_id;
+ *ssn = tid_data->ssn;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+ mvmsta->deflink.sta_id, tid, txq_id,
+ tid_data->ssn,
+ tid_data->next_reclaimed);
+
+ /*
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->mac_cfg->gen2)
+ normalized_ssn &= 0xff;
+
+ if (normalized_ssn == tid_data->next_reclaimed) {
+ tid_data->state = IWL_AGG_STARTING;
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ } else {
+ tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
+ }
+
+out:
+ spin_unlock_bh(&mvmsta->lock);
+
+ return ret;
+}
+
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+ bool amsdu)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif);
+ int queue, ret;
+ bool alloc_queue = true;
+ enum iwl_mvm_queue_status queue_status;
+ u16 ssn;
+
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvmsta->deflink.sta_id,
+ .tid = tid,
+ .frame_limit = buf_size,
+ .aggregate = true,
+ };
+
+ /*
+ * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+ * manager, so this function should never be called in this case.
+ */
+ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
+ BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
+ != IWL_MAX_TID_COUNT);
+
+ spin_lock_bh(&mvmsta->lock);
+ ssn = tid_data->ssn;
+ queue = tid_data->txq_id;
+ tid_data->state = IWL_AGG_ON;
+ mvmsta->agg_tids |= BIT(tid);
+ tid_data->ssn = 0xffff;
+ tid_data->amsdu_in_ampdu_allowed = amsdu;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ /*
+ * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
+ * would have failed, so if we are here there is no need to
+ * allocate a queue.
+ * However, if aggregation size is different than the default
+ * size, the scheduler should be reconfigured.
+ * We cannot do this with the new TX API, so return unsupported
+	 * for now, until it is offloaded to firmware.
+ * Note that if SCD default value changes - this condition
+ * should be updated as well.
+ */
+ if (buf_size < IWL_FRAME_LIMIT)
+ return -EOPNOTSUPP;
+
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ goto out;
+ }
+
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ queue_status = mvm->queue_info[queue].status;
+
+ /* Maybe there is no need to even alloc a queue... */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+ alloc_queue = false;
+
+ /*
+ * Only reconfig the SCD for the queue if the window size has
+ * changed from current (become smaller)
+ */
+ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
+ /*
+ * If reconfiguring an existing queue, it first must be
+ * drained
+ */
+ ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+ BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error draining queue before reconfig\n");
+ return ret;
+ }
+
+ ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+ mvmsta->deflink.sta_id, tid,
+ buf_size, ssn);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error reconfiguring TXQ #%d\n", queue);
+ return ret;
+ }
+ }
+
+ if (alloc_queue)
+ iwl_mvm_enable_txq(mvm, sta, queue, ssn,
+ &cfg, wdg_timeout);
+
+ /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+ if (queue_status != IWL_MVM_QUEUE_SHARED) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ }
+
+ /* No need to mark as reserved */
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+out:
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
+ min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
+ buf_size);
+ mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
+ mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;
+
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
+}
+
+static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct iwl_mvm_tid_data *tid_data)
+{
+ u16 txq_id = tid_data->txq_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ /*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. It hasn't even been allocated
+	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+ tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+ }
+}
+
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ int err;
+
+ /*
+ * If mac80211 is cleaning its state, then say that we finished since
+ * our state has been cleared anyway.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ return 0;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ txq_id = tid_data->txq_id;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->deflink.sta_id, tid, txq_id,
+ tid_data->state);
+
+ mvmsta->agg_tids &= ~BIT(tid);
+
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+ switch (tid_data->state) {
+ case IWL_AGG_ON:
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "ssn = %d, next_recl = %d\n",
+ tid_data->ssn, tid_data->next_reclaimed);
+
+ tid_data->ssn = 0xffff;
+ tid_data->state = IWL_AGG_OFF;
+ spin_unlock_bh(&mvmsta->lock);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+ return 0;
+ case IWL_AGG_STARTING:
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * The agg session has been stopped before it was set up. This
+ * can happen when the AddBA timer times out for example.
+ */
+
+ /* No barriers since we are under mutex */
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ tid_data->state = IWL_AGG_OFF;
+ err = 0;
+ break;
+ default:
+ IWL_ERR(mvm,
+ "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+ mvmsta->deflink.sta_id, tid, tid_data->state);
+ IWL_ERR(mvm,
+ "\ttid_data->txq_id = %d\n", tid_data->txq_id);
+ err = -EINVAL;
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+ return err;
+}
+
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+ enum iwl_mvm_agg_state old_state;
+
+ /*
+ * First set the agg state to OFF to avoid calling
+ * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+ */
+ spin_lock_bh(&mvmsta->lock);
+ txq_id = tid_data->txq_id;
+ IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->deflink.sta_id, tid, txq_id,
+ tid_data->state);
+ old_state = tid_data->state;
+ tid_data->state = IWL_AGG_OFF;
+ mvmsta->agg_tids &= ~BIT(tid);
+ spin_unlock_bh(&mvmsta->lock);
+
+ iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+ if (old_state >= IWL_AGG_ON) {
+ iwl_mvm_drain_sta(mvm, mvmsta, true);
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
+ BIT(tid)))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+ } else {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+ }
+
+ iwl_mvm_drain_sta(mvm, mvmsta, false);
+
+ iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+ int i, max = -1, max_offs = -1;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Pick the unused key offset with the highest 'deleted'
+ * counter. Every time a key is deleted, all the counters
+ * are incremented and the one that was just deleted is
+ * reset to zero. Thus, the highest counter is the one
+ * that was deleted longest ago. Pick that one.
+ */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (test_bit(i, mvm->fw_key_table))
+ continue;
+ if (mvm->fw_key_deleted[i] > max) {
+ max = mvm->fw_key_deleted[i];
+ max_offs = i;
+ }
+ }
+
+ if (max_offs < 0)
+ return STA_KEY_IDX_INVALID;
+
+ return max_offs;
+}
+
+static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (sta)
+ return iwl_mvm_sta_from_mac80211(sta);
+
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+ * station ID, then use AP's station ID.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
+ u8 sta_id = mvmvif->deflink.ap_sta_id;
+
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /*
+ * It is possible that the 'sta' parameter is NULL,
+ * for example when a GTK is removed - the sta_id will then
+ * be the AP ID, and no station was passed by mac80211.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+ }
+
+ return NULL;
+}
+
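+/*
+ * Compare two packet numbers stored least-significant-byte first (the
+ * layout of rx_secur_seq_cnt): returns 1 if pn1 > pn2, -1 if pn1 < pn2
+ * and 0 if they are equal.
+ */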
+static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (pn1[i] > pn2[i])
+ return 1;
+ if (pn1[i] < pn2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+ u32 sta_id,
+ struct ieee80211_key_conf *key, bool mcast,
+ u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+ u8 key_offset, bool mfp)
+{
+ union {
+ struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+ struct iwl_mvm_add_sta_key_cmd cmd;
+ } u = {};
+ __le16 key_flags;
+ int ret;
+ u32 status;
+ u16 keyidx;
+ u64 pn = 0;
+ int i, size;
+ bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+ int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
+ new_api ? 2 : 1);
+
+ if (sta_id == IWL_INVALID_STA)
+ return -EINVAL;
+
+ keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
+ STA_KEY_FLG_KEYID_MSK;
+ key_flags = cpu_to_le16(keyidx);
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+ if (api_ver >= 2) {
+ memcpy((void *)&u.cmd.tx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+
+ memcpy((void *)&u.cmd.rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWL_MIC_KEY_SIZE);
+ pn = atomic64_read(&key->tx_pn);
+
+ } else {
+ u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ u.cmd_v1.tkip_rx_ttak[i] =
+ cpu_to_le16(tkip_p1k[i]);
+ }
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (api_ver >= 2)
+ pn = atomic64_read(&key->tx_pn);
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+ memcpy(u.cmd.common.key + 3, key->key, key->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ if (api_ver >= 2)
+ pn = atomic64_read(&key->tx_pn);
+ break;
+ default:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+ memcpy(u.cmd.common.key, key->key, key->keylen);
+ }
+
+ if (mcast)
+ key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+ if (mfp)
+ key_flags |= cpu_to_le16(STA_KEY_MFP);
+
+ u.cmd.common.key_offset = key_offset;
+ u.cmd.common.key_flags = key_flags;
+ u.cmd.common.sta_id = sta_id;
+
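+	/*
+	 * Upload the RX PNs for all TIDs. Start at -1, the TID-less entry
+	 * mac80211 keeps for robust management frames, except for TKIP,
+	 * which only has per-TID RX state.
+	 */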
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ i = 0;
+ else
+ i = -1;
+
+ for (; i < IEEE80211_NUM_TIDS; i++) {
+ struct ieee80211_key_seq seq = {};
+ u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
+ int rx_pn_len = 8;
+ /* there's a hole at 2/3 in FW format depending on version */
+ int hole = api_ver >= 3 ? 0 : 2;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ rx_pn[0] = seq.tkip.iv16;
+ rx_pn[1] = seq.tkip.iv16 >> 8;
+ rx_pn[2 + hole] = seq.tkip.iv32;
+ rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
+ rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
+ rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
+ } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
+ rx_pn = seq.hw.seq;
+ rx_pn_len = seq.hw.seq_len;
+ } else {
+ rx_pn[0] = seq.ccmp.pn[0];
+ rx_pn[1] = seq.ccmp.pn[1];
+ rx_pn[2 + hole] = seq.ccmp.pn[2];
+ rx_pn[3 + hole] = seq.ccmp.pn[3];
+ rx_pn[4 + hole] = seq.ccmp.pn[4];
+ rx_pn[5 + hole] = seq.ccmp.pn[5];
+ }
+
+ if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
+ rx_pn_len) > 0)
+ memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
+ rx_pn_len);
+ }
+
+ if (api_ver >= 2) {
+ u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+ size = sizeof(u.cmd);
+ } else {
+ size = sizeof(u.cmd_v1);
+ }
+
+ status = ADD_STA_SUCCESS;
+ if (cmd_flags & CMD_ASYNC)
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+ &u.cmd);
+ else
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+ &u.cmd, &status);
+
+ switch (status) {
+ case ADD_STA_SUCCESS:
+ IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, bool remove_key)
+{
+ struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+ /* verify the key details match the required command's expectations */
+ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
+ keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
+ (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+ return -EINVAL;
+
+ if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
+ return -EINVAL;
+
+ igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+ igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+ if (remove_key) {
+ /* This is a valid situation for IGTK */
+ if (sta_id == IWL_INVALID_STA)
+ return 0;
+
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+ } else {
+ struct ieee80211_key_seq seq;
+ const u8 *pn;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ igtk_cmd.ctrl_flags |=
+ cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ pn = seq.aes_cmac.pn;
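+		/* the PN is stored most-significant-byte first; pack it
+		 * into the little-endian 64-bit counter the fw expects
+		 */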
+ igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+ ((u64) pn[4] << 8) |
+ ((u64) pn[3] << 16) |
+ ((u64) pn[2] << 24) |
+ ((u64) pn[1] << 32) |
+ ((u64) pn[0] << 40));
+ }
+
+ IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
+ remove_key ? "removing" : "installing",
+ keyconf->keyidx >= 6 ? "B" : "",
+ keyconf->keyidx, igtk_cmd.sta_id);
+
+ if (!iwl_mvm_has_new_rx_api(mvm)) {
+ struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+ .ctrl_flags = igtk_cmd.ctrl_flags,
+ .key_id = igtk_cmd.key_id,
+ .sta_id = igtk_cmd.sta_id,
+ .receive_seq_cnt = igtk_cmd.receive_seq_cnt
+ };
+
+ memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+ ARRAY_SIZE(igtk_cmd_v1.igtk));
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+ }
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd), &igtk_cmd);
+}
+
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (sta)
+ return sta->addr;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) {
+ u8 sta_id = mvmvif->deflink.ap_sta_id;
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return NULL;
+
+ return sta->addr;
+ }
+
+ return NULL;
+}
+
+static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset,
+ bool mcast)
+{
+ const u8 *addr;
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
+ u32 sta_id;
+ bool mfp = false;
+
+ if (sta) {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ sta_id = mvm_sta->deflink.sta_id;
+ mfp = sta->mfp;
+ } else if (vif->type == NL80211_IFTYPE_AP &&
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->deflink.mcast_sta.sta_id;
+ } else {
+ IWL_ERR(mvm, "Failed to find station id\n");
+ return -EINVAL;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+ if (!addr) {
+ IWL_ERR(mvm, "Failed to find mac address\n");
+ return -EINVAL;
+ }
+
+ /* get phase 1 key from mac80211 */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+
+ return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+ seq.tkip.iv32, p1k, 0, key_offset,
+ mfp);
+ }
+
+ return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+ 0, NULL, 0, key_offset, mfp);
+}
+
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset)
+{
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = IWL_INVALID_STA;
+ int ret;
+ static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (vif->type != NL80211_IFTYPE_AP ||
+ keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ /* Get the station id from the mvm local station table */
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (!mvm_sta) {
+ IWL_ERR(mvm, "Failed to find station\n");
+ return -EINVAL;
+ }
+ sta_id = mvm_sta->deflink.sta_id;
+
+ /*
+ * It is possible that the 'sta' parameter is NULL, and thus
+ * there is a need to retrieve the sta from the local station
+ * table.
+ */
+ if (!sta) {
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta)) {
+ IWL_ERR(mvm, "Invalid station id\n");
+ return -EINVAL;
+ }
+ }
+
+ if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
+ return -EINVAL;
+ } else {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ sta_id = mvmvif->deflink.mcast_sta.sta_id;
+ }
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+ ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+ goto end;
+ }
+
+ /* If the key_offset is not pre-assigned, we need to find a
+ * new offset to use. In normal cases, the offset is not
+ * pre-assigned, but during HW_RESTART we want to reuse the
+ * same indices, so we pass them when this function is called.
+ *
+	 * In D3 entry, we need to hardcode the indices (because the
+ * firmware hardcodes the PTK offset to 0). In this case, we
+ * need to make sure we don't overwrite the hw_key_idx in the
+ * keyconf structure, because otherwise we cannot configure
+ * the original ones back when resuming.
+ */
+ if (key_offset == STA_KEY_IDX_INVALID) {
+ key_offset = iwl_mvm_set_fw_key_idx(mvm);
+ if (key_offset == STA_KEY_IDX_INVALID)
+ return -ENOSPC;
+ keyconf->hw_key_idx = key_offset;
+ }
+
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
+ if (ret)
+ goto end;
+
+ /*
+ * For WEP, the same key is used for multicast and unicast. Upload it
+ * again, using the same key offset, and now pointing the other one
+ * to the same key slot (offset).
+ * If this fails, remove the original as well.
+ */
+ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ sta) {
+ ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+ key_offset, !mcast);
+ if (ret) {
+ __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ goto end;
+ }
+ }
+
+ __set_bit(key_offset, mvm->fw_key_table);
+
+end:
+ IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta ? sta->addr : zero_addr, ret);
+ return ret;
+}
+
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf)
+{
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ struct iwl_mvm_sta *mvm_sta;
+ u8 sta_id = IWL_INVALID_STA;
+ int ret, i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Get the station from the mvm local station table */
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (mvm_sta)
+ sta_id = mvm_sta->deflink.sta_id;
+ else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
+ sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;
+
+ IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+ if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+ IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+ keyconf->hw_key_idx);
+ return -ENOENT;
+ }
+
+ /* track which key was deleted last */
+ for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+ if (mvm->fw_key_deleted[i] < U8_MAX)
+ mvm->fw_key_deleted[i]++;
+ }
+ mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
+ if (sta && !mvm_sta) {
+ IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+ return 0;
+ }
+
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+ if (ret)
+ return ret;
+
+ /* delete WEP key twice to get rid of (now useless) offset */
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+ ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
+
+ return ret;
+}
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ bool mfp = sta ? sta->mfp : false;
+
+ rcu_read_lock();
+
+ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+ if (WARN_ON_ONCE(!mvm_sta))
+ goto unlock;
+ iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
+ iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
+ mfp);
+
+ unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->deflink.sta_id,
+ .station_flags_msk = cpu_to_le32(STA_FLG_PS),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt, u16 tids, bool more_data,
+ bool single_sta_queue)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->deflink.sta_id,
+ .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+ .sleep_tx_count = cpu_to_le16(cnt),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int tid, ret;
+ unsigned long _tids = tids;
+
+	/* convert TIDs to ACs - we don't support TSPEC so that's OK.
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation or dqa queues then check
+ * if all the queues that we're releasing frames from, combined, have:
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (single_sta_queue) {
+ int remaining = cnt;
+ int sleep_tx_count;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ n_queued = iwl_mvm_tid_queued(mvm, tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ sleep_tx_count = cnt - remaining;
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ mvmsta->sleep_tx_count = sleep_tx_count;
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
+ } else {
+ cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
+ }
+
+ /* block the Tx queues until the FW updated the sleep Tx count */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
+ CMD_ASYNC | CMD_BLOCK_TXQS,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
+ return;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ bool disable)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->deflink.sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+ if (mvm->mld_api_is_used) {
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
+ return;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable)
+{
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvm->mld_api_is_used) {
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
+ return;
+ }
+
+ spin_lock_bh(&mvm_sta->lock);
+
+ if (mvm_sta->disable_tx == disable) {
+ spin_unlock_bh(&mvm_sta->lock);
+ return;
+ }
+
+ mvm_sta->disable_tx = disable;
+
+ /*
+ * If sta PS state is handled by mac80211, tell it to start/stop
+ * queuing tx for this station.
+ */
+ if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
+ ieee80211_sta_block_awake(mvm->hw, sta, disable);
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
+static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_int_sta *sta,
+ bool disable)
+{
+ u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = sta->sta_id,
+ .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+ .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+ .mac_id_n_color = cpu_to_le32(id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ int i;
+
+ if (mvm->mld_api_is_used) {
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
+ disable);
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* Block/unblock all the stations of the given mvmvif */
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvm_sta->mac_id_n_color !=
+ FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
+ continue;
+
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+ }
+
+ rcu_read_unlock();
+
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+ return;
+
+ /* Need to block/unblock also multicast station */
+ if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->deflink.mcast_sta,
+ disable);
+
+ /*
+ * Only unblock the broadcast station (FW blocks it for immediate
+ * quiet, not the driver)
+ */
+ if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+ &mvmvif->deflink.bcast_sta,
+ disable);
+}
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);
+
+ if (mvmsta)
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+
+ rcu_read_unlock();
+}
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
+{
+ u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+ /*
+ * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
+ * to align the wrap around of ssn so we compare relevant values.
+ */
+ if (mvm->trans->mac_cfg->gen2)
+ sn &= 0xff;
+
+ return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+}
+
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 id)
+{
+ struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
+ .id = cpu_to_le32(id),
+ };
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
+ CMD_ASYNC,
+ sizeof(cancel_channel_switch_cmd),
+ &cancel_channel_switch_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to cancel the channel switch\n");
+}
+
+static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
+ u8 fw_sta_id)
+{
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
+ struct iwl_mvm_vif_link_info *link;
+
+ if (WARN_ON_ONCE(!link_sta))
+ return -EINVAL;
+
+ link = mvmvif->link[link_sta->link_id];
+
+ if (WARN_ON_ONCE(!link))
+ return -EINVAL;
+
+ return link->fw_link_id;
+}
+
+#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
+
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_tpt_counter *queue_counter;
+ struct iwl_mvm_mpdu_counter *link_counter;
+ u32 total_mpdus = 0;
+ int fw_link_id;
+
+ /* Count only for a BSS sta, and only when EMLSR is possible */
+ if (!mvm_sta->mpdu_counters)
+ return;
+
+ /* Map sta id to link id */
+ fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
+ if (fw_link_id < 0)
+ return;
+
+ queue_counter = &mvm_sta->mpdu_counters[queue];
+ link_counter = &queue_counter->per_link[fw_link_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+ /*
+	 * When not in EMLSR, the window and the decision to enter EMLSR
+	 * are handled during counting; when in EMLSR, they are handled in
+	 * the statistics flow.
+ */
+ if (mvmvif->esr_active)
+ goto out;
+
+ if (time_is_before_jiffies(queue_counter->window_start +
+ IWL_MVM_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start = jiffies;
+
+ IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
+ }
+
+ for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
+ wiphy_work_queue(mvmvif->mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tpt_wk);
+
+out:
+ spin_unlock_bh(&queue_counter->lock);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.h b/sys/contrib/dev/iwlwifi/mvm/sta.h
new file mode 100644
index 000000000000..f6906061510b
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/sta.h
@@ -0,0 +1,686 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2016 Intel Deutschland GmbH
+ */
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_STATION_COUNT_MAX */
+#include "rs.h"
+
+struct iwl_mvm;
+struct iwl_mvm_vif;
+
+/**
+ * DOC: DQA - Dynamic Queue Allocation -introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the
+ * iwlwifi driver to allow dynamic allocation of queues on demand, rather
+ * than allocating them statically ahead of time. Ideally, we would like to
+ * allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ * TXQ #2 - P2P device frames
+ * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ * TXQ #4 - BSS DATA frames queue
+ * TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ * TXQ #9 - P2P GO/SoftAP probe responses
+ * TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can it be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no such free queue exists for reserving, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than a single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the queue is only marked as aggregated via the ADD_STA command.
+ * Note, however, that a shared queue cannot be aggregated; only after the
+ * other TIDs become inactive and are removed can the queue be reconfigured
+ * and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
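+
+/*
+ * A minimal sketch of the queue reservation step described above, following
+ * the flow in iwl_mvm_sta_tx_agg_start():
+ *
+ *	txq_id = iwl_mvm_find_free_queue(mvm, sta_id,
+ *					 IWL_MVM_DQA_MIN_DATA_QUEUE,
+ *					 IWL_MVM_DQA_MAX_DATA_QUEUE);
+ *	if (txq_id >= 0)
+ *		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+ */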
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP / GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the structure %iwl_mvm_sta
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
+ * This data includes the index of the station in the fw, per-tid information
+ * (sequence numbers, Block-ack state machine, etc.). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map, %fw_id_to_mac_id, that allows fetching a
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the tid related data in
+ * O(1) in time sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211_sta structure. This map helps to get that pointer quickly.
+ */
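+
+/*
+ * A minimal sketch of the O(1) lookup described above (assuming the caller
+ * already holds the RCU read lock):
+ *
+ *	struct ieee80211_sta *sta =
+ *		rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ *
+ *	if (!IS_ERR_OR_NULL(sta))
+ *		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ */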
+
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops which can sleep. The next paragraph explains
+ * the locking of a single station, the following ones relate to the station
+ * table.
+ *
+ * The station holds the sequence number per tid. So this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well as
+ * from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of the %fw_id_to_mac_id map are serialized by the global mutex of
+ * the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
+
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as the broadcast station in AP / GO mode, or the AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For this kind of station we have the %iwl_mvm_int_sta struct, which holds
+ * the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required;
+ * no TID data is kept either, as it is not needed.
+ * One thing to note is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id
+ * we fill ERR_PTR(-EINVAL) in this mapping, and any dereference of
+ * pointers from this mapping needs to check that the value is not an
+ * error or NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, initialized
+ * on init.
+ */
+
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get a pointer to the corresponding %ieee80211_sta,
+ * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless of the
+ * station to which they were sent. We do that when we disassociate and before
+ * we remove the STA of the AP. The flush can be done synchronously against the
+ * fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates.
+ */
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires resetting the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embedded in mac80211's %ieee80211_sta, that data will
+ * not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart, hinting
+ * that we must not allocate a new sta_id but reuse the previous one. This
+ * means that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls
+ * %iwl_mvm_up.
+ */
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know when there are frames in these queues so that it can
+ * properly handle trigger frames.
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. (Note that mac80211 has all the knowledge since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames.) The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with the PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this case (and all others) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
+ */
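+
+/*
+ * Illustrative sketch (not driver code): the release path described
+ * above. When mac80211 asks to release frames for a uAPSD trigger (or
+ * PS-Poll), the driver tells the fw to let them out even though the
+ * station is marked asleep:
+ *
+ *	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+ *					  IEEE80211_FRAME_RELEASE_UAPSD,
+ *					  n_frames, tids, more_data,
+ *					  single_sta_queue);
+ */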
+
+/**
+ * enum iwl_mvm_agg_state - aggregation session state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_mvm_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_QUEUED,
+ IWL_AGG_STARTING,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ *	This is basically the last acked packet + 1.
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
+ * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session / DQA
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @tx_time: medium time consumed by this A-MPDU
+ * @tpt_meas_start: time when the throughput measurement started; reset every HZ (one second)
+ * @tx_count_last: number of frames transmitted during the last second
+ * @tx_count: counts the number of frames transmitted since the last reset of
+ * tpt_meas_start
+ */
+struct iwl_mvm_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ /* The rest is Tx AGG related */
+ __le32 rate_n_flags;
+ u8 lq_color;
+ bool amsdu_in_ampdu_allowed;
+ enum iwl_mvm_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ u16 tx_time;
+ unsigned long tpt_meas_start;
+ u32 tx_count_last;
+ u32 tx_count;
+};
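+
+/*
+ * Illustrative sketch (assumption, not driver code): per the @ssn
+ * description above, the agg start/stop transition can complete once
+ * the reclaim pointer catches up with the stored ssn:
+ *
+ *	if (tid_data->next_reclaimed == tid_data->ssn)
+ *		// all pre-transition frames are out; tell mac80211,
+ *		// e.g. via ieee80211_start_tx_ba_cb_irqsafe()
+ *		finish_agg_transition();
+ */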
+
+struct iwl_mvm_key_pn {
+ struct rcu_head rcu_head;
+ struct {
+ u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+ } ____cacheline_aligned_in_smp q[];
+};
+
+/**
+ * enum iwl_mvm_rxq_notif_type - Internal message identifier
+ *
+ * @IWL_MVM_RXQ_EMPTY: empty sync notification
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mvm_rxq_notif_type {
+ IWL_MVM_RXQ_EMPTY,
+ IWL_MVM_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+ * FW is agnostic to the payload, so there are no endianness requirements.
+ *
+ * @type: value from &iwl_mvm_rxq_notif_type
+ * @sync: ctrl path is waiting for all notifications to be received
+ * @cookie: internal cookie to identify old notifications
+ * @data: payload
+ */
+struct iwl_mvm_internal_rxq_notif {
+ u16 type;
+ u16 sync;
+ u32 cookie;
+ u8 data[];
+} __packed;
+
+struct iwl_mvm_delba_data {
+ u32 baid;
+} __packed;
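+
+/*
+ * Illustrative sketch (not driver code): composing a delBA sync message
+ * from the two structs above. The payload is opaque to the FW, so
+ * native byte order is fine:
+ *
+ *	struct {
+ *		struct iwl_mvm_internal_rxq_notif hdr;
+ *		struct iwl_mvm_delba_data delba;
+ *	} __packed msg = {
+ *		.hdr.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+ *		.hdr.sync = 1,		// wait for all RX queues
+ *		.delba.baid = baid,
+ *	};
+ */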
+
+/**
+ * struct iwl_mvm_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwl_mvm_rxq_dup_data {
+ __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+ u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_link_sta - link specific parameters of a station
+ * @rcu_head: used for freeing the data
+ * @sta_id: the index of the station in the fw
+ * @lq_sta: holds rate scaling data: %rs_drv when RS is done in the driver,
+ *	or %rs_fw when it is done in the FW.
+ * @orig_amsdu_len: used to save the original amsdu_len when it is changed via
+ *	debugfs. If it's set to 0, it means that it was not set via
+ *	debugfs.
+ * @avg_energy: energy as reported by FW statistics notification
+ */
+struct iwl_mvm_link_sta {
+ struct rcu_head rcu_head;
+ u32 sta_id;
+ union {
+ struct iwl_lq_sta_rs_fw rs_fw;
+ struct iwl_lq_sta rs_drv;
+ } lq_sta;
+
+ u16 orig_amsdu_len;
+
+ u8 avg_energy;
+};
+
+struct iwl_mvm_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mvm_tpt_counter - per-queue MPDU counter
+ *
+ * @lock: Needed to protect the counters when modified from statistics.
+ * @per_link: per-link counters.
+ * @window_start: timestamp of the counting-window start
+ */
+struct iwl_mvm_tpt_counter {
+ spinlock_t lock;
+ struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
+ unsigned long window_start;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @vif: the interface the station belongs to
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ * tid.
+ * @sta_type: station type
+ * @authorized: indicates station is authorized
+ * @sta_state: station state according to enum %ieee80211_sta_state
+ * @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
+ * @lock: lock to protect the whole struct. Since %tid_data is accessed from
+ *	both the Tx and the Tx response flows, it needs a spinlock.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ * such queue can be guaranteed, the STA addition will fail.
+ * @tx_protection: reference counter for controlling the Tx protection.
+ * @tt_tx_protection: is thermal throttling enabling Tx protection?
+ * @disable_tx: is tx to this STA disabled?
+ * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
+ * In case TLC offload is not active it is either 0xFFFF or 0.
+ * @max_amsdu_len: max AMSDU length
+ * @sleeping: indicates the station is sleeping (used only when sleep state
+ *	tracking is not offloaded to FW)
+ * @agg_tids: bitmap of TIDs whose aggregation state is operational (IWL_AGG_ON)
+ * @sleep_tx_count: the number of frames that we told the firmware to let out
+ * even when that station is asleep. This is useful in case the queue
+ * gets empty before all the frames were sent, which can happen when
+ * we are sending frames from an AMPDU queue and there was a hole in
+ * the BA window. To be used for UAPSD only.
+ * @ptk_pn: per-queue PTK PN data structures
+ * @dup_data: per queue duplicate packet detection data
+ * @tx_ant: the index of the antenna to use for data tx to this station. Only
+ * used during connection establishment (e.g. for the 4 way handshake
+ * exchange).
+ * @pairwise_cipher: used to feed iwlmei upon authorization
+ * @deflink: the default link station, for non-MLO STA, all link specific data
+ * is accessed via deflink (or link[0]). For MLO, it will hold data of the
+ * first added link STA.
+ * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
+ * link[0] points to deflink and link[link_id] is allocated when new link
+ * sta is added.
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+ u32 tfd_queue_msk;
+ u32 mac_id_n_color;
+ u16 tid_disable_agg;
+ u8 sta_type;
+ enum ieee80211_sta_state sta_state;
+ bool bt_reduced_txpower;
+ bool next_status_eosp;
+ bool authorized;
+ spinlock_t lock;
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+ struct iwl_mvm_rxq_dup_data *dup_data;
+
+ u8 reserved_queue;
+
+ /* Temporary, until the new TLC will control the Tx protection */
+ s8 tx_protection;
+ bool tt_tx_protection;
+
+ bool disable_tx;
+ u16 amsdu_enabled;
+ u16 max_amsdu_len;
+ bool sleeping;
+ u8 agg_tids;
+ u8 sleep_tx_count;
+ u8 tx_ant;
+ u32 pairwise_cipher;
+
+ struct iwl_mvm_link_sta deflink;
+ struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mvm_tpt_counter *mpdu_counters;
+};
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
+ * @tfd_queue_msk: the tfd queues used by the station
+ */
+struct iwl_mvm_int_sta {
+ u32 sta_id;
+ u8 type;
+ u32 tfd_queue_msk;
+};
+
+/**
+ * iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ * about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ * from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ * Return: negative error code or 0 on success
+ */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ bool update, unsigned int flags);
+int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype);
+int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, int sta_id, u8 sta_type);
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
+void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta);
+void iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf,
+ u8 key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+ bool amsdu);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start);
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
+int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ u32 qmask, enum nl80211_iftype iftype,
+ u8 type);
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum ieee80211_frame_release_type reason,
+ u16 cnt, u16 tids, bool more_data,
+ bool single_sta_queue);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool drain);
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, bool disable);
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable);
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable);
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 id);
+/* Queues */
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ u8 sta_id, u8 tid, unsigned int timeout);
+
+/* Sta state */
+/**
+ * struct iwl_mvm_sta_state_ops - callbacks for the sta_state() ops
+ *
+ * Since the only difference between the MLD and
+ * non-MLD versions of sta_state() is these function calls,
+ * each version passes its specific callbacks to
+ * %iwl_mvm_mac_sta_state_common().
+ *
+ * @add_sta: pointer to the function that adds a new sta
+ * @update_sta: pointer to the function that updates a sta
+ * @rm_sta: pointer to the function that removes a sta
+ * @mac_ctxt_changed: pointer to the function that handles a change in mac ctxt
+ */
+struct iwl_mvm_sta_state_ops {
+ int (*add_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*update_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*rm_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*mac_ctxt_changed)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool force_assoc_off);
+};
+
+int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state,
+ const struct iwl_mvm_sta_state_ops *callbacks);
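+
+/*
+ * Illustrative sketch (not verbatim driver code): a non-MLD sta_state()
+ * implementation wiring its callbacks into the common handler above.
+ * The mac_ctxt_changed wrapper name is hypothetical; it adapts to the
+ * three-argument ops signature:
+ *
+ *	static const struct iwl_mvm_sta_state_ops callbacks = {
+ *		.add_sta = iwl_mvm_add_sta,
+ *		.update_sta = iwl_mvm_update_sta,
+ *		.rm_sta = iwl_mvm_rm_sta,
+ *		.mac_ctxt_changed = mac_ctxt_changed_wrapper,
+ *	};
+ *
+ *	return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state,
+ *					    new_state, &callbacks);
+ */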
+
+/* New MLD STA related APIs */
+/* STA */
+int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
+int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id);
+int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
+int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int filter_link_id);
+int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_int_sta *sta,
+ const u8 *addr, int link_id,
+ u16 *queue, u8 tid,
+ unsigned int *_wdg_timeout);
+
+/* Queues */
+void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ bool disable);
+void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ bool disable);
+void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ bool disable);
+#endif /* __sta_h__ */
diff --git a/sys/contrib/dev/iwlwifi/mvm/tdls.c b/sys/contrib/dev/iwlwifi/mvm/tdls.c
new file mode 100644
index 000000000000..9a250b407f3a
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/tdls.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2020, 2022-2024 Intel Corporation
+ */
+#if defined(__FreeBSD__)
+#include <linux/delay.h>
+#endif
+#include <linux/etherdevice.h>
+#include "mvm.h"
+#include "time-event.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+#define TU_TO_US(x) ((x) * 1024)
+#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
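+
+/* For instance, with a typical 100 TU beacon interval:
+ * TU_TO_US(100) == 102400 and TU_TO_MS(100) == 102 (integer division).
+ */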
+
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+ GFP_KERNEL);
+ }
+}
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int count = 0;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ if (vif) {
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->vif != vif)
+ continue;
+ }
+
+ count++;
+ }
+
+ return count;
+}
+
+static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_tdls_config_res *resp;
+ struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+ struct iwl_host_cmd cmd = {
+ .id = TDLS_CONFIG_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &tdls_cfg_cmd, },
+ .len = { sizeof(struct iwl_tdls_config_cmd), },
+ };
+ struct ieee80211_sta *sta;
+ int ret, i, cnt;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ tdls_cfg_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+
+ /* for now the Tx cmd is empty and unused */
+
+ /* populate TDLS peer data */
+ cnt = 0;
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+ continue;
+
+ tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
+ IWL_MVM_TDLS_FW_TID;
+ tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+ tdls_cfg_cmd.sta_info[cnt].is_initiator =
+ cpu_to_le32(sta->tdls_initiator ? 1 : 0);
+
+ cnt++;
+ }
+
+ tdls_cfg_cmd.tdls_peer_count = cnt;
+ IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (WARN_ON_ONCE(ret))
+ return;
+
+ pkt = cmd.resp_pkt;
+
+ WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
+
+ /* we don't really care about the response at this point */
+
+ iwl_free_resp(&cmd);
+}
+
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool sta_added)
+{
+ int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
+
+ /* when the first peer joins, send a power update first */
+ if (tdls_sta_cnt == 1 && sta_added)
+ iwl_mvm_power_update_mac(mvm);
+
+	/* Configure the FW with TDLS peer info only if the TDLS channel
+	 * switch capability is set.
+	 * TDLS config data is currently used only in the TDLS channel switch
+	 * code. It is also supposed to serve the TDLS buffer station, which
+	 * is not yet implemented in the FW.
+	 */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
+ iwl_mvm_tdls_config(mvm, vif);
+
+ /* when the last peer leaves, send a power update last */
+ if (tdls_sta_cnt == 0 && !sta_added)
+ iwl_mvm_power_update_mac(mvm);
+}
+
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+
+ /* Protect the session to hear the TDLS setup response on the channel */
+ guard(mvm)(mvm);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, duration,
+ duration, true, link_id);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ duration, 100, true);
+}
+
+static const char *
+iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
+{
+ switch (state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ return "IDLE";
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ return "REQ SENT";
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
+ return "RESP RECEIVED";
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ return "REQ RECEIVED";
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ return "ACTIVE";
+ }
+
+ return NULL;
+}
+
+static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
+ enum iwl_mvm_tdls_cs_state state)
+{
+ if (mvm->tdls_cs.state == state)
+ return;
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+ iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
+ iwl_mvm_tdls_cs_state_str(state));
+ mvm->tdls_cs.state = state;
+
+ /* we only send requests to our switching peer - update sent time */
+ if (state == IWL_MVM_TDLS_SW_REQ_SENT)
+ mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
+
+ if (state == IWL_MVM_TDLS_SW_IDLE)
+ mvm->tdls_cs.cur_sta_id = IWL_INVALID_STA;
+}
+
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ unsigned int delay;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* can fail sometimes */
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+ return;
+ }
+
+ if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
+ return;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+
+ /*
+ * Update state and possibly switch again after this is over (DTIM).
+ * Also convert TU to msec.
+ */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+}
+
+static int
+iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator, u32 timestamp)
+{
+ bool same_peer = false;
+ int ret = 0;
+
+ /* get the existing peer if it's there */
+ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+ mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) {
+ struct ieee80211_sta *sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!IS_ERR_OR_NULL(sta))
+ same_peer = ether_addr_equal(peer, sta->addr);
+ }
+
+ switch (mvm->tdls_cs.state) {
+ case IWL_MVM_TDLS_SW_IDLE:
+ /*
+ * might be spurious packet from the peer after the switch is
+ * already done
+ */
+ if (type == TDLS_MOVE_CH)
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_SENT:
+ /* only allow requests from the same peer */
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
+ !peer_initiator)
+ /*
+ * We received a ch-switch request while an outgoing
+ * one is pending. Allow it if the peer is the link
+ * initiator.
+ */
+ ret = -EBUSY;
+ else if (type == TDLS_SEND_CHAN_SW_REQ)
+ /* wait for idle before sending another request */
+ ret = -EBUSY;
+ else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+ /* we got a stale response - ignore it */
+ ret = -EINVAL;
+ break;
+ case IWL_MVM_TDLS_SW_RESP_RCVD:
+ /*
+ * we are waiting for the FW to give an "active" notification,
+ * so ignore requests in the meantime
+ */
+ ret = -EBUSY;
+ break;
+ case IWL_MVM_TDLS_SW_REQ_RCVD:
+ /* as above, allow the link initiator to proceed */
+ if (type == TDLS_SEND_CHAN_SW_REQ) {
+ if (!same_peer)
+ ret = -EBUSY;
+ else if (peer_initiator) /* they are the initiator */
+ ret = -EBUSY;
+ } else if (type == TDLS_MOVE_CH) {
+ ret = -EINVAL;
+ }
+ break;
+ case IWL_MVM_TDLS_SW_ACTIVE:
+ /*
+ * the only valid request when active is a request to return
+ * to the base channel by the current off-channel peer
+ */
+ if (type != TDLS_MOVE_CH || !same_peer)
+ ret = -EBUSY;
+ break;
+ }
+
+ if (ret)
+ IWL_DEBUG_TDLS(mvm,
+ "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+ type, mvm->tdls_cs.state, peer, same_peer,
+ peer_initiator);
+
+ return ret;
+}
+
+static int
+iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_tdls_channel_switch_type type,
+ const u8 *peer, bool peer_initiator,
+ u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ u32 timestamp, u16 switch_time,
+ u16 switch_timeout, struct sk_buff *skb,
+ u32 ch_sw_tm_ie)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_hdr *hdr;
+ struct iwl_tdls_channel_switch_cmd cmd = {0};
+ struct iwl_tdls_channel_switch_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci);
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
+ timestamp);
+ if (ret)
+ return ret;
+
+ if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd.switch_type = type;
+ tail->timing.frame_timestamp = cpu_to_le32(timestamp);
+ tail->timing.switch_time = cpu_to_le32(switch_time);
+ tail->timing.switch_timeout = cpu_to_le32(switch_timeout);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ ret = -ENOENT;
+ goto out;
+ }
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
+
+ if (!chandef) {
+ if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.peer.chandef.chan) {
+ /* actually moving to the channel */
+ chandef = &mvm->tdls_cs.peer.chandef;
+ } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
+ type == TDLS_MOVE_CH) {
+ /* we need to return to base channel */
+ struct ieee80211_chanctx_conf *chanctx =
+ rcu_dereference(vif->bss_conf.chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx)) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ chandef = &chanctx->def;
+ }
+ }
+
+ if (chandef)
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef);
+
+ /* keep quota calculation simple for now - 50% of DTIM for TDLS */
+ tail->timing.max_offchan_duration =
+ cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int) / 2);
+
+ /* Switch time is the first element in the switch-timing IE. */
+ tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+ info = IEEE80211_SKB_CB(skb);
+ hdr = (void *)skb->data;
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd);
+ }
+
+ iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info,
+ mvmsta->deflink.sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta,
+ hdr->frame_control);
+ rcu_read_unlock();
+
+ memcpy(tail->frame.data, skb->data, skb->len);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
+ ret);
+ goto out;
+ }
+
+ /* channel switch has started, update state */
+ if (type != TDLS_MOVE_CH) {
+ mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id;
+ iwl_mvm_tdls_update_cs_state(mvm,
+ type == TDLS_SEND_CHAN_SW_REQ ?
+ IWL_MVM_TDLS_SW_REQ_SENT :
+ IWL_MVM_TDLS_SW_REQ_RCVD);
+ } else {
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
+ }
+
+out:
+
+ /* channel switch failed - we are idle */
+ if (ret)
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ return ret;
+}
+
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
+{
+ struct iwl_mvm *mvm;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
+ unsigned int delay;
+ int ret;
+
+ mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+ guard(mvm)(mvm);
+
+ /* called after an active channel switch has finished or timed-out */
+ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+ /* station might be gone, in that case do nothing */
+ if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA)
+ return;
+
+ sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* the station may not be here, but if it is, it must be a TDLS peer */
+ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr,
+ mvm->tdls_cs.peer.initiator,
+ mvm->tdls_cs.peer.op_class,
+ &mvm->tdls_cs.peer.chandef,
+ 0, 0, 0,
+ mvm->tdls_cs.peer.skb,
+ mvm->tdls_cs.peer.ch_sw_tm_ie);
+ if (ret)
+ IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+
+ /* retry after a DTIM if we failed sending now */
+ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+}
+
+int
+iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 oper_class,
+ struct cfg80211_chan_def *chandef,
+ struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ unsigned int delay;
+ int ret;
+
+ guard(mvm)(mvm);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
+ sta->addr, chandef->chan->center_freq, chandef->width);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id != IWL_INVALID_STA) {
+ IWL_DEBUG_TDLS(mvm,
+ "Existing peer. Can't start switch with %pM\n",
+ sta->addr);
+ return -EBUSY;
+ }
+
+ ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+ TDLS_SEND_CHAN_SW_REQ,
+ sta->addr, sta->tdls_initiator,
+ oper_class, chandef, 0, 0, 0,
+ tmpl_skb, ch_sw_tm_ie);
+ if (ret)
+ return ret;
+
+ /*
+ * Mark the peer as "in tdls switch" for this vif. We only allow a
+ * single such peer per vif.
+ */
+ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+ if (!mvm->tdls_cs.peer.skb)
+ return -ENOMEM;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
+ mvm->tdls_cs.peer.chandef = *chandef;
+ mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+ mvm->tdls_cs.peer.op_class = oper_class;
+ mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+
+ /*
+ * Wait for 2 DTIM periods before attempting the next switch. The next
+ * switch will be made sooner if the current one completes before that.
+ */
+ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+ return 0;
+}
+
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *cur_sta;
+ bool wait_for_phy = false;
+
+ mutex_lock(&mvm->mutex);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+
+ /* we only support a single peer for channel switching */
+ if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) {
+ IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+ goto out;
+ }
+
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+ lockdep_is_held(&mvm->mutex));
+ /* make sure it's the same peer */
+ if (cur_sta != sta)
+ goto out;
+
+ /*
+ * If we're currently in a switch because of the now canceled peer,
+ * wait a DTIM here to make sure the phy is back on the base channel.
+ * We can't otherwise force it.
+ */
+ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+ mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
+ wait_for_phy = true;
+
+ mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
+ dev_kfree_skb(mvm->tdls_cs.peer.skb);
+ mvm->tdls_cs.peer.skb = NULL;
+
+out:
+ mutex_unlock(&mvm->mutex);
+
+ /* make sure the phy is on the base channel */
+ if (wait_for_phy)
+#if defined(__linux__)
+ msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+#elif defined(__FreeBSD__)
+ linux_msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+#endif
+ vif->bss_conf.beacon_int));
+
+ /* flush the channel switch state */
+ flush_delayed_work(&mvm->tdls_cs.dwork);
+
+ IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+}
+
+void
+iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tdls_ch_sw_params *params)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ enum iwl_tdls_channel_switch_type type;
+ unsigned int delay;
+ const char *action_str =
+ params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
+ "REQ" : "RESP";
+
+ guard(mvm)(mvm);
+
+ IWL_DEBUG_TDLS(mvm,
+ "Received TDLS ch switch action %s from %pM status %d\n",
+ action_str, params->sta->addr, params->status);
+
+ /*
+ * we got a non-zero status from a peer we were switching to - move to
+ * the idle state and retry again later
+ */
+ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
+ params->status != 0 &&
+ mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+ mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) {
+ struct ieee80211_sta *cur_sta;
+
+ /* make sure it's the same peer */
+ cur_sta = rcu_dereference_protected(
+ mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (cur_sta == params->sta) {
+ iwl_mvm_tdls_update_cs_state(mvm,
+ IWL_MVM_TDLS_SW_IDLE);
+ goto retry;
+ }
+ }
+
+ type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
+ TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
+
+ iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
+ params->sta->tdls_initiator, 0,
+ params->chandef, params->timestamp,
+ params->switch_time,
+ params->switch_timeout,
+ params->tmpl_skb,
+ params->ch_sw_tm_ie);
+
+retry:
+ /* register a timeout in case we don't succeed in switching */
+	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+ mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+ msecs_to_jiffies(delay));
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/testmode.h b/sys/contrib/dev/iwlwifi/mvm/testmode.h
new file mode 100644
index 000000000000..ff82af11de8d
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/testmode.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/sys/contrib/dev/iwlwifi/mvm/tests/hcmd.c b/sys/contrib/dev/iwlwifi/mvm/tests/hcmd.c
new file mode 100644
index 000000000000..1fee0320c756
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/tests/hcmd.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "../mvm.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+ for (int i = 0; i < iwl_mvm_groups_size; i++) {
+ const struct iwl_hcmd_arr *arr = &iwl_mvm_groups[i];
+
+ if (!arr->arr)
+ continue;
+
+ for (int j = 0; j < arr->size - 1; j++)
+ KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+ arr->arr[j + 1].cmd_id);
+ }
+}
+
+static struct kunit_case hcmd_names_cases[] = {
+ KUNIT_CASE(test_hcmd_names_sorted),
+ {},
+};
+
+static struct kunit_suite hcmd_names = {
+ .name = "iwlmvm-hcmd-names",
+ .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
diff --git a/sys/contrib/dev/iwlwifi/mvm/time-event.c b/sys/contrib/dev/iwlwifi/mvm/time-event.c
new file mode 100644
index 000000000000..aa653782d6d7
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/time-event.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ */
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/*
+ * For the high priority TE use a time event type that has similar priority to
+ * the FW's action scan priority.
+ */
+#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
+#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ if (!te_data || !te_data->vif)
+ return;
+
+ list_del(&te_data->list);
+
+ /*
+ * the list is only used for AUX ROC events so make sure it is always
+ * initialized
+ */
+ INIT_LIST_HEAD(&te_data->list);
+
+ te_data->running = false;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ te_data->vif = NULL;
+ te_data->link_id = -1;
+}
+
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct ieee80211_vif *vif = mvm->p2p_device_vif;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /*
+ * Clear the ROC_P2P_RUNNING status bit.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
+ * event finishes or is canceled, so that frames queued for it
+ * won't get stuck on the queue and be transmitted in the next
+ * time event.
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) {
+ struct iwl_mvm_vif *mvmvif;
+
+ synchronize_net();
+
+ /*
+ * NB: access to this pointer would be racy, but the flush bit
+ * can only be set when we had a P2P-Device VIF, and we have a
+ * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+ * not really racy.
+ */
+
+ if (!WARN_ON(!vif)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
+
+ if (mvm->mld_api_is_used) {
+ iwl_mvm_mld_rm_bcast_sta(mvm, vif,
+ &vif->bss_conf);
+
+ iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE,
+ false);
+ } else {
+ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ }
+
+ /* Do not remove the PHY context as removing and adding
+ * a PHY context has timing overheads. Leaving it
+ * configured in FW would be useful in case the next ROC
+ * is with the same channel.
+ */
+ }
+ }
+
+ /*
+ * P2P AUX ROC and HS2.0 ROC do not run simultaneously.
+ * Clear the ROC_AUX_RUNNING status bit.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware
+ * (which is handled in iwl_mvm_te_handle_notif).
+ */
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+ synchronize_net();
+
+ iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
+ mvm->aux_sta.tfd_queue_msk);
+
+		/* In newer versions of this command an aux station is added
+		 * only when a dedicated tx queue is used, and it needs to be
+		 * removed at the end of its use. For the even newer MLD API,
+		 * use the appropriate function.
+		 */
+ if (mvm->mld_api_is_used)
+ iwl_mvm_mld_rm_aux_sta(mvm);
+ else if (iwl_mvm_has_new_station_api(mvm->fw))
+ iwl_mvm_rm_aux_sta(mvm);
+ }
+
+ if (!IS_ERR_OR_NULL(bss_vif))
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
+ mutex_unlock(&mvm->mutex);
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ /* Mutex is released inside */
+ iwl_mvm_cleanup_roc(mvm);
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+ /*
+ * Of course, our status bit is just as racy as mac80211, so in
+ * addition, fire off the work struct which will drop all frames
+ * from the hardware queues that made it through the race. First
+ * it will of course synchronize the TX path to make sure that
+ * any *new* TX will be rejected.
+ */
+ schedule_work(&mvm->roc_done_wk);
+}
+
+static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *csa_vif;
+
+ rcu_read_lock();
+
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (!csa_vif || !csa_vif->bss_conf.csa_active)
+ goto out_unlock;
+
+ IWL_DEBUG_TE(mvm, "CSA NOA started\n");
+
+ /*
+ * CSA NoA is started but we still have beacons to
+ * transmit on the current channel.
+ * So we just do nothing here and the switch
+ * will be performed on the last TBTT.
+ */
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ IWL_WARN(mvm, "CSA NOA started too early\n");
+ goto out_unlock;
+ }
+
+ ieee80211_csa_finish(csa_vif, 0);
+
+ rcu_read_unlock();
+
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+ return;
+
+out_unlock:
+ rcu_read_unlock();
+}
+
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+
+ if (!mvmvif->csa_bcn_pending && vif->cfg.assoc &&
+ vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+
+ if (mvmvif->csa_bcn_pending) {
+ struct iwl_mvm_sta *mvmsta;
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm,
+ mvmvif->deflink.ap_sta_id);
+ if (!WARN_ON(!mvmsta))
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ rcu_read_unlock();
+ }
+
+ if (vif->cfg.assoc) {
+ /*
+ * When not associated, this will be called from
+ * iwl_mvm_event_mlme_callback_ini()
+ */
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
+ NULL);
+
+ mvmvif->session_prot_connection_loss = true;
+ }
+
+ iwl_mvm_connection_loss(mvm, vif, errmsg);
+ return true;
+}
+
+static void
+iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ struct ieee80211_vif *vif = te_data->vif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!notif->status)
+ IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_AP:
+ if (!notif->status)
+ mvmvif->csa_failed = true;
+ iwl_mvm_csa_noa_start(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!notif->status) {
+ iwl_mvm_connection_loss(mvm, vif,
+ "CSA TE failed to start");
+ break;
+ }
+ iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ cancel_delayed_work(&mvmvif->csa_work);
+ ieee80211_chswitch_done(te_data->vif, true, 0);
+ break;
+ default:
+ /* should never happen */
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* we don't need it anymore */
+ iwl_mvm_te_clear_data(mvm, te_data);
+}
+
+static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
+ struct iwl_time_event_notif *notif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_time_event *te_trig;
+ int i;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
+ ieee80211_vif_to_wdev(te_data->vif),
+ FW_DBG_TRIGGER_TIME_EVENT);
+ if (!trig)
+ return;
+
+ te_trig = (void *)trig->data;
+
+ for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+ u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+ u32 trig_action_bitmap =
+ le32_to_cpu(te_trig->time_events[i].action_bitmap);
+ u32 trig_status_bitmap =
+ le32_to_cpu(te_trig->time_events[i].status_bitmap);
+
+ if (trig_te_id != te_data->id ||
+ !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+ !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Time event %d Action 0x%x received status: %d",
+ te_data->id,
+ le32_to_cpu(notif->action),
+ le32_to_cpu(notif->status));
+ break;
+ }
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding to the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_notif *notif)
+{
+ lockdep_assert_held(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+ /*
+ * The FW sends the start/end time event notifications even for events
+ * that it fails to schedule. This is indicated in the status field of
+ * the notification. This happens in cases that the scheduler cannot
+	 * find a schedule that can handle the event (for example requesting
+	 * P2P Device discoverability while there are other higher priority
+ * events in the system).
+ */
+ if (!le32_to_cpu(notif->status)) {
+ const char *msg;
+
+ if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
+ msg = "Time Event start notification failure";
+ else
+ msg = "Time Event end notification failure";
+
+ IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
+
+ if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
+ IWL_DEBUG_TE(mvm,
+ "TE ended - current time %lu, estimated end %lu\n",
+ jiffies, te_data->end_jiffies);
+
+ switch (te_data->vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm);
+ break;
+ case NL80211_IFTYPE_STATION:
+ /*
+ * If we are switching channel, don't disconnect
+ * if the time event is already done. Beacons can
+ * be delayed a bit after the switch.
+ */
+ if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ IWL_DEBUG_TE(mvm,
+ "No beacon heard and the CS time event is over, don't disconnect\n");
+ break;
+ }
+
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ !te_data->vif->cfg.assoc ?
+ "Not associated and the time event is over already..." :
+ "No beacon heard and the time event is over already...");
+ break;
+ default:
+ break;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
+ te_data->running = true;
+ te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
+
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw);
+ } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+ iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
+ }
+ } else {
+ IWL_WARN(mvm, "Got TE with unknown action\n");
+ }
+}
+
+struct iwl_mvm_rx_roc_iterator_data {
+ u32 activity;
+ bool end_activity;
+ bool found;
+};
+
+static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_rx_roc_iterator_data *data = _data;
+
+ if (mvmvif->roc_activity == data->activity) {
+ data->found = true;
+ if (data->end_activity)
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+}
+
+void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ bool started = le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started);
+ struct iwl_mvm_rx_roc_iterator_data data = {
+ .activity = activity,
+ .end_activity = !started,
+ };
+
+ /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_rx_roc_iterator,
+ &data);
+ /*
+ * It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (!data.found)
+ return;
+
+ if (started) {
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw);
+ } else {
+ iwl_mvm_roc_finished(mvm);
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ }
+}
+
+/*
+ * Handle an Aux ROC time event
+ */
+static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
+ struct iwl_time_event_notif *notif)
+{
+ struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data;
+
+ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+ aux_roc_te = te_data;
+ break;
+ }
+ }
+	if (!aux_roc_te) /* Not an Aux ROC time event */
+ return -EINVAL;
+
+ iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+ IWL_DEBUG_TE(mvm,
+ "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action), le32_to_cpu(notif->status));
+
+ if (!le32_to_cpu(notif->status) ||
+ le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+ /* End TE, notify mac80211 */
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ iwl_mvm_roc_finished(mvm); /* flush aux queue */
+ list_del(&te_data->list); /* remove from list */
+ te_data->running = false;
+ te_data->vif = NULL;
+ te_data->uid = 0;
+ te_data->id = TE_MAX;
+ } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ te_data->running = true;
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ } else {
+ IWL_DEBUG_TE(mvm,
+ "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
+ le32_to_cpu(notif->action));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_event_notif *notif = (void *)pkt->data;
+ struct iwl_mvm_time_event_data *te_data, *tmp;
+
+ IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+ le32_to_cpu(notif->unique_id),
+ le32_to_cpu(notif->action));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ /* This time event is triggered for Aux ROC request */
+ if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
+ goto unlock;
+
+ list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+ if (le32_to_cpu(notif->unique_id) == te_data->uid)
+ iwl_mvm_te_handle_notif(mvm, te_data, notif);
+ }
+unlock:
+ spin_unlock_bh(&mvm->time_event_lock);
+}
+
+static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_notif *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ /* te_data->uid is already set in the TIME_EVENT_CMD response */
+ if (le32_to_cpu(resp->unique_id) != te_data->uid)
+ return false;
+
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
+ te_data->uid);
+ if (!resp->status)
+ IWL_ERR(mvm,
+ "TIME_EVENT_NOTIFICATION received but not executed\n");
+
+ return true;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_time_event_data *te_data = data;
+ struct iwl_time_event_resp *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ /* we should never get a response to another TIME_EVENT_CMD here */
+ if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
+ return false;
+
+ te_data->uid = le32_to_cpu(resp->unique_id);
+ IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+ te_data->uid);
+ return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_time_event_data *te_data,
+ struct iwl_time_event_cmd *te_cmd)
+{
+ static const u16 time_event_response[] = { TIME_EVENT_CMD };
+ struct iwl_notification_wait wait_time_event;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
+ le32_to_cpu(te_cmd->duration));
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (WARN_ON(te_data->id != TE_MAX)) {
+ spin_unlock_bh(&mvm->time_event_lock);
+ return -EIO;
+ }
+ te_data->vif = vif;
+ te_data->duration = le32_to_cpu(te_cmd->duration);
+ te_data->id = le32_to_cpu(te_cmd->id);
+ list_add_tail(&te_data->list, &mvm->time_event_list);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ /*
+ * Use a notification wait, which really just processes the
+ * command response and doesn't wait for anything, in order
+ * to be able to process the response and get the UID inside
+ * the RX path. Using CMD_WANT_SKB doesn't work because it
+ * stores the buffer and then wakes up this thread, by which
+ * time another notification (that the time event started)
+ * might already be processed unsuccessfully.
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+ time_event_response,
+ ARRAY_SIZE(time_event_response),
+ iwl_mvm_time_event_response, te_data);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(*te_cmd), te_cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+ iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+ goto out_clear_te;
+ }
+
+ /* No need to wait for anything, so just pass 1 (0 isn't valid) */
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+ /* should never fail */
+ WARN_ON_ONCE(ret);
+
+ if (ret) {
+ out_clear_te:
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+ return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ u32 max_delay, bool wait_for_notif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+ struct iwl_notification_wait wait_te_notif;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ return;
+ }
+
+ if (te_data->running) {
+ IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+ te_data->uid,
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+		/*
+		 * We don't have enough time, so cancel the current TE and
+		 * issue a new one. It would be better to remove the old one
+		 * only when the new one is added, but we don't care if we
+		 * are off channel for a bit; all we need to do is not return
+		 * before we actually begin to be on the channel.
+		 */
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+ time_cmd.apply_time = cpu_to_le32(0);
+
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
+ time_cmd.max_delay = cpu_to_le32(max_delay);
+	/* TODO: why do we need to set interval = bi if it is not periodic? */
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END |
+ TE_V2_START_IMMEDIATELY);
+
+ if (!wait_for_notif) {
+ iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+ return;
+ }
+
+ /*
+ * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
+ * right after we send the time event
+ */
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
+ te_notif_response,
+ ARRAY_SIZE(te_notif_response),
+ iwl_mvm_te_notif, te_data);
+
+ /* If TE was sent OK - wait for the notification that started */
+ if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
+ IWL_ERR(mvm, "Failed to add TE to protect session\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
+ TU_TO_JIFFIES(max_delay))) {
+		IWL_ERR(mvm, "Failed to protect session until the TE started\n");
+ }
+}
+
+/* Determine whether mac or link id should be used, and validate the link id */
+static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s8 link_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), 1);
+
+ if (ver < 2)
+ return mvmvif->id;
+
+ if (WARN(link_id < 0 || !mvmvif->link[link_id],
+		 "Invalid link ID for session protection: %d\n", link_id))
+ return -EINVAL;
+
+ if (WARN(!mvmvif->link[link_id]->active,
+ "Session Protection on an inactive link: %u\n", link_id))
+ return -EINVAL;
+
+ return mvmvif->link[link_id]->fw_link_id;
+}
+
+static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 id, s8 link_id)
+{
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ struct iwl_session_prot_cmd cmd = {
+ .id_and_color = cpu_to_le32(mac_link_id),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .conf_id = cpu_to_le32(id),
+ };
+ int ret;
+
+ if (mac_link_id < 0)
+ return;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
+}
+
+static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
+{
+ struct iwl_roc_req roc_cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .activity = cpu_to_le32(activity),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_cmd);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
+ cmd_len, &roc_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
+}
+
+static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data,
+ u32 *uid)
+{
+ u32 id;
+ struct ieee80211_vif *vif = te_data->vif;
+ struct iwl_mvm_vif *mvmvif;
+ enum nl80211_iftype iftype;
+ s8 link_id;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+
+ if (!vif)
+ return false;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ iftype = te_data->vif->type;
+
+ /*
+ * It is possible that by the time we got to this point the time
+ * event was already removed.
+ */
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /* Save time event uid before clearing its data */
+ *uid = te_data->uid;
+ id = te_data->id;
+ link_id = te_data->link_id;
+
+ /*
+ * The clear_data function handles time events that were already removed
+ */
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) ||
+ (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ iwl_mvm_roc_finished(mvm);
+ }
+ return false;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
+ id != HOT_SPOT_CMD) {
+ /* When session protection is used, the te_data->id field
+ * is reused to save session protection's configuration.
+ * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id
+ * field is set to HOT_SPOT_CMD.
+ */
+ if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
+ /* Session protection is still ongoing. Cancel it */
+ iwl_mvm_cancel_session_protection(mvm, vif, id,
+ link_id);
+			if (iftype == NL80211_IFTYPE_P2P_DEVICE)
+				iwl_mvm_roc_finished(mvm);
+ }
+ return false;
+ } else {
+ /* It is possible that by the time we try to remove it, the
+		 * time event has already ended and been removed. In such a case
+ * there is no need to send a removal command.
+ */
+ if (id == TE_MAX) {
+ IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Explicit request to remove an aux ROC time event. The removal of a time
+ * event needs to be synchronized with the flow of a time event's end
+ * notification, which also removes the time event from the op mode
+ * data structures.
+ */
+static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_hs20_roc_req aux_cmd = {};
+ u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);
+
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ aux_cmd.event_unique_id = cpu_to_le32(uid);
+ aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ aux_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
+ le32_to_cpu(aux_cmd.event_unique_id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
+ len, &aux_cmd);
+
+ if (WARN_ON(ret))
+ return;
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data)
+{
+ struct iwl_time_event_cmd time_cmd = {};
+ u32 uid;
+ int ret;
+
+ if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+ return;
+
+ /* When we remove a TE, the UID is to be set in the id field */
+ time_cmd.id = cpu_to_le32(uid);
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+ ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ sizeof(time_cmd), &time_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Couldn't remove the time event\n");
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ u32 id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ if (id != SESSION_PROTECT_CONF_ASSOC) {
+ IWL_DEBUG_TE(mvm,
+ "don't remove session protection id=%u\n",
+ id);
+ return;
+ }
+ } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+ IWL_DEBUG_TE(mvm,
+ "don't remove TE with id=%u (not session protection)\n",
+ id);
+ return;
+ }
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
+ int id = le32_to_cpu(notif->mac_link_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ rcu_read_lock();
+
+ /* note we use link ID == MAC ID */
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ if (!vif)
+ goto out_unlock;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* The vif is not a P2P_DEVICE, maintain its time_event_data */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ struct iwl_mvm_time_event_data *te_data =
+ &mvmvif->time_event_data;
+
+ if (!le32_to_cpu(notif->status)) {
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ "Session protection failure");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ if (le32_to_cpu(notif->start)) {
+ spin_lock_bh(&mvm->time_event_lock);
+ te_data->running = le32_to_cpu(notif->start);
+ te_data->end_jiffies =
+ TU_TO_EXP_TIME(te_data->duration);
+ spin_unlock_bh(&mvm->time_event_lock);
+ } else {
+ /*
+ * By now, we should have finished association
+ * and know the dtim period.
+ */
+ iwl_mvm_te_check_disconnect(mvm, vif,
+ !vif->cfg.assoc ?
+ "Not associated and the session protection is over already..." :
+ "No beacon heard and the session protection is over already...");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ goto out_unlock;
+ }
+
+ if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
+ /* End TE, notify mac80211 */
+ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+ mvmvif->time_event_data.link_id = -1;
+ /* set the bit so the ROC cleanup will actually clean up */
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+ iwl_mvm_roc_finished(mvm);
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ } else if (le32_to_cpu(notif->start)) {
+ if (WARN_ON(mvmvif->time_event_data.id !=
+ le32_to_cpu(notif->conf_id)))
+ goto out_unlock;
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ }
+
+ out_unlock:
+ rcu_read_unlock();
+}
+
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ u32 dtim_interval = 0;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ dtim_interval =
+ max_t(u32, dtim_interval,
+ link_conf->dtim_period * link_conf->beacon_int);
+ }
+ rcu_read_unlock();
+
+ /*
+ * If we are associated we want the delay time to be at least one
+ * dtim interval so that the FW can wait until after the DTIM and
+ * then start the time event, this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole dtim interval we would also
+ * like the delay to be for 2-3 dtim intervals, in case there are
+ * other time events with higher priority.
+ * dtim_interval should never be 0, it can be 1 if we don't know it
+ * (we haven't heard any beacon yet).
+ */
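+	/*
+	 * Worked example (illustrative values): with dtim_period = 2 and
+	 * beacon_int = 100 TU, dtim_interval is 200 TU, so the delay
+	 * becomes min(3 * 200, AUX_ROC_MAX_DELAY) and any requested
+	 * duration of 200 TU or more is clamped to just under one DTIM
+	 * interval (minus the safety buffer).
+	 */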
+ if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
+
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, enum iwl_roc_activity activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_req);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+		     "\t(requested = %ums, max_delay = %utu)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu. activity %u\n",
+ channel->hw_value, duration_tu, activity);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
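+	/*
+	 * For fw with ROC_CMD version < 6, cmd_len truncates the request
+	 * to the v5 layout; this assumes the v5 struct is a prefix of the
+	 * current one, which is what sending &roc_req with the shorter
+	 * length relies on.
+	 */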
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, cmd_len, &roc_req);
+ if (!res)
+ mvmvif->roc_activity = activity;
+
+ return res;
+}
+
+static int
+iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_session_prot_cmd cmd = {
+ .id_and_color =
+ cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* The time_event_data.id field is reused to save session
+ * protection's configuration.
+ */
+
+ mvmvif->time_event_data.link_id = 0;
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ mvmvif->time_event_data.id =
+ SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ mvmvif->time_event_data.id =
+ SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration, enum ieee80211_roc_type type)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+ if (te_data->running) {
+ IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+ return -EBUSY;
+ }
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
+ duration,
+ type);
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid ROC type\n");
+ return -EINVAL;
+ }
+
+ time_cmd.apply_time = cpu_to_le32(0);
+ time_cmd.interval = cpu_to_le32(1);
+
+ /*
+ * The P2P Device TEs can have lower priority than other events
+ * that are being scheduled by the driver/fw, and thus it might not be
+ * scheduled. To improve the chances of it being scheduled, allow them
+ * to be fragmented, and in addition allow them to be delayed.
+ */
+	time_cmd.max_frags = min(MSEC_TO_TU(duration) / 50, TE_V2_FRAG_ENDLESS);
+	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration / 2));
+ time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END |
+ TE_V2_START_IMMEDIATELY);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_time_event_data *te_data;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+
+ /*
+ * Iterate over the list of time events and find the time event that is
+ * associated with a P2P_DEVICE interface.
+ * This assumes that a P2P_DEVICE interface can have only a single time
+	 * event at any given time and that this time event corresponds to a ROC
+	 * request.
+ */
+ list_for_each_entry(te_data, &mvm->time_event_list, list) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ goto out;
+ }
+
+	/* There can be at most one AUX ROC time event; we just use the
+	 * list to simplify/unify the code. Return it if it exists.
+ */
+ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+ struct iwl_mvm_time_event_data,
+ list);
+out:
+ spin_unlock_bh(&mvm->time_event_lock);
+ return te_data;
+}
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_time_event_data *te_data;
+ u32 uid;
+
+ te_data = iwl_mvm_get_roc_te(mvm);
+ if (te_data)
+ __iwl_mvm_remove_time_event(mvm, te_data, &uid);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ int iftype = vif->type;
+
+ mutex_lock(&mvm->mutex);
+
+ if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+ goto cleanup_roc;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ te_data = &mvmvif->time_event_data;
+
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
+ return;
+ }
+ iwl_mvm_cancel_session_protection(mvm, vif,
+ te_data->id,
+ te_data->link_id);
+ } else {
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
+ &mvmvif->hs_time_event_data);
+ }
+ goto cleanup_roc;
+ }
+
+ te_data = iwl_mvm_get_roc_te(mvm);
+ if (!te_data) {
+ IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
+ return;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+ iftype = te_data->vif->type;
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE)
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ else
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+
+cleanup_roc:
+ /*
+	 * In case we get here before the ROC event started (so the status
+	 * bit isn't set), set it here so that iwl_mvm_cleanup_roc() will
+	 * clean things up properly
+ */
+ if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ else
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+
+ /* Mutex is released inside this function */
+ iwl_mvm_cleanup_roc(mvm);
+}
+
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ u32 id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id != TE_CHANNEL_SWITCH_PERIOD)
+ return;
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 apply_time)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ struct iwl_time_event_cmd time_cmd = {};
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (te_data->running) {
+ u32 id;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id == TE_CHANNEL_SWITCH_PERIOD) {
+ IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Remove the session protection time event to allow the
+ * channel switch. If we got here, we just heard a beacon so
+ * the session protection is not needed anymore anyway.
+ */
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+ }
+
+ time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ time_cmd.id_and_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+ time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
+ time_cmd.apply_time = cpu_to_le32(apply_time);
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
+ time_cmd.duration = cpu_to_le32(duration);
+ time_cmd.repeat = 1;
+ time_cmd.interval = cpu_to_le32(1);
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_ABSENCE);
+ if (!apply_time)
+ time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
+
+ return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_session_prot_notif *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+ pkt->hdr.group_id != MAC_CONF_GROUP))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ if (!resp->status)
+ IWL_ERR(mvm,
+			"SESSION_PROTECTION_NOTIF received but not executed\n");
+
+ return true;
+}
+
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ bool wait_for_notif,
+ unsigned int link_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
+ struct iwl_notification_wait wait_notif;
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
+ struct iwl_session_prot_cmd cmd = {
+ .id_and_color = cpu_to_le32(mac_link_id),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+
+ if (mac_link_id < 0)
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->time_event_lock);
+ if (te_data->running && te_data->link_id == link_id &&
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+ IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+ jiffies_to_msecs(te_data->end_jiffies - jiffies));
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ return;
+ }
+
+ iwl_mvm_te_clear_data(mvm, te_data);
+ /*
+ * The time_event_data.id field is reused to save session
+ * protection's configuration.
+ */
+ te_data->id = le32_to_cpu(cmd.conf_id);
+ te_data->duration = le32_to_cpu(cmd.duration_tu);
+ te_data->vif = vif;
+ te_data->link_id = link_id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ if (!wait_for_notif) {
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
+ 0, sizeof(cmd), &cmd)) {
+ goto send_cmd_err;
+ }
+
+ return;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+ notif, ARRAY_SIZE(notif),
+ iwl_mvm_session_prot_notif, NULL);
+
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
+ 0, sizeof(cmd), &cmd)) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+ goto send_cmd_err;
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+ TU_TO_JIFFIES(100))) {
+ IWL_ERR(mvm,
+			"Failed to protect session until the session protection started\n");
+ }
+ return;
+
+send_cmd_err:
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/time-event.h b/sys/contrib/dev/iwlwifi/mvm/time-event.h
new file mode 100644
index 000000000000..1ef8768756db
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/time-event.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2012-2014, 2019-2020, 2023, 2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ */
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what are they?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by a channel switch decided within the fw.
+ */
+
+/**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course, the API provides various options to control the parameters
+ * of the flow:
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If yes what is the maximal number of chunks?
+ * etc...
+ */
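+
+/**
+ * DOC: Example of the flow
+ *
+ * A minimal sketch of the flow above, using the helpers that
+ * time-event.c itself uses (this is illustrative only; the real entry
+ * points are declared below):
+ *
+ *	// 1) send the command; the notification-wait response handler
+ *	//    records resp->unique_id into te_data->uid
+ *	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+ *				   sizeof(cmd), &cmd);
+ *	// 2) te_data->uid now holds the UID from the response
+ *	// 3) a TIME_EVENT_NOTIFICATION carrying that UID arrives when
+ *	//    the event actually starts
+ */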
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events to the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: will start a new session if the current session will end
+ * in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
+ * @wait_for_notif: true to block until the time event start notification
+ *	is received (i.e. until the time event has actually been scheduled)
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration TU. This function
+ * can block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ u32 max_delay, bool wait_for_notif);
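+
+/*
+ * Example (illustrative values only): protect an association flow for
+ * 500 TU, issuing a new session if fewer than 200 TU remain, allowing
+ * the fw to delay the start by up to 500 TU, and blocking until the
+ * event is scheduled:
+ *
+ *	iwl_mvm_protect_session(mvm, vif, 500, 200, 500, true);
+ */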
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship: if it is not needed any more it should be canceled because
+ * the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/**
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ * @mvm: the mvm component
+ * @rxb: the RX buffer containing the notification
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/**
+ * iwl_mvm_rx_roc_notif - handles %DISCOVERY_ROC_NTF.
+ * @mvm: the mvm component
+ * @rxb: RX buffer
+ */
+void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is requested. It is assumed
+ * that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ * channel that is bound to the vif.
+ * @type: the remain on channel request type
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ *
+ * Return: negative error code or 0 on success
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ int duration, enum ieee80211_roc_type type);
+
+/**
+ * iwl_mvm_stop_roc - stop remain on channel functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the roc is stopped
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
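+
+/*
+ * Example (illustrative values only): a 300 ms P2P discovery ROC that
+ * is cancelled early:
+ *
+ *	ret = iwl_mvm_start_p2p_roc(mvm, vif, 300, IEEE80211_ROC_TYPE_NORMAL);
+ *	...
+ *	iwl_mvm_stop_roc(mvm, vif);
+ */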
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up of time event
+ * @mvm: the mvm component
+ * @mvmvif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless of its type.
+ * It is useful for cleaning up time events running before removing an
+ * interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal; it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+ struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm);
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
+/**
+ * iwl_mvm_schedule_csa_period - request channel switch absence period
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the channel switch is issued
+ * @duration: the duration of the NoA in TU.
+ * @apply_time: NoA start time in GP2.
+ *
+ * This function is used to schedule a NoA time event and to perform
+ * the channel switch flow.
+ *
+ * Return: negative error code or 0 on success
+ */
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 apply_time);
+
+/**
+ * iwl_mvm_te_scheduled - check if the fw received the TE cmd
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * Return: %true if this TE is added to the fw, %false otherwise
+ */
+static inline bool
+iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
+{
+ if (!te_data)
+ return false;
+
+ return !!te_data->uid;
+}
+
+/**
+ * iwl_mvm_schedule_session_protection - schedule a session protection
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
+ * @link_id: The link to schedule a session protection for
+ */
+void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ bool wait_for_notif,
+ unsigned int link_id);
+
+/**
+ * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mvm: the mvm component
+ * @rxb: the RX buffer containing the notification
+ */
+void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+#endif /* __time_event_h__ */
diff --git a/sys/contrib/dev/iwlwifi/mvm/time-sync.c b/sys/contrib/dev/iwlwifi/mvm/time-sync.c
new file mode 100644
index 000000000000..edae3e24192b
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/time-sync.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#include "mvm.h"
+#include "time-sync.h"
+#include <linux/ieee80211.h>
+
+void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data)
+{
+ skb_queue_head_init(&data->frame_list);
+}
+
+static bool iwl_mvm_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ u8 skb_dialog_token;
+
+ if (ieee80211_is_timing_measurement(skb))
+ skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token;
+ else
+ skb_dialog_token = mgmt->u.action.u.ftm.dialog_token;
+
+ if ((ether_addr_equal(mgmt->sa, addr) ||
+ ether_addr_equal(mgmt->da, addr)) &&
+ skb_dialog_token == dialog_token)
+ return true;
+
+ return false;
+}
+
+static struct sk_buff *iwl_mvm_time_sync_find_skb(struct iwl_mvm *mvm, u8 *addr,
+ u8 dialog_token)
+{
+ struct sk_buff *skb;
+
+ /* The queue is expected to have only one SKB. If there are other SKBs
+ * in the queue, they did not get a time sync notification and are
+ * probably obsolete by now, so drop them.
+ */
+ while ((skb = skb_dequeue(&mvm->time_sync.frame_list))) {
+ if (iwl_mvm_is_skb_match(skb, addr, dialog_token))
+ break;
+
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ return skb;
+}
+
+static u64 iwl_mvm_get_64_bit(__le32 high, __le32 low)
+{
+ return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low);
+}
+
+void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_msmt_notify *notif = (void *)pkt->data;
+ struct ieee80211_rx_status *rx_status;
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns;
+ struct sk_buff *skb =
+ iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+ u64 adj_time;
+
+ if (!skb) {
+ IWL_DEBUG_INFO(mvm, "Time sync event but no pending skb\n");
+ return;
+ }
+
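+	/* the fw reports timestamps in units of 10 ns; scale to ns */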
+ ts_10ns = iwl_mvm_get_64_bit(notif->t2_hi, notif->t2_lo);
+ adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
+ shwt = skb_hwtstamps(skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mvm_get_64_bit(notif->t3_hi, notif->t3_lo);
+ adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);
+
+ IWL_DEBUG_INFO(mvm,
+ "Time sync: RX event - report frame t2=%llu t3=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(rx_status->ack_tx_hwtstamp));
+ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
+}
+
+void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
+ struct ieee80211_tx_status status = {};
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns, adj_time;
+
+ status.skb =
+ iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+
+ if (!status.skb) {
+ IWL_DEBUG_INFO(mvm, "Time sync confirm but no pending skb\n");
+ return;
+ }
+
+ ts_10ns = iwl_mvm_get_64_bit(notif->t1_hi, notif->t1_lo);
+ adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
+ shwt = skb_hwtstamps(status.skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mvm_get_64_bit(notif->t4_hi, notif->t4_lo);
+ adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10);
+ status.info = IEEE80211_SKB_CB(status.skb);
+ status.ack_hwtstamp = ktime_set(0, adj_time);
+
+ IWL_DEBUG_INFO(mvm,
+ "Time sync: TX event - report frame t1=%llu t4=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(status.ack_hwtstamp));
+ ieee80211_tx_status_ext(mvm->hw, &status);
+}
+
+int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, u32 protocols)
+{
+ struct iwl_time_sync_cfg_cmd cmd = {};
+ int err;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
+ return -EINVAL;
+
+ /* The fw only supports one peer. We do allow reconfiguration of the
+ * same peer for cases of fw reset etc.
+ */
+ if (mvm->time_sync.active &&
+ !ether_addr_equal(addr, mvm->time_sync.peer_addr)) {
+ IWL_DEBUG_INFO(mvm, "Time sync: reject config for peer: %pM\n",
+ addr);
+ return -ENOBUFS;
+ }
+
+ if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
+ IWL_TIME_SYNC_PROTOCOL_FTM))
+ return -EINVAL;
+
+ cmd.protocols = cpu_to_le32(protocols);
+
+ ether_addr_copy(cmd.peer_addr, addr);
+
+ err = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (err) {
+ IWL_ERR(mvm, "Failed to send time sync cfg cmd: %d\n", err);
+ } else {
+ mvm->time_sync.active = protocols != 0;
+ ether_addr_copy(mvm->time_sync.peer_addr, addr);
+ IWL_DEBUG_INFO(mvm, "Time sync: set peer addr=%pM\n", addr);
+ }
+
+ if (!mvm->time_sync.active)
+ skb_queue_purge(&mvm->time_sync.frame_list);
+
+ return err;
+}
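+
+/*
+ * Example (illustrative; the peer address is made up): enabling both
+ * supported protocols for a peer:
+ *
+ *	u8 peer[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+ *
+ *	ret = iwl_mvm_time_sync_config(mvm, peer,
+ *				       IWL_TIME_SYNC_PROTOCOL_TM |
+ *				       IWL_TIME_SYNC_PROTOCOL_FTM);
+ */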
diff --git a/sys/contrib/dev/iwlwifi/mvm/time-sync.h b/sys/contrib/dev/iwlwifi/mvm/time-sync.h
new file mode 100644
index 000000000000..2cfd0fb5e781
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/time-sync.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2022 Intel Corporation
+ */
+#ifndef __TIME_SYNC_H__
+#define __TIME_SYNC_H__
+
+#include "mvm.h"
+#include <linux/ieee80211.h>
+
+void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data);
+void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr,
+ u32 protocols);
+
+static inline
+bool iwl_mvm_time_sync_frame(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *addr)
+{
+ if (ether_addr_equal(mvm->time_sync.peer_addr, addr) &&
+ (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) {
+ skb_queue_tail(&mvm->time_sync.frame_list, skb);
+ return true;
+ }
+
+ return false;
+}
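+
+/*
+ * Example (illustrative): a TX path would typically call
+ * iwl_mvm_time_sync_frame() with the frame's peer address before
+ * queueing it (hdr->addr1 below only stands for that address); on true
+ * the skb is owned by the time sync frame_list and must not be freed
+ * by the caller:
+ *
+ *	if (iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))
+ *		return;
+ */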
+#endif
diff --git a/sys/contrib/dev/iwlwifi/mvm/tt.c b/sys/contrib/dev/iwlwifi/mvm/tt.c
new file mode 100644
index 000000000000..1958f4ca4773
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/tt.c
@@ -0,0 +1,866 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2016 Intel Deutschland GmbH
+ */
+#ifdef CONFIG_THERMAL
+#include <linux/sort.h>
+#endif
+
+#include "mvm.h"
+
+#define IWL_MVM_NUM_CTDP_STEPS 20
+#define IWL_MVM_MIN_CTDP_BUDGET_MW 150
+
+#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
+
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+ u32 duration = tt->params.ct_kill_duration;
+
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ IWL_ERR(mvm, "Enter CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, true);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+ }
+
+ /* Don't schedule an exit work if we're in test mode, since
+ * the temperature will not change unless we manually set it
+ * again (or disable testing).
+ */
+ if (!mvm->temperature_test)
+ schedule_delayed_work(&tt->ct_kill_exit,
+ round_jiffies_relative(duration * HZ));
+}
+
+static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
+{
+ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ IWL_ERR(mvm, "Exit CT Kill\n");
+ iwl_mvm_set_hw_ctkill_state(mvm, false);
+}
+
+static void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+{
+ /* ignore the notification if we are in test mode */
+ if (mvm->temperature_test)
+ return;
+
+ if (mvm->temperature == temp)
+ return;
+
+ mvm->temperature = temp;
+ iwl_mvm_tt_handler(mvm);
+}
+
+static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_dts_measurement_notif_v1 *notif_v1;
+ int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
+
+	/* we can use notif_v1 here, because v2 merely adds an additional
+	 * parameter that is not used in this function.
+ */
+ if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
+ IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+ return -EINVAL;
+ }
+
+ notif_v1 = (void *)pkt->data;
+
+ temp = le32_to_cpu(notif_v1->temp);
+
+ /* shouldn't be negative, but since it's s32, make sure it isn't */
+ if (WARN_ON_ONCE(temp < 0))
+ temp = 0;
+
+ IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
+
+ return temp;
+}
+
+static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ int *temp = data;
+ int ret;
+
+ ret = iwl_mvm_temp_notif_parse(mvm, pkt);
+ if (ret < 0)
+ return true;
+
+ *temp = ret;
+
+ return true;
+}
+
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_dts_measurement_notif *notif_v2;
+ int len = iwl_rx_packet_payload_len(pkt);
+ int temp;
+ u32 ths_crossed;
+
+ /* the notification is handled synchronously in ctkill, so skip here */
+ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+ return;
+
+ temp = iwl_mvm_temp_notif_parse(mvm, pkt);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ if (temp >= 0)
+ iwl_mvm_tt_temp_changed(mvm, temp);
+ return;
+ }
+
+ if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
+ IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+ return;
+ }
+
+ notif_v2 = (void *)pkt->data;
+ ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
+
+ /* 0xFF in ths_crossed means the notification is not related
+ * to a trip, so we can ignore it here.
+ */
+ if (ths_crossed == 0xFF)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
+ temp, ths_crossed);
+
+#ifdef CONFIG_THERMAL
+ if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
+ return;
+
+ if (mvm->tz_device.tzone) {
+ struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
+
+ thermal_zone_device_update(tz_dev->tzone,
+ THERMAL_TRIP_VIOLATED);
+ }
+#endif /* CONFIG_THERMAL */
+}
+
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct ct_kill_notif *notif;
+
+ notif = (struct ct_kill_notif *)pkt->data;
+ IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
+ notif->temperature);
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP,
+ CT_KILL_NOTIFICATION, 0) > 1)
+ IWL_DEBUG_TEMP(mvm,
+ "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n",
+ notif->dts, notif->scheme);
+
+ iwl_mvm_enter_ctkill(mvm);
+}
+
+/*
+ * send the DTS_MEASUREMENT_TRIGGER command with or without waiting for a
+ * response. If we get a response then the measurement is stored in 'temp'
+ */
+static int iwl_mvm_send_temp_cmd(struct iwl_mvm *mvm, bool response, s32 *temp)
+{
+ struct iwl_host_cmd cmd = {};
+ struct iwl_dts_measurement_cmd dts_cmd = {
+ .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
+ };
+ struct iwl_ext_dts_measurement_cmd ext_cmd = {
+ .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
+ };
+ struct iwl_dts_measurement_resp *resp;
+ void *cmd_ptr;
+ int ret;
+ u32 cmd_flags = 0;
+ u16 len;
+
+ /* Check which command format is used (regular/extended) */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) {
+ len = sizeof(ext_cmd);
+ cmd_ptr = &ext_cmd;
+ } else {
+ len = sizeof(dts_cmd);
+ cmd_ptr = &dts_cmd;
+ }
+	/* The command version that returns a response takes a zero-length payload */
+ if (response) {
+ cmd_flags = CMD_WANT_SKB;
+ len = 0;
+ }
+
+ cmd.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE);
+ cmd.len[0] = len;
+ cmd.flags = cmd_flags;
+ cmd.data[0] = cmd_ptr;
+
+ IWL_DEBUG_TEMP(mvm,
+ "Sending temperature measurement command - %s response\n",
+ response ? "with" : "without");
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
+ IWL_ERR(mvm,
+ "Failed to send the temperature measurement command (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (response) {
+ resp = (void *)cmd.resp_pkt->data;
+ *temp = le32_to_cpu(resp->temp);
+ IWL_DEBUG_TEMP(mvm,
+ "Got temperature measurement response: temp=%d\n",
+ *temp);
+ iwl_free_resp(&cmd);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
+{
+ struct iwl_notification_wait wait_temp_notif;
+ static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+ DTS_MEASUREMENT_NOTIF_WIDE) };
+ int ret;
+ u8 cmd_ver;
+
+ /*
+ * If command version is 1 we send the command and immediately get
+ * a response. For older versions we send the command and wait for a
+ * notification (no command TLV for previous versions).
+ */
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1)
+ return iwl_mvm_send_temp_cmd(mvm, true, temp);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
+ temp_notif, ARRAY_SIZE(temp_notif),
+ iwl_mvm_temp_notif_wait, temp);
+
+ ret = iwl_mvm_send_temp_cmd(mvm, false, temp);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
+ IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
+ if (ret)
+ IWL_WARN(mvm, "Getting the temperature timed out\n");
+
+ return ret;
+}
+
+static void check_exit_ctkill(struct work_struct *work)
+{
+ struct iwl_mvm_tt_mgmt *tt;
+ struct iwl_mvm *mvm;
+ u32 duration;
+ s32 temp;
+ int ret;
+
+ tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
+ mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+ iwl_mvm_exit_ctkill(mvm);
+
+ return;
+ }
+
+ duration = tt->params.ct_kill_duration;
+
+ flush_work(&mvm->roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+
+ if (__iwl_mvm_mac_start(mvm))
+ goto reschedule;
+
+ ret = iwl_mvm_get_temp(mvm, &temp);
+
+ __iwl_mvm_mac_stop(mvm, false);
+
+ if (ret)
+ goto reschedule;
+
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+
+ if (temp <= tt->params.ct_kill_exit) {
+ mutex_unlock(&mvm->mutex);
+ iwl_mvm_exit_ctkill(mvm);
+ return;
+ }
+
+reschedule:
+ mutex_unlock(&mvm->mutex);
+ schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+ round_jiffies(duration * HZ));
+}
+
+static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ enum ieee80211_smps_mode smps_mode;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->thermal_throttle.dynamic_smps)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode, 0);
+}
+
+static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
+{
+ struct iwl_mvm_sta *mvmsta;
+ int i, err;
+
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
+ if (!mvmsta)
+ continue;
+
+ if (enable == mvmsta->tt_tx_protection)
+ continue;
+ err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
+ if (err) {
+ IWL_ERR(mvm, "Failed to %s Tx protection\n",
+ enable ? "enable" : "disable");
+ } else {
+ IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
+ enable ? "Enable" : "Disable");
+ mvmsta->tt_tx_protection = enable;
+ }
+ }
+}
+
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_THERMAL_MNG_BACKOFF,
+ .len = { sizeof(u32), },
+ .data = { &backoff, },
+ };
+
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
+ if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
+ IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
+ backoff);
+ mvm->thermal_throttle.tx_backoff = backoff;
+ } else {
+ IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
+ }
+}
+
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
+{
+ struct iwl_tt_params *params = &mvm->thermal_throttle.params;
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+ s32 temperature = mvm->temperature;
+ bool throttle_enable = false;
+ int i;
+ u32 tx_backoff;
+
+ IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
+
+ if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
+ iwl_mvm_enter_ctkill(mvm);
+ return;
+ }
+
+ if (params->support_ct_kill &&
+ temperature <= params->ct_kill_exit) {
+ iwl_mvm_exit_ctkill(mvm);
+ return;
+ }
+
+ if (params->support_dynamic_smps) {
+ if (!tt->dynamic_smps &&
+ temperature >= params->dynamic_smps_entry) {
+ IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
+ tt->dynamic_smps = true;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ throttle_enable = true;
+ } else if (tt->dynamic_smps &&
+ temperature <= params->dynamic_smps_exit) {
+ IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
+ tt->dynamic_smps = false;
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tt_smps_iterator, mvm);
+ }
+ }
+
+ if (params->support_tx_protection) {
+ if (temperature >= params->tx_protection_entry) {
+ iwl_mvm_tt_tx_protection(mvm, true);
+ throttle_enable = true;
+ } else if (temperature <= params->tx_protection_exit) {
+ iwl_mvm_tt_tx_protection(mvm, false);
+ }
+ }
+
+ if (params->support_tx_backoff) {
+ tx_backoff = tt->min_backoff;
+ for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+ if (temperature < params->tx_backoff[i].temperature)
+ break;
+ tx_backoff = max(tt->min_backoff,
+ params->tx_backoff[i].backoff);
+ }
+ if (tx_backoff != tt->min_backoff)
+ throttle_enable = true;
+ if (tt->tx_backoff != tx_backoff)
+ iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
+ }
+
+ if (!tt->throttle && throttle_enable) {
+ IWL_WARN(mvm,
+ "Due to high temperature thermal throttling initiated\n");
+ tt->throttle = true;
+ } else if (tt->throttle && !tt->dynamic_smps &&
+ tt->tx_backoff == tt->min_backoff &&
+ temperature <= params->tx_protection_exit) {
+ IWL_WARN(mvm,
+ "Temperature is back to normal thermal throttling stopped\n");
+ tt->throttle = false;
+ }
+}
+
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 200},
+ {.temperature = 113, .backoff = 600},
+ {.temperature = 114, .backoff = 1200},
+ {.temperature = 115, .backoff = 2000},
+ {.temperature = 116, .backoff = 4000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
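+
+/*
+ * iwl_mvm_tt_handler() walks the tx_backoff table above and applies the
+ * backoff of the hottest entry at or below the current temperature
+ * (e.g. 1200 at 114 degrees); below the first entry it falls back to
+ * the minimal backoff.
+ */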
+
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
+{
+ struct iwl_ctdp_cmd cmd = {
+ .operation = cpu_to_le32(op),
+ .window_size = 0,
+ };
+ u32 budget;
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Do a linear scale from IWL_MVM_MIN_CTDP_BUDGET_MW to the configured
+ * maximum in the predefined number of steps.
+ */
+ budget = ((mvm->thermal_throttle.power_budget_mw -
+ IWL_MVM_MIN_CTDP_BUDGET_MW) *
+ (IWL_MVM_NUM_CTDP_STEPS - 1 - state)) /
+ (IWL_MVM_NUM_CTDP_STEPS - 1) +
+ IWL_MVM_MIN_CTDP_BUDGET_MW;
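+ /*
+ * E.g. with hypothetical values IWL_MVM_MIN_CTDP_BUDGET_MW = 150 and
+ * IWL_MVM_NUM_CTDP_STEPS = 20, a 2400 mW maximum maps state 0 to
+ * 2400 mW and state 19 to 150 mW, stepping down linearly in between.
+ */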
+ cmd.budget = cpu_to_le32(budget);
+
+ status = 0;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+ CTDP_CONFIG_CMD),
+ sizeof(cmd), &cmd, &status);
+
+ if (ret) {
+ IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ switch (op) {
+ case CTDP_CMD_OPERATION_START:
+#ifdef CONFIG_THERMAL
+ mvm->cooling_dev.cur_state = state;
+#endif /* CONFIG_THERMAL */
+ break;
+ case CTDP_CMD_OPERATION_REPORT:
+ IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+ /* when the function is called with CTDP_CMD_OPERATION_REPORT
+ * option the function should return the average budget value
+ * that is received from the FW.
+ * The budget can't be less than or equal to 0, so it's possible
+ * to distinguish between error values and budgets.
+ */
+ return status;
+ case CTDP_CMD_OPERATION_STOP:
+ IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n");
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_THERMAL
+static int compare_temps(const void *a, const void *b)
+{
+ return ((s16)le16_to_cpu(*(const __le16 *)a) -
+ (s16)le16_to_cpu(*(const __le16 *)b));
+}
+
+struct iwl_trip_walk_data {
+ __le16 *thresholds;
+ int count;
+};
+
+static int iwl_trip_temp_cb(struct thermal_trip *trip, void *arg)
+{
+ struct iwl_trip_walk_data *twd = arg;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ return 0;
+
+ twd->thresholds[twd->count++] = cpu_to_le16((s16)(trip->temperature / 1000));
+ return 0;
+}
+#endif
+
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
+{
+ struct temp_report_ths_cmd cmd = {0};
+ int ret;
+#ifdef CONFIG_THERMAL
+ struct iwl_trip_walk_data twd = { .thresholds = cmd.thresholds, .count = 0 };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->tz_device.tzone)
+ goto send;
+
+ /*
+ * The thermal core holds an array of temperature trips that are
+ * unsorted and uncompressed; the FW should get it compressed and
+ * sorted.
+ */
+
+ /* compress trips to cmd array, remove uninitialized values */
+ for_each_thermal_trip(mvm->tz_device.tzone, iwl_trip_temp_cb, &twd);
+
+ cmd.num_temps = cpu_to_le32(twd.count);
+ if (twd.count)
+ sort(cmd.thresholds, twd.count, sizeof(s16), compare_temps, NULL);
+
+send:
+#endif
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ TEMP_REPORTING_THRESHOLDS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+ ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
+ int *temperature)
+{
+ struct iwl_mvm *mvm = thermal_zone_device_priv(device);
+ int ret;
+ int temp;
+
+ guard(mvm)(mvm);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+ /*
+ * Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ return 0;
+ }
+
+ ret = iwl_mvm_get_temp(mvm, &temp);
+ if (ret)
+ return ret;
+
+ *temperature = temp * 1000;
+ return 0;
+}
+
+static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
+ const struct thermal_trip *trip, int temp)
+{
+ struct iwl_mvm *mvm = thermal_zone_device_priv(device);
+
+ guard(mvm)(mvm);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ if ((temp / 1000) > S16_MAX)
+ return -EINVAL;
+
+ return iwl_mvm_send_temp_report_ths_cmd(mvm);
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = iwl_mvm_tzone_get_temp,
+ .set_trip_temp = iwl_mvm_tzone_set_trip_temp,
+};
+
+static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+{
+ int i, ret;
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ mvm->tz_device.tzone = NULL;
+
+ return;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ /*
+ * 0 is a valid temperature, so initialize the array with
+ * THERMAL_TEMP_INVALID, which is not a valid temperature
+ */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ mvm->tz_device.trips[i].temperature = THERMAL_TEMP_INVALID;
+ mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
+ mvm->tz_device.trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
+ }
+ mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name,
+ mvm->tz_device.trips,
+ IWL_MAX_DTS_TRIPS,
+ mvm, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(mvm->tz_device.tzone)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to thermal zone (err = %ld)\n",
+ PTR_ERR(mvm->tz_device.tzone));
+ mvm->tz_device.tzone = NULL;
+ return;
+ }
+
+ ret = thermal_zone_device_enable(mvm->tz_device.tzone);
+ if (ret) {
+ IWL_DEBUG_TEMP(mvm, "Failed to enable thermal zone\n");
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ }
+}
+
+static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = IWL_MVM_NUM_CTDP_STEPS - 1;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+ *state = mvm->cooling_dev.cur_state;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+ guard(mvm)(mvm);
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ if (new_state >= IWL_MVM_NUM_CTDP_STEPS)
+ return -EINVAL;
+
+ return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
+}
+
+static const struct thermal_cooling_device_ops tcooling_ops = {
+ .get_max_state = iwl_mvm_tcool_get_max_state,
+ .get_cur_state = iwl_mvm_tcool_get_cur_state,
+ .set_cur_state = iwl_mvm_tcool_set_cur_state,
+};
+
+static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+{
+ char name[] = "iwlwifi";
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mvm->cooling_dev.cdev =
+ thermal_cooling_device_register(name,
+ mvm,
+ &tcooling_ops);
+
+ if (IS_ERR(mvm->cooling_dev.cdev)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to cooling device (err = %ld)\n",
+ PTR_ERR(mvm->cooling_dev.cdev));
+ mvm->cooling_dev.cdev = NULL;
+ return;
+ }
+}
+
+static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+}
+
+static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+}
+#endif /* CONFIG_THERMAL */
+
+static u32 iwl_mvm_ctdp_get_max_budget(struct iwl_mvm *mvm)
+{
+ u64 bios_power_budget = 0;
+ u32 default_power_budget;
+
+ switch (CSR_HW_RFID_TYPE(mvm->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_JF2:
+ case IWL_CFG_RF_TYPE_JF1:
+ default_power_budget = 2000;
+ break;
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_HR1:
+ default_power_budget = 2400;
+ break;
+ case IWL_CFG_RF_TYPE_GF:
+ /* dual-radio devices have a higher budget */
+ if (CSR_HW_RFID_IS_CDB(mvm->trans->info.hw_rf_id))
+ default_power_budget = 5200;
+ else
+ default_power_budget = 2880;
+ break;
+ case IWL_CFG_RF_TYPE_FM:
+ default_power_budget = 3450;
+ break;
+ default:
+ default_power_budget = 5550;
+ break;
+ }
+
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &bios_power_budget);
+
+ /* 32-bit in UEFI, 16-bit in ACPI; use the BIOS value if it is in range */
+ if (bios_power_budget &&
+ bios_power_budget != 0xffff && bios_power_budget != 0xffffffff &&
+ bios_power_budget >= IWL_MVM_MIN_CTDP_BUDGET_MW &&
+ bios_power_budget <= default_power_budget)
+ return (u32)bios_power_budget;
+
+ return default_power_budget;
+}
+
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
+{
+ struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+
+ IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
+
+ if (mvm->cfg->thermal_params)
+ tt->params = *mvm->cfg->thermal_params;
+ else
+ tt->params = iwl_mvm_default_tt_params;
+
+ tt->power_budget_mw = iwl_mvm_ctdp_get_max_budget(mvm);
+ IWL_DEBUG_TEMP(mvm, "cTDP power budget: %d mW\n", tt->power_budget_mw);
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+ tt->min_backoff = min_backoff;
+ INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_register(mvm);
+ iwl_mvm_thermal_zone_register(mvm);
+#endif
+ mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
+
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
+{
+ if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
+ return;
+
+ cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
+ IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_unregister(mvm);
+ iwl_mvm_thermal_zone_unregister(mvm);
+#endif
+ mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/tx.c b/sys/contrib/dev/iwlwifi/mvm/tx.c
new file mode 100644
index 000000000000..2b6052a6f90a
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/tx.c
@@ -0,0 +1,2324 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <net/gso.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "iwl-trans.h"
+#include "iwl-nvm-utils.h"
+#include "iwl-utils.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-sync.h"
+
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+ u16 tid, u16 ssn)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
+ if (!trig)
+ return;
+
+ ba_trig = (void *)trig->data;
+
+ if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "BAR sent to %pM, tid %d, ssn %d",
+ addr, tid, ssn);
+}
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ bool amsdu)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = 0;
+#if IS_ENABLED(CONFIG_INET)
+ u8 protocol = 0;
+
+ /* Do not compute checksum if already computed */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto out;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ /* if we get here, protocol should now be TCP or UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /*
+ * Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+ /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) && amsdu) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+out:
+#endif
+ /*
+ * The MAC header length should include the IV, unless the IV is
+ * added by the firmware, as in WEP. In the new Tx API the IV is
+ * always added by the firmware. The size is in words.
+ */
+ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
+ mh_len += info->control.hw_key->iv_len;
+ mh_len /= 2;
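+ /* e.g. a 26-byte QoS header plus an 8-byte CCMP IV gives mh_len = 17 */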
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ if (amsdu)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+ /* padding is inserted later in transport */
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ return offload_assist;
+}
+
+/*
+ * Sets most of the Tx cmd's fields
+ */
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info, u8 sta_id)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u32 tx_flags = le32_to_cpu(tx_cmd_params->tx_flags);
+ u32 len = skb->len + FCS_LEN;
+ bool amsdu = false;
+ u8 ac;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (ieee80211_is_probe_resp(fc) &&
+ !is_multicast_ether_addr(hdr->addr1)))
+ tx_flags |= TX_CMD_FLG_ACK;
+ else
+ tx_flags &= ~TX_CMD_FLG_ACK;
+
+ if (ieee80211_is_probe_resp(fc))
+ tx_flags |= TX_CMD_FLG_TSF;
+
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd_params->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd_params->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd_params->tid_tspec >= IWL_MAX_TID_COUNT);
+ iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd_params->tid_tspec,
+ ssn);
+ } else {
+ if (ieee80211_is_data(fc))
+ tx_cmd_params->tid_tspec = IWL_TID_NON_QOS;
+ else
+ tx_cmd_params->tid_tspec = IWL_MAX_TID_COUNT;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_flags |= TX_CMD_FLG_SEQ_CTL;
+ else
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ }
+
+ /* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
+ if (tx_cmd_params->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd_params->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
+ tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+ TX_CMD_FLG_BT_PRIO_POS;
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+ else if (ieee80211_is_action(fc))
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ else
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+
+ /* The spec allows Action frames in A-MPDU, but we don't
+ * support it
+ */
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+ } else {
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ }
+
+ if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+ !is_multicast_ether_addr(hdr->addr1))
+ tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+ ieee80211_action_contains_tpc(skb))
+ tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
+
+ tx_cmd_params->tx_flags = cpu_to_le32(tx_flags);
+ /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+ tx_cmd_params->len = cpu_to_le16((u16)skb->len);
+ tx_cmd_params->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_cmd_params->sta_id = sta_id;
+
+ tx_cmd_params->offload_assist =
+ cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu));
+}
+
+static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (info->band == NL80211_BAND_2GHZ &&
+ !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+ return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+
+ if (sta && ieee80211_is_data(fc)) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
+ }
+
+ return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+}
+
+static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ int rate_idx)
+{
+ u32 rate_flags = 0;
+ u8 rate_plcp;
+ bool is_cck;
+
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
+ info,
+ info->control.vif);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
+ is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) &&
+ (rate_idx <= IWL_LAST_CCK_RATE);
+
+ /* Set CCK or OFDM flag */
+ if (!is_cck)
+ rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ else
+ rate_flags |= RATE_MCS_MOD_TYPE_CCK;
+
+ return (u32)rate_plcp | rate_flags;
+}
+
+static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
+{
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ u32 result;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+ u8 nss = ieee80211_rate_get_vht_nss(rate);
+
+ result = RATE_MCS_MOD_TYPE_VHT;
+ result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+ result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= RATE_MCS_CHAN_WIDTH_40;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ result |= RATE_MCS_CHAN_WIDTH_80;
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ result |= RATE_MCS_CHAN_WIDTH_160;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ result = RATE_MCS_MOD_TYPE_HT;
+ result |= u32_encode_bits(rate->idx & 0x7,
+ RATE_HT_MCS_CODE_MSK);
+ result |= u32_encode_bits(rate->idx >> 3,
+ RATE_MCS_NSS_MSK);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= RATE_MCS_CHAN_WIDTH_40;
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ result |= RATE_MCS_LDPC_MSK;
+ if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
+ result |= RATE_MCS_STBC_MSK;
+ } else {
+ int rate_idx = info->control.rates[0].idx;
+
+ result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
+ }
+
+ if (info->control.antennas)
+ result |= u32_encode_bits(info->control.antennas,
+ RATE_MCS_ANT_AB_MSK);
+ else
+ result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+
+ return result;
+}
+
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ int rate_idx = -1;
+
+ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ /* info->control is only relevant for non-HW rate control */
+
+ /* HT rate doesn't make sense for a non-data frame */
+ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_data(fc),
+ "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
+ info->control.rates[0].flags,
+ info->control.rates[0].idx,
+ le16_to_cpu(fc),
+ sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
+
+ rate_idx = info->control.rates[0].idx;
+
+ /* For non-2 GHz bands, remap mac80211 rate indices into driver
+ * indices.
+ */
+ if (info->band != NL80211_BAND_2GHZ ||
+ (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ rate_idx += IWL_FIRST_OFDM_RATE;
+
+ /* For the 2.4 GHz band, check that there is no need to remap */
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+ }
+
+ return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
+}
+
+static __le32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ u32 rate;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ rate = iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+ else
+ rate = iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
+ iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+
+ return iwl_mvm_v3_rate_to_fw(rate, mvm->fw_rates_ver);
+}
+
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ */
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ /* Set retry limit on RTS packets */
+ tx_cmd_params->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc)) {
+ tx_cmd_params->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ tx_cmd_params->rts_retry_limit =
+ min(tx_cmd_params->data_retry_limit, tx_cmd_params->rts_retry_limit);
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd_params->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+ } else {
+ tx_cmd_params->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ }
+
+ /*
+ * for data packets, rate info comes from the table inside the fw. This
+ * table is controlled by LINK_QUALITY commands
+ */
+
+ if (likely(ieee80211_is_data(fc) && sta &&
+ !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
+ tx_cmd_params->initial_rate_index = 0;
+ tx_cmd_params->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+ }
+ } else if (ieee80211_is_back_req(fc)) {
+ tx_cmd_params->tx_flags |=
+ cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
+ }
+
+ /* Set the rate in the TX cmd */
+ tx_cmd_params->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);
+}
+
+static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+ u8 *crypto_hdr)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u64 pn;
+
+ pn = atomic64_inc_return(&keyconf->tx_pn);
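+ /*
+ * Lay out the CCMP/GCMP header: PN0, PN1, one reserved byte, the
+ * key-ID octet (Ext IV bit 0x20 | keyidx << 6), then PN2..PN5;
+ * e.g. pn 0x0000AABBCCDDEEFF with keyidx 0 gives the bytes
+ * ff ee 00 20 dd cc bb aa.
+ */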
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct sk_buff *skb_frag,
+ int hdrlen)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u8 *crypto_hdr = skb_frag->data + hdrlen;
+ enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
+ u64 pn;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd_params);
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd_params->sec_ctl = TX_CMD_SEC_TKIP;
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd_params->key);
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_KEY128;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_WEP |
+ ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+ TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+ memcpy(&tx_cmd_params->key[3], keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ type = TX_CMD_SEC_GCMP;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ /* TODO: Taking the key from the table might introduce a race
+ * when PTK rekeying is done, having an old packets with a PN
+ * based on the old key but the message encrypted with a new
+ * one.
+ * Need to handle this.
+ */
+ tx_cmd_params->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd_params->key[0] = keyconf->hw_key_idx;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
+ default:
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_EXT;
+ }
+}
+
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate on old devices that
+ * can't possibly be doing MLO, since their firmware may be
+ * selecting a bad rate. On devices that might be doing MLO
+ * we need to let the FW pick (since we don't necessarily
+ * know the link), and there FW rate selection was fixed.
+ */
+ return mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
+/*
+ * Allocates the Tx cmd and sets the driver data pointers in the skb
+ */
+static struct iwl_device_tx_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info, int hdrlen,
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_device_tx_cmd *dev_cmd;
+ struct iwl_tx_cmd_v6 *tx_cmd;
+
+ dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+ if (unlikely(!dev_cmd))
+ return NULL;
+
+ dev_cmd->hdr.cmd = TX_CMD;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ __le32 rate_n_flags = 0;
+ u16 flags = 0;
+ struct iwl_mvm_sta *mvmsta = sta ?
+ iwl_mvm_sta_from_mac80211(sta) : NULL;
+ bool amsdu = false;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /*
+ * For data and mgmt packets the rate info comes from the fw on
+ * new devices (older FW is somewhat broken for this). Only set
+ * the rate/antenna for injected frames with a fixed rate, or
+ * when no sta is given, or with older firmware.
+ */
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags =
+ iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ hdr->frame_control);
+ } else if (!ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
+ /* These are important frames */
+ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ if (mvm->trans->mac_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd *cmd = (void *)dev_cmd->payload;
+ u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
+ info, amsdu);
+
+ cmd->offload_assist = cpu_to_le32(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
+
+ cmd->flags = cpu_to_le16(flags);
+ cmd->rate_n_flags = rate_n_flags;
+ } else {
+ struct iwl_tx_cmd_v9 *cmd = (void *)dev_cmd->payload;
+ u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
+ info, amsdu);
+
+ cmd->offload_assist = cpu_to_le16(offload_assist);
+
+ /* Total # bytes to be transmitted */
+ cmd->len = cpu_to_le16((u16)skb->len);
+
+ /* Copy MAC header from skb into command buffer */
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
+
+ cmd->flags = cpu_to_le32(flags);
+ cmd->rate_n_flags = rate_n_flags;
+ }
+ goto out;
+ }
+
+ tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
+
+ if (info->control.hw_key)
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, &tx_cmd->params, skb, hdrlen);
+
+ iwl_mvm_set_tx_cmd(mvm, skb, &tx_cmd->params, info, sta_id);
+
+ iwl_mvm_set_tx_cmd_rate(mvm, &tx_cmd->params, info, sta, hdr->frame_control);
+
+ /* Copy MAC header from skb into command buffer */
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
+
+out:
+ return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+ struct iwl_device_tx_cmd *cmd)
+{
+ struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
+ memset(&skb_info->status, 0, sizeof(skb_info->status));
+ memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
+
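+ /* stash the cmd pointer; the reclaim path frees it via driver_data[1] */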
+ skb_info->driver_data[1] = cmd;
+}
+
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif_link_info *link,
+ struct ieee80211_tx_info *info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ /*
+ * Non-bufferable frames use the broadcast station, thus they
+ * use the probe queue.
+ * Also take care of the case where we send a deauth to a
+ * station that we don't have, or similarly an association
+ * response (with non-success status) for a station we can't
+ * accept.
+ * Also, disassociate frames might happen, particularly with
+ * reason 7 ("Class 3 frame received from nonassociated STA").
+ */
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(skb) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+ return link->mgmt_queue;
+
+ if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
+ is_multicast_ether_addr(hdr->addr1))
+ return link->cab_queue;
+
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "fc=0x%02x", le16_to_cpu(fc));
+ return link->mgmt_queue;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ if (ieee80211_is_mgmt(fc))
+ return mvm->p2p_dev_queue;
+
+ WARN_ON_ONCE(1);
+ return mvm->p2p_dev_queue;
+ default:
+ WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+ return -1;
+ }
+}
+
+static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
+ struct iwl_probe_resp_data *resp_data;
+ const u8 *ie;
+ u8 *pos;
+ u8 match[] = {
+ (WLAN_OUI_WFA >> 16) & 0xff,
+ (WLAN_OUI_WFA >> 8) & 0xff,
+ WLAN_OUI_WFA & 0xff,
+ WLAN_OUI_TYPE_WFA_P2P,
+ };
+
+ rcu_read_lock();
+
+ resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data);
+ if (!resp_data)
+ goto out;
+
+ if (!resp_data->notif.noa_active)
+ goto out;
+
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
+ mgmt->u.probe_resp.variable,
+ skb->len - base_len,
+ match, 4, 2);
+ if (!ie) {
+ IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
+ goto out;
+ }
+
+ if (skb_tailroom(skb) < resp_data->noa_len) {
+ if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+ IWL_ERR(mvm,
+ "Failed to reallocate probe resp\n");
+ goto out;
+ }
+ }
+
+ pos = skb_put(skb, resp_data->noa_len);
+
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ /* Set length of IE body (not including ID and length itself) */
+ *pos++ = resp_data->noa_len - 2;
+ *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+ *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+ *pos++ = WLAN_OUI_WFA & 0xff;
+ *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+ memcpy(pos, &resp_data->notif.noa_attr,
+ resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+ rcu_read_unlock();
+}
+
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info info;
+ struct iwl_device_tx_cmd *dev_cmd;
+ u8 sta_id;
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ __le16 fc = hdr->frame_control;
+ bool offchannel = IEEE80211_SKB_CB(skb)->flags &
+ IEEE80211_TX_CTL_TX_OFFCHAN;
+ int queue = -1;
+
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
+ memcpy(&info, skb->cb, sizeof(info));
+
+ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+ return -1;
+
+ if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
+ return -1;
+
+ if (info.control.vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info.control.vif);
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+
+ if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ p2p_aux) ||
+ (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ offchannel)) {
+ /*
+ * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
+ * that can be used in 2 different types of vifs, P2P
+ * Device and STATION.
+ * P2P Device uses the offchannel queue.
+ * STATION (HS2.0) uses the auxiliary context of the FW,
+ * and hence needs to be sent on the aux queue.
+ * If P2P_DEV_OVER_AUX is supported (p2p_aux = true),
+ * P2P Device also uses the aux queue.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ queue = mvm->aux_queue;
+ if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE))
+ return -1;
+ } else if (info.control.vif->type ==
+ NL80211_IFTYPE_P2P_DEVICE ||
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+ u32 link_id = u32_get_bits(info.control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+ struct iwl_mvm_vif_link_info *link;
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (info.control.vif->active_links)
+ link_id = ffs(info.control.vif->active_links) - 1;
+ else
+ link_id = 0;
+ }
+
+ link = mvmvif->link[link_id];
+ if (WARN_ON(!link))
+ return -1;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ sta_id = link->bcast_sta.sta_id;
+ else
+ sta_id = link->mcast_sta.sta_id;
+
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info,
+ skb);
+ } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+ queue = mvm->snif_queue;
+ sta_id = mvm->snif_sta.sta_id;
+ }
+ }
+
+ if (queue < 0) {
+ IWL_ERR(mvm, "No queue was found. Dropping TX\n");
+ return -1;
+ }
+
+ if (unlikely(ieee80211_is_probe_resp(fc)))
+ iwl_mvm_probe_resp_set_noa(mvm, skb);
+
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
+ if (!dev_cmd)
+ return -1;
+
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ return -1;
+ }
+
+ return 0;
+}
+
+unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, unsigned int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u8 ac = tid_to_mac80211_ac[tid];
+ enum nl80211_band band;
+ unsigned int txf;
+ unsigned int val;
+ int lmac;
+
+ /* For HE redirect to trigger based fifos */
+ if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
+ ac += 4;
+
+ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
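+ /* e.g. a hypothetical 16384-byte TXF caps the A-MSDU at 16128 bytes */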
+ val = mvmsta->max_amsdu_len;
+
+ if (hweight16(sta->valid_links) <= 1) {
+ if (sta->valid_links) {
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link = ffs(sta->valid_links) - 1;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
+ if (WARN_ON(!link_conf))
+ band = NL80211_BAND_2GHZ;
+ else
+ band = link_conf->chanreq.oper.chan->band;
+ rcu_read_unlock();
+ } else {
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
+ }
+
+ lmac = iwl_mvm_get_lmac_id(mvm, band);
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
+ /* for real MLO restrict to both LMACs if they exist */
+ lmac = IWL_LMAC_5G_INDEX;
+ val = min_t(unsigned int, val,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+ lmac = IWL_LMAC_24G_INDEX;
+ } else {
+ lmac = IWL_LMAC_24G_INDEX;
+ }
+
+ return min_t(unsigned int, val,
+ mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
+#ifdef CONFIG_INET
+
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skb)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
+ u16 snap_ip_tcp, pad;
+ netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+ u8 tid;
+
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
+
+ if (!mvmsta->max_amsdu_len ||
+ !ieee80211_is_data_qos(hdr->frame_control) ||
+ !mvmsta->amsdu_enabled)
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ /*
+ * Do not build an A-MSDU for IPv6 with extension headers;
+ * ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ netdev_flags &= ~NETIF_F_CSUM_MASK;
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+ }
+
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ /*
+ * No need to lock amsdu_in_ampdu_allowed since it can't be modified
+ * during a BA session.
+ */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
+ !(mvmsta->amsdu_enabled & BIT(tid)))
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+ /*
+ * Take the min of the ieee80211 station and mvm station limits
+ */
+ max_amsdu_len =
+ min_t(unsigned int, sta->cur->max_amsdu_len,
+ iwl_mvm_max_amsdu_size(mvm, sta, tid));
+
+ /*
+ * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+ * supported. This is a spec requirement (IEEE 802.11-2015
+ * section 8.7.3 NOTE 3).
+ */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !sta->deflink.vht_cap.vht_supported)
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
+
+ /* Sub frame header + SNAP + IP header + TCP header + MSS */
+ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+ pad = (4 - subf_len) & 0x3;
+
+ /*
+ * If we have N subframes in the A-MSDU, then the A-MSDU's size is
+ * N * subf_len + (N - 1) * pad.
+ */
+ num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
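+ /*
+ * E.g. for an IPv4 TCP flow with mss = 1460 and 20-byte IP and TCP
+ * headers: snap_ip_tcp = 48, subf_len = 14 + 48 + 1460 = 1522,
+ * pad = 2, so a (hypothetical) max_amsdu_len of 7935 yields
+ * (7935 + 2) / 1524 = 5 subframes.
+ */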
+
+ if (sta->max_amsdu_subframes &&
+ num_subframes > sta->max_amsdu_subframes)
+ num_subframes = sta->max_amsdu_subframes;
+
+ tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ /*
+ * Make sure we have enough TBs for the A-MSDU:
+ * 2 for each subframe
+ * 1 more for each fragment
+ * 1 more for the potential data in the header
+ */
+ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+ mvm->trans->info.max_skb_frags)
+ num_subframes = 1;
+
+ if (num_subframes > 1)
+ *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* This skb fits in one single A-MSDU */
+ if (num_subframes * mss >= tcp_payload_len) {
+ __skb_queue_tail(mpdus_skb, skb);
+ return 0;
+ }
+
+ /*
+ * Trick the segmentation function to make it
+ * create SKBs that can fit into one A-MSDU.
+ */
+ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb);
+}
+#else /* CONFIG_INET */
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skb)
+{
+ /* Impossible to get TSO without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif
+
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+ unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+ unsigned long now = jiffies;
+ int tid;
+
+ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+ return false;
+
+ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ return true;
+ }
+
+ return false;
+}
+
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ int airtime)
+{
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ mdata = &mvm->tcm.data[mac];
+
+ if (mvm->tcm.paused)
+ return;
+
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+ schedule_delayed_work(&mvm->tcm.work, 0);
+
+ mdata->tx.airtime += airtime;
+}
+
+static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int tid)
+{
+ u32 ac = tid_to_mac80211_ac[tid];
+ int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+ struct iwl_mvm_tcm_mac *mdata;
+
+ if (mac >= NUM_MAC_INDEX_DRIVER)
+ return -EINVAL;
+
+ mdata = &mvm->tcm.data[mac];
+
+ mdata->tx.pkts[ac]++;
+
+ return 0;
+}
+
+/*
+ * Prepares and transmits a single MPDU.
+ *
+ * This function must be called with BHs disabled.
+ */
+static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_device_tx_cmd *dev_cmd;
+ __le16 fc;
+ u16 seq_number = 0;
+ u8 tid = IWL_MAX_TID_COUNT;
+ u16 txq_id;
+ bool is_ampdu = false;
+ int hdrlen;
+
+ if (WARN_ON_ONCE(!sta))
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ fc = hdr->frame_control;
+ hdrlen = ieee80211_hdrlen(fc);
+
+ if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
+ return -1;
+
+ if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA))
+ return -1;
+
+ if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
+ return -1;
+
+ if (unlikely(ieee80211_is_probe_resp(fc)))
+ iwl_mvm_probe_resp_set_noa(mvm, skb);
+
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
+ if (!dev_cmd)
+ goto drop;
+
+ /*
+ * we handle that entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
+ spin_lock(&mvmsta->lock);
+
+ /* nullfunc frames should go to the MGMT queue regardless of QOS,
+ * the conditions of !ieee80211_is_qos_nullfunc(fc) and
+ * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID
+ */
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
+ goto drop_unlock_sta;
+
+ is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ if (WARN_ONCE(is_ampdu &&
+ mvmsta->tid_data[tid].state != IWL_AGG_ON,
+ "Invalid internal agg state %d for TID %d",
+ mvmsta->tid_data[tid].state, tid))
+ goto drop_unlock_sta;
+
+ seq_number = mvmsta->tid_data[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
+
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ /* update the tx_cmd hdr as it was already copied */
+ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+ }
+ } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
+ !ieee80211_is_nullfunc(fc)) {
+ tid = IWL_TID_NON_QOS;
+ }
+
+ txq_id = mvmsta->tid_data[tid].txq_id;
+
+ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+ if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
+#if defined(__FreeBSD__)
+ printf("%s:%d: fc %#06x tid %u txq_id %u mvm %p "
+ "skb %p { len %u } info %p sta %p (see PR 274382)\n",
+ __func__, __LINE__,
+ fc, tid, txq_id, mvm, skb, skb->len, info, sta);
+#endif
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return -1;
+ }
+
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+ /*
+ * If we have timed-out TIDs, schedule the worker that will
+ * reconfigure the queues and update them.
+ *
+ * Note that no lock is taken here, in order not to serialize
+ * the TX flow. This isn't dangerous because scheduling
+ * mvm->add_stream_wk can't ruin the state, and if we DON'T
+ * schedule it due to some race condition, the next TX that
+ * gets here will.
+ */
+ if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED &&
+ iwl_mvm_txq_should_update(mvm, txq_id)))
+ schedule_work(&mvm->add_stream_wk);
+ }
+
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
+ mvmsta->deflink.sta_id, tid, txq_id,
+ IEEE80211_SEQ_TO_SN(seq_number), skb->len);
+
+ /* From now on, we cannot access info->control */
+ iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+ /*
+ * The IV is introduced by the HW for new tx api, and it is not present
+ * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the
+ * IV for those devices.
+ */
+ if (ieee80211_is_data(fc))
+ iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
+ info->control.hw_key &&
+ !iwl_mvm_has_new_tx_api(mvm) ?
+ info->control.hw_key->iv_len : 0);
+
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+ goto drop_unlock_sta;
+
+ if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
+ mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
+
+ spin_unlock(&mvmsta->lock);
+
+ if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
+ tid == IWL_MAX_TID_COUNT ? 0 : tid))
+ goto drop;
+
+ return 0;
+
+drop_unlock_sta:
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+drop:
+ IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id,
+ tid);
+ return -1;
+}
+
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_tx_info info;
+ struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
+ unsigned int payload_len;
+ int ret;
+ struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
+
+ if (WARN_ON_ONCE(!sta))
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA))
+ return -1;
+
+ memcpy(&info, skb->cb, sizeof(info));
+
+ if (!skb_is_gso(skb))
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
+
+ payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ if (payload_len <= skb_shinfo(skb)->gso_size)
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
+
+ __skb_queue_head_init(&mpdus_skbs);
+
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
+ ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
+ if (ret)
+ return ret;
+
+ WARN_ON(skb_queue_empty(&mpdus_skbs));
+
+ /*
+ * As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
+ * in the command header. We need to preserve the original
+ * address 3 in the skb header to correctly create all the
+ * A-MSDU subframe headers from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
+ while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
+ skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
+ if (ret) {
+ /* Free skbs created as part of TSO logic that have not yet been dequeued */
+ __skb_queue_purge(&mpdus_skbs);
+ /* The skb here is not necessarily the same as the skb that entered
+ * this function, so free it explicitly.
+ */
+ if (skb == orig_skb)
+ ieee80211_free_txskb(mvm->hw, skb);
+ else
+ kfree_skb(skb);
+ /* there was an error, but we consumed the skb one way or another, so return 0 */
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct ieee80211_vif *vif = mvmsta->vif;
+ u16 normalized_ssn;
+
+ lockdep_assert_held(&mvmsta->lock);
+
+ if (tid_data->state == IWL_AGG_ON &&
+ iwl_mvm_tid_queued(mvm, tid_data) == 0) {
+ /*
+ * Now that this aggregation or DQA queue is empty, tell
+ * mac80211 so it knows we no longer have frames buffered for
+ * the station on this TID (for the TIM bitmap calculation).
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
+ /*
+ * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
+ * need to align the wrap-around of the ssn to compare the relevant
+ * values.
+ */
+ normalized_ssn = tid_data->ssn;
+ if (mvm->trans->mac_cfg->gen2)
+ normalized_ssn &= 0xff;
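+ /* e.g. on gen2, ssn 0x112 and next_reclaimed 0x12 now compare equal */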
+
+ if (normalized_ssn != tid_data->next_reclaimed)
+ return;
+
+ switch (tid_data->state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can continue addBA flow ssn = next_recl = %d\n",
+ tid_data->next_reclaimed);
+ tid_data->state = IWL_AGG_STARTING;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(BT_PRIO);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(SMALL_CF_POLL);
+ TX_STATUS_FAIL(FW_DROP);
+ TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static int iwl_mvm_get_hwrate_chan_width(u32 chan_width)
+{
+ switch (chan_width) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ return 0;
+ case RATE_MCS_CHAN_WIDTH_40:
+ return IEEE80211_TX_RC_40_MHZ_WIDTH;
+ case RATE_MCS_CHAN_WIDTH_80:
+ return IEEE80211_TX_RC_80_MHZ_WIDTH;
+ case RATE_MCS_CHAN_WIDTH_160:
+ return IEEE80211_TX_RC_160_MHZ_WIDTH;
+ default:
+ return 0;
+ }
+}
+
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r)
+{
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 rate = format == RATE_MCS_MOD_TYPE_HT ?
+ RATE_HT_MCS_INDEX(rate_n_flags) :
+ rate_n_flags & RATE_MCS_CODE_MSK;
+
+ r->flags |=
+ iwl_mvm_get_hwrate_chan_width(rate_n_flags &
+ RATE_MCS_CHAN_WIDTH_MSK);
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_HT:
+ r->flags |= IEEE80211_TX_RC_MCS;
+ r->idx = rate;
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ ieee80211_rate_set_vht(r, rate,
+ FIELD_GET(RATE_MCS_NSS_MSK,
+ rate_n_flags) + 1);
+ r->flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
+ /* mac80211 cannot do this without ieee80211_tx_status_ext()
+ * but it only matters for radiotap */
+ r->idx = 0;
+ break;
+ default:
+ r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ band);
+ }
+}
+
+/*
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(struct iwl_mvm *mvm,
+ __le32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+ u32 rate;
+
+ /*
+ * Technically this conversion is incorrect for BA status, however:
+ * - we only use the BA notif data for older firmware that have
+ * host rate scaling and don't use newer rate formats
+ * - the firmware API changed together for BA notif and TX CMD
+ * as well
+ */
+ rate = iwl_mvm_v3_rate_from_fw(rate_n_flags, mvm->fw_rates_ver);
+
+ info->status.antenna =
+ ((rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate, info->band, r);
+}
+
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+ u32 status, __le16 frame_control)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_tx_status *status_trig;
+ int i;
+
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
+ enum iwl_fw_ini_time_point tp =
+ IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+ if (ieee80211_is_action(frame_control))
+ tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ tp, NULL);
+ return;
+ }
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
+ FW_DBG_TRIGGER_TX_STATUS);
+ if (!trig)
+ return;
+
+ status_trig = (void *)trig->data;
+
+ for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+ /* don't collect on status 0 */
+ if (!status_trig->statuses[i].status)
+ break;
+
+ if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+ continue;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Tx status %d was received",
+ status & TX_STATUS_MSK);
+ break;
+ }
+}
+
+/*
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+ struct iwl_tx_resp *tx_resp)
+{
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
+}
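+/*
+ * Layout sketch (illustrative): struct agg_tx_status is 4 bytes wide, the
+ * size of one __le32, so for a response carrying frame_count status
+ * entries the SCD SSN is the __le32 that immediately follows the last
+ * entry; that is why the status array is cast to __le32 * and indexed by
+ * frame_count above.
+ */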
+
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_sta *sta;
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ /* struct iwl_tx_resp_v3 is almost the same */
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
+ struct agg_tx_status *agg_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ u32 status = le16_to_cpu(agg_status->status);
+ u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ u8 lq_color;
+ u16 next_reclaimed, seq_ctl;
+ bool is_ndp = false;
+
+ __skb_queue_head_init(&skbs);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ txq_id = le16_to_cpu(tx_resp->tx_queue);
+
+ seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool flushed = false;
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ case TX_STATUS_FAIL_FIFO_FLUSHED:
+ case TX_STATUS_FAIL_DRAIN_FLOW:
+ flushed = true;
+ break;
+ case TX_STATUS_FAIL_DEST_PS:
+			/* the FW should have stopped the queue and not
+			 * returned this status
+			 */
+ IWL_ERR_LIMIT(mvm,
+ "FW reported TX filtered, status=0x%x, FC=0x%x\n",
+ status, le16_to_cpu(hdr->frame_control));
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ break;
+ default:
+ break;
+ }
+
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+ ieee80211_is_mgmt(hdr->frame_control))
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
+		/*
+		 * If we are freeing multiple frames, mark all the frames
+		 * but the first one as acked, since they were acknowledged
+		 * before.
+		 */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
+
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+
+ iwl_mvm_hwrate_to_tx_status(mvm, tx_resp->initial_rate, info);
+
+ /* Don't assign the converted initial_rate, because driver
+ * TLC uses this and doesn't support the new FW rate
+ */
+ info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
+
+ /* Single frame failure in an AMPDU queue => send BAR */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
+ if (ieee80211_is_back_req(hdr->frame_control))
+ seq_ctl = 0;
+ else if (status != TX_STATUS_SUCCESS)
+ seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+
+ if (unlikely(!seq_ctl)) {
+			/*
+			 * If it is an NDP, we can't update next_reclaimed
+			 * since its sequence control is 0. Note that for that
+			 * same reason, NDPs are never sent to A-MPDU'able
+			 * queues, so we can never have more than one freed
+			 * frame for a single Tx response (see WARN_ON below).
+			 */
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ is_ndp = true;
+ }
+
+ /*
+ * TODO: this is not accurate if we are freeing more than one
+ * packet.
+ */
+ info->status.tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ info->status.status_driver_data[0] =
+ RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
+
+ if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
+ ieee80211_tx_status_skb(mvm->hw, skb);
+ }
+
+ /* This is an aggregation queue or might become one, so we use
+ * the ssn since: ssn = wifi seq_num % 256.
+ * The seq_ctl is the sequence control of the packet to which
+ * this Tx response relates. But if there is a hole in the
+ * bitmap of the BA we received, this Tx response may allow to
+ * reclaim the hole and all the subsequent packets that were
+ * already acked. In that case, seq_ctl != ssn, and the next
+ * packet to be reclaimed will be ssn and not seq_ctl. In that
+ * case, several packets will be reclaimed even if
+ * frame_count = 1.
+ *
+	 * The ssn is the index (% 256) of the latest packet that has been
+	 * treated (acked / dropped) + 1.
+ */
+ next_reclaimed = ssn;
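+	/*
+	 * Worked example (illustrative): if the BA window held frames
+	 * 10..13 and a previous BA acked 12..13 but left a hole at 11, a
+	 * Tx response that now acks 11 lets the firmware advance the SCD
+	 * SSN to 14, so frames 11..13 are reclaimed here even though
+	 * frame_count == 1.
+	 */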
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TXQ %d status %s (0x%08x)\n",
+ txq_id, iwl_mvm_get_tx_fail_reason(status), status);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+ le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+ ssn, next_reclaimed, seq_ctl);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+	 * sta can't be NULL; otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
+
+ if (!IS_ERR(sta)) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
+ iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
+
+ if (sta->wme && tid != IWL_MGMT_TID) {
+ struct iwl_mvm_tid_data *tid_data =
+ &mvmsta->tid_data[tid];
+ bool send_eosp_ndp = false;
+
+ spin_lock_bh(&mvmsta->lock);
+
+ if (!is_ndp) {
+ tid_data->next_reclaimed = next_reclaimed;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ if (tid < IWL_MAX_TID_COUNT)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
+ true, 0);
+ } else {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "NDP - don't update next_reclaimed\n");
+ }
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ if (mvmsta->sleep_tx_count) {
+ mvmsta->sleep_tx_count--;
+ if (mvmsta->sleep_tx_count &&
+ !iwl_mvm_tid_queued(mvm, tid_data)) {
+ /*
+ * The number of frames in the queue
+ * dropped to 0 even if we sent less
+ * frames than we thought we had on the
+ * Tx queue.
+ * This means we had holes in the BA
+ * window that we just filled, ask
+ * mac80211 to send EOSP since the
+ * firmware won't know how to do that.
+ * Send NDP and the firmware will send
+ * EOSP notification that will trigger
+ * a call to ieee80211_sta_eosp().
+ */
+ send_eosp_ndp = true;
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+ if (send_eosp_ndp) {
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+ IEEE80211_FRAME_RELEASE_UAPSD,
+ 1, tid, false, false);
+ mvmsta->sleep_tx_count = 0;
+ ieee80211_send_eosp_nullfunc(sta, tid);
+ }
+ }
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
+ }
+out:
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+ switch (status & AGG_TX_STATE_STATUS_MSK) {
+ AGG_TX_STATE_(TRANSMITTED);
+ AGG_TX_STATE_(UNDERRUN);
+ AGG_TX_STATE_(BT_PRIO);
+ AGG_TX_STATE_(FEW_BYTES);
+ AGG_TX_STATE_(ABORT);
+ AGG_TX_STATE_(TX_ON_AIR_DROP);
+ AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+ AGG_TX_STATE_(LAST_SENT_BT_KILL);
+ AGG_TX_STATE_(SCD_QUERY);
+ AGG_TX_STATE_(TEST_BAD_CRC32);
+ AGG_TX_STATE_(RESPONSE);
+ AGG_TX_STATE_(DUMP_TX);
+ AGG_TX_STATE_(DELAY_TX);
+ }
+
+ return "UNKNOWN";
+}
+
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+ struct agg_tx_status *frame_status =
+ iwl_mvm_get_agg_status(mvm, tx_resp);
+ int i;
+	bool trigger_timepoint = false;
+
+ for (i = 0; i < tx_resp->frame_count; i++) {
+ u16 fstatus = le16_to_cpu(frame_status[i].status);
+		/* if any frame wasn't transmitted, trigger the time point */
+		trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
+				      AGG_TX_STATE_TRANSMITTED);
+ IWL_DEBUG_TX_REPLY(mvm,
+ "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+ iwl_get_agg_tx_status(fstatus),
+ fstatus & AGG_TX_STATE_STATUS_MSK,
+ (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+ AGG_TX_STATE_TRY_CNT_POS,
+ le16_to_cpu(frame_status[i].sequence));
+ }
+
+	if (trigger_timepoint)
+		iwl_dbg_tlv_time_point(&mvm->fwrt,
+				       IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ struct iwl_mvm_sta *mvmsta;
+ int queue = SEQ_TO_QUEUE(sequence);
+ struct ieee80211_sta *sta;
+
+ if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
+ return;
+
+ iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (!WARN_ON_ONCE(!mvmsta)) {
+ mvmsta->tid_data[tid].rate_n_flags =
+ tx_resp->initial_rate;
+ mvmsta->tid_data[tid].tx_time =
+ le16_to_cpu(tx_resp->wireless_media_time);
+ mvmsta->tid_data[tid].lq_color =
+ TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le16_to_cpu(tx_resp->wireless_media_time));
+ }
+
+ rcu_read_unlock();
+}
+
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+
+ if (tx_resp->frame_count == 1)
+ iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+ else
+ iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+}
+
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+ int txq, int index,
+ struct ieee80211_tx_info *tx_info, __le32 rate,
+ bool is_flush)
+{
+ struct sk_buff_head reclaimed_skbs;
+ struct iwl_mvm_tid_data *tid_data = NULL;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta = NULL;
+ struct sk_buff *skb;
+ int freed;
+
+ if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
+ tid > IWL_MAX_TID_COUNT,
+ "sta_id %d tid %d", sta_id, tid))
+ return;
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	/* Reclaiming frames for a station that has been deleted? */
+ if (WARN_ON_ONCE(!sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ if (!is_flush)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ }
+
+ /*
+ * It's possible to get a BA response after invalidating the rcu (rcu is
+ * invalidated in order to prevent new Tx from being sent, but there may
+ * be some frames already in-flight).
+ * In this case we just want to reclaim, and could skip all the
+ * sta-dependent stuff since it's in the middle of being removed
+	 * anyway.
+ */
+ if (IS_ERR(sta))
+ goto out;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ tid_data = &mvmsta->tid_data[tid];
+
+ if (tid_data->txq_id != txq) {
+ IWL_ERR(mvm,
+ "invalid reclaim request: Q %d, tid %d\n",
+ tid_data->txq_id, tid);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&mvmsta->lock);
+
+ tid_data->next_reclaimed = index;
+
+ iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+ freed = 0;
+
+ /* pack lq color from tid_data along the reduced txp */
+ tx_info->status.status_driver_data[0] =
+ RS_DRV_DATA_PACK(tid_data->lq_color,
+ tx_info->status.status_driver_data[0]);
+ /* the value is only consumed for old FW that has v1 rates anyway */
+ tx_info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(rate);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (!is_flush) {
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+ }
+
+		/* this is the first skb we deliver in this batch;
+		 * put the rate scaling data there
+		 */
+ if (freed == 1) {
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ memcpy(&info->status, &tx_info->status,
+ sizeof(tx_info->status));
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, info);
+ }
+ }
+
+ spin_unlock_bh(&mvmsta->lock);
+
+	/* We got a BA notif with 0 acked frames or the scd_ssn didn't
+	 * progress, which is possible (i.e. the first MPDU in the
+	 * aggregation wasn't acked). Still, it's important to update RS
+	 * about sent vs. acked.
+	 */
+ if (!is_flush && skb_queue_empty(&reclaimed_skbs) &&
+ !iwl_mvm_has_tlc_offload(mvm)) {
+ struct ieee80211_chanctx_conf *chanctx_conf = NULL;
+
+ /* no TLC offload, so non-MLD mode */
+ if (mvmsta->vif)
+ chanctx_conf =
+ rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
+
+ if (WARN_ON_ONCE(!chanctx_conf))
+ goto out;
+
+ tx_info->band = chanctx_conf->def.chan->band;
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, tx_info);
+
+ IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
+ iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
+ }
+
+out:
+ rcu_read_unlock();
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ skb = __skb_dequeue(&reclaimed_skbs);
+ ieee80211_tx_status_skb(mvm->hw, skb);
+ }
+}
+
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ int sta_id, tid, txq, index;
+ struct ieee80211_tx_info ba_info = {};
+ struct iwl_mvm_ba_notif *ba_notif;
+ struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_sta *mvmsta;
+
+ ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ struct iwl_compressed_ba_notif *ba_res =
+ (void *)pkt->data;
+ u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+ u16 tfd_cnt;
+ int i;
+
+ if (IWL_FW_CHECK(mvm, sizeof(*ba_res) > pkt_len,
+ "short BA notification (%d)\n", pkt_len))
+ return;
+
+ sta_id = ba_res->sta_id;
+ ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+ ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+ ba_info.status.tx_time =
+ (u16)le32_to_cpu(ba_res->wireless_time);
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_res->reduced_txp;
+
+ tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+ if (!tfd_cnt)
+ return;
+
+ if (IWL_FW_CHECK(mvm,
+ struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
+ "short BA notification (tfds:%d, size:%d)\n",
+ tfd_cnt, pkt_len))
+ return;
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ /*
+ * It's possible to get a BA response after invalidating the rcu
+ * (rcu is invalidated in order to prevent new Tx from being
+ * sent, but there may be some frames already in-flight).
+ * In this case we just want to reclaim, and could skip all the
+ * sta-dependent stuff since it's in the middle of being removed
+		 * anyway.
+ */
+
+ /* Free per TID */
+ for (i = 0; i < tfd_cnt; i++) {
+ struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+
+ tid = ba_tfd->tid;
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
+ if (mvmsta)
+ mvmsta->tid_data[i].lq_color = lq_color;
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+ (int)(le16_to_cpu(ba_tfd->q_num)),
+ le16_to_cpu(ba_tfd->tfd_index),
+ &ba_info,
+ ba_res->tx_rate, false);
+ }
+
+ if (mvmsta) {
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
+
+ iwl_mvm_count_mpdu(mvmsta, sta_id,
+ le16_to_cpu(ba_res->txed), true, 0);
+ }
+ rcu_read_unlock();
+ return;
+ }
+
+ ba_notif = (void *)pkt->data;
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+ /* "flow" corresponds to Tx queue */
+ txq = le16_to_cpu(ba_notif->scd_flow);
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ index = le16_to_cpu(ba_notif->scd_ssn);
+
+ rcu_read_lock();
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (IWL_FW_CHECK(mvm, !mvmsta,
+ "invalid STA ID %d in BA notif\n",
+ sta_id)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ tid_data = &mvmsta->tid_data[tid];
+
+ ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+ ba_info.status.ampdu_len = ba_notif->txed;
+ ba_info.status.tx_time = tid_data->tx_time;
+ ba_info.status.status_driver_data[0] =
+ (void *)(uintptr_t)ba_notif->reduced_txp;
+
+ rcu_read_unlock();
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+ ba_notif->sta_addr, ba_notif->sta_id);
+
+ IWL_DEBUG_TX_REPLY(mvm,
+ "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+ ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+ le64_to_cpu(ba_notif->bitmap), txq, index,
+ ba_notif->txed, ba_notif->txed_2_done);
+
+ IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+ ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
+}
+
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
+ .queues_ctl = cpu_to_le32(tfd_msk),
+ .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+ };
+
+ WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+ ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
+ sizeof(flush_cmd), &flush_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+}
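+/*
+ * Illustrative caller sequence for the race-free scheme above (a sketch;
+ * the helper names are assumed from elsewhere in the driver):
+ *
+ *	iwl_mvm_drain_sta(mvm, mvmsta, true);
+ *	iwl_mvm_flush_tx_path(mvm, tfd_msk);
+ *	iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
+ *	iwl_mvm_drain_sta(mvm, mvmsta, false);
+ */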
+
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
+{
+ int ret;
+ struct iwl_tx_path_flush_cmd_rsp *rsp;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .sta_id = cpu_to_le32(sta_id),
+ .tid_mask = cpu_to_le16(tids),
+ };
+
+ struct iwl_host_cmd cmd = {
+ .id = TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ };
+
+ WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+ cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+ sta_id, tids);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
+ IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+ return ret;
+ }
+
+ if (cmd.flags & CMD_WANT_SKB) {
+ int i;
+ int num_flushed_queues;
+
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ rsp = (void *)cmd.resp_pkt->data;
+
+ if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
+ "sta_id %d != rsp_sta_id %d",
+ sta_id, le16_to_cpu(rsp->sta_id))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+ if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+ "num_flushed_queues %d", num_flushed_queues)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ for (i = 0; i < num_flushed_queues; i++) {
+ struct ieee80211_tx_info tx_info = {};
+ struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+ int tid = le16_to_cpu(queue_info->tid);
+ int read_before = le16_to_cpu(queue_info->read_before_flush);
+ int read_after = le16_to_cpu(queue_info->read_after_flush);
+ int queue_num = le16_to_cpu(queue_info->queue_num);
+
+ if (tid == IWL_MGMT_TID)
+ tid = IWL_MAX_TID_COUNT;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "tid %d queue_id %d read-before %d read-after %d\n",
+ tid, queue_num, read_before, read_after);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
+ &tx_info, 0, true);
+ }
+free_rsp:
+ iwl_free_resp(&cmd);
+ }
+ return ret;
+}
+
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
+{
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
+
+ return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
+}
diff --git a/sys/contrib/dev/iwlwifi/mvm/utils.c b/sys/contrib/dev/iwlwifi/mvm/utils.c
new file mode 100644
index 000000000000..e7e24941db15
--- /dev/null
+++ b/sys/contrib/dev/iwlwifi/mvm/utils.c
@@ -0,0 +1,1416 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ */
+#if defined(__FreeBSD__)
+#include <linux/math64.h>
+#endif
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "fw/api/rs.h"
+#include "fw/img.h"
+
+/*
+ * Will return 0 even if the cmd failed when RFKILL is asserted unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
+ /*
+ * Synchronous commands from this op-mode must hold
+ * the mutex, this ensures we don't try to send two
+ * (or more) synchronous commands at a time.
+ */
+ if (!(cmd->flags & CMD_ASYNC))
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+ /*
+ * If the caller wants the SKB, then don't hide any problems, the
+ * caller might access the response buffer which will be NULL if
+ * the command failed.
+ */
+ if (cmd->flags & CMD_WANT_SKB)
+ return ret;
+
+ /*
+ * Silently ignore failures if RFKILL is asserted or
+	 * we are in the suspend/resume process
+ */
+ if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
+ return 0;
+ return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+ u32 flags, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
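+/*
+ * Typical usage (illustrative), for a synchronous command sent while
+ * holding mvm->mutex, e.g. as in iwl_mvm_flush_tx_path() in tx.c:
+ *
+ *	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
+ *				   sizeof(flush_cmd), &flush_cmd);
+ */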
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+ u32 *status)
+{
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ int ret, resp_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+ if (WARN_ON(mvm->d3_test_active))
+ return -EIO;
+#endif
+
+ /*
+ * Only synchronous commands can wait for status,
+ * we use WANT_SKB so the caller can't.
+ */
+ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+ "cmd flags %x", cmd->flags))
+ return -EINVAL;
+
+ cmd->flags |= CMD_WANT_SKB;
+
+ ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (ret == -ERFKILL) {
+ /*
+ * The command failed because of RFKILL, don't update
+ * the status, leave it as success and return 0.
+ */
+ return 0;
+ } else if (ret) {
+ return ret;
+ }
+
+ pkt = cmd->resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ ret = -EIO;
+ goto out_free_resp;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+ out_free_resp:
+ iwl_free_resp(cmd);
+ return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
+ const void *data, u32 *status)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
+
+int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ bool is_LB = band == NL80211_BAND_2GHZ;
+
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
+ return is_LB ? rate + IWL_FIRST_OFDM_RATE :
+ rate;
+
+ /* CCK is not allowed in HB */
+ return is_LB ? rate : -1;
+}
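+/*
+ * Illustrative mapping, assuming the usual iwlwifi legacy rate table
+ * where IWL_FIRST_OFDM_RATE == 4: fw OFDM index 0 (6 Mbps) maps to
+ * mac80211 index 4 on 2.4 GHz (the four CCK rates come first there),
+ * but to index 0 on 5 GHz, where the band has no CCK rates.
+ */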
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ int band_offset = 0;
+
+ /* Legacy rate format, search for match in table */
+ if (band != NL80211_BAND_2GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - band_offset;
+
+ return -1;
+}
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
+{
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
+}
+
+u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_ucode_ac[] = {
+ AC_VO,
+ AC_VI,
+ AC_BE,
+ AC_BK
+ };
+
+ return mac80211_ac_to_ucode_ac[ac];
+}
+
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+ IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+ le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+ IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+ le16_to_cpu(err_resp->bad_cmd_seq_num),
+ le32_to_cpu(err_resp->error_service));
+ IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
+ le64_to_cpu(err_resp->timestamp));
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
+ */
+u8 first_antenna(u8 mask)
+{
+ BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+ if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
+ return BIT(0);
+ return BIT(ffs(mask) - 1);
+}
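+/* e.g. first_antenna(ANT_B | ANT_C) == ANT_B, since ffs(0x6) - 1 == 1 */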
+
+#define MAX_ANT_NUM 2
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, must do BIT(idx).
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+ u8 ind = last_idx;
+ int i;
+
+ for (i = 0; i < MAX_ANT_NUM; i++) {
+ ind = (ind + 1) % MAX_ANT_NUM;
+ if (valid & BIT(ind))
+ return ind;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+ return last_idx;
+}
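+/*
+ * Example (illustrative): with valid == ANT_AB (0x3) and last_idx == 0
+ * this returns 1, and the caller then puts BIT(1) (antenna B) in the
+ * tx_cmd.
+ */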
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @mvm: Driver data.
+ * @lq: Link quality command to send.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case in which init is set, a callback is invoked to
+ * clear the state indicating that station creation is in progress.
+ *
+ * Returns: an error code indicating success or failure
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
+{
+ struct iwl_host_cmd cmd = {
+ .id = LQ_CMD,
+ .len = { sizeof(struct iwl_lq_cmd), },
+ .flags = CMD_ASYNC,
+ .data = { lq, },
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STA ||
+ iwl_mvm_has_tlc_offload(mvm)))
+ return -EINVAL;
+
+ return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/**
+ * iwl_mvm_update_smps - Get a request to change the SMPS mode
+ * @mvm: Driver data.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @req_type: The part of the driver that requested the change.
+ * @smps_request: The requested SMPS mode.
+ * @link_id: for MLO the link_id, otherwise 0 (deflink)
+ *
+ * Receive a request to change the SMPS mode and reconcile it with all
+ * other requests pending in the driver.
+ */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request,
+ unsigned int link_id)
+{
+ struct iwl_mvm_vif *mvmvif;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ /* SMPS is handled by firmware */
+ if (iwl_mvm_has_rlc_offload(mvm))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (WARN_ON_ONCE(!mvmvif->link[link_id]))
+ return;
+
+ mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (mvmvif->link[link_id]->smps_requests[i] ==
+ IEEE80211_SMPS_STATIC) {
+ smps_mode = IEEE80211_SMPS_STATIC;
+ break;
+ }
+ if (mvmvif->link[link_id]->smps_requests[i] ==
+ IEEE80211_SMPS_DYNAMIC)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ }
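+	/* resulting precedence across requesters: STATIC > DYNAMIC > AUTOMATIC */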
+
+ /* SMPS is disabled in eSR */
+ if (mvmvif->esr_active)
+ smps_mode = IEEE80211_SMPS_OFF;
+
+ ieee80211_request_smps(vif, link_id, smps_mode);
+}
+
+void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ enum iwl_mvm_smps_type_request req_type,
+ enum ieee80211_smps_mode smps_request)
+{
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id)
+ iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
+ link_id);
+ rcu_read_unlock();
+}
+
+static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
+
+ return true;
+}
+
+#define PERIODIC_STAT_RATE 5
+
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
+{
+ u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = cpu_to_le32(flags),
+ .config_time_sec = cpu_to_le32(enable ?
+ PERIODIC_STAT_RATE : 0),
+ .type_id_mask = cpu_to_le32(type),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ 0, sizeof(system_cmd), &system_cmd);
+}
+
+static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
+ u8 cmd_ver)
+{
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = clear ?
+ cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
+ cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
+ IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
+ .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
+ .len[0] = sizeof(system_cmd),
+ .data[0] = &system_cmd,
+ };
+ struct iwl_notification_wait stats_wait;
+ static const u16 stats_complete[] = {
+ WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
+ };
+ int ret;
+
+ if (cmd_ver != 1) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Invalid system statistics command version:%d\n",
+ cmd_ver);
+ return -EOPNOTSUPP;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
+ stats_complete, ARRAY_SIZE(stats_complete),
+ NULL, NULL);
+
+ mvm->statistics_clear = clear;
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* 500ms for OPERATIONAL, PART1 and END notification should be enough
+ * for FW to collect data from all LMACs and send
+ * STATISTICS_NOTIFICATION to host
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
+ if (ret)
+ return ret;
+
+ if (clear)
+ iwl_mvm_accu_radio_stats(mvm);
+
+ return ret;
+}
+
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+ struct iwl_statistics_cmd scmd = {
+ .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+ };
+
+ struct iwl_host_cmd cmd = {
+ .id = STATISTICS_CMD,
+ .len[0] = sizeof(scmd),
+ .data[0] = &scmd,
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+	/*
+	 * Don't request statistics during restart: they won't contain any
+	 * useful information right after restart, nor is clearing needed
+	 */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
+
+	/* From version 15 of STATISTICS_NOTIFICATION, the reply to
+	 * STATISTICS_CMD is empty, and the data arrives in a
+	 * STATISTICS_NOTIFICATION instead
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ STATISTICS_NOTIFICATION, 0) < 15) {
+ cmd.flags = CMD_WANT_SKB;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret)
+ return ret;
+
+ iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+ iwl_free_resp(&cmd);
+ } else {
+ struct iwl_notification_wait stats_wait;
+ static const u16 stats_complete[] = {
+ STATISTICS_NOTIFICATION,
+ };
+
+ iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
+ stats_complete, ARRAY_SIZE(stats_complete),
+ iwl_wait_stats_complete, NULL);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* 200ms should be enough for FW to collect data from all
+ * LMACs and send STATISTICS_NOTIFICATION to host
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
+ if (ret)
+ return ret;
+ }
+
+ if (clear)
+ iwl_mvm_accu_radio_stats(mvm);
+
+ return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+ mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+ mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+ mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+ mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
+struct iwl_mvm_diversity_iter_data {
+ struct iwl_mvm_phy_ctxt *ctxt;
+ bool result;
+};
+
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_diversity_iter_data *data = _data;
+ int i, link_id;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+
+ if (link_info->phy_ctxt != data->ctxt)
+ continue;
+
+ for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+ if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+ link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
+ data->result = false;
+ break;
+ }
+ }
+ }
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
+ struct iwl_mvm_phy_ctxt *ctxt)
+{
+ struct iwl_mvm_diversity_iter_data data = {
+ .ctxt = ctxt,
+ .result = true,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ return false;
+
+ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+ return false;
+
+ if (mvm->cfg->rx_with_siso_diversity)
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_diversity_iter, &data);
+
+ return data.result;
+}
+
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
+ bool low_latency, u16 mac_id)
+{
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mac_id)
+ };
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return;
+
+ if (low_latency) {
+ /* currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
+ 0, sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send low latency command\n");
+}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mvm_low_latency_cause cause)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+ bool prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ prev = iwl_mvm_vif_low_latency(mvmvif);
+ iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
+
+ low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+ if (low_latency == prev)
+ return 0;
+
+ iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
+
+ res = iwl_mvm_update_quotas(mvm, false, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm);
+}
+
+struct iwl_mvm_low_latency_iter {
+ bool result;
+ bool result_per_band[NUM_NL80211_BANDS];
+};
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_low_latency_iter *result = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ enum nl80211_band band;
+
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ result->result = true;
+
+ if (!mvmvif->deflink.phy_ctxt)
+ return;
+
+ band = mvmvif->deflink.phy_ctxt->channel->band;
+ result->result_per_band[band] = true;
+ }
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_low_latency_iter data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &data);
+
+ return data.result;
+}
+
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+ struct iwl_mvm_low_latency_iter data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_ll_iter, &data);
+
+ return data.result_per_band[band];
+}
+
+struct iwl_bss_iter_data {
+ struct ieee80211_vif *vif;
+ bool error;
+};
+
+static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return;
+
+ if (data->vif) {
+ data->error = true;
+ return;
+ }
+
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
+{
+ struct iwl_bss_iter_data bss_iter_data = {};
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_iface_iterator, &bss_iter_data);
+
+ if (bss_iter_data.error)
+ return ERR_PTR(-EINVAL);
+
+ return bss_iter_data.vif;
+}
+
+struct iwl_bss_find_iter_data {
+ struct ieee80211_vif *vif;
+ u32 macid;
+};
+
+static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bss_find_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->id == data->macid)
+ data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
+{
+ struct iwl_bss_find_iter_data data = {
+ .macid = macid,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bss_find_iface_iterator, &data);
+
+ return data.vif;
+}
+
+struct iwl_sta_iter_data {
+ bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_sta_iter_data *data = _data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->cfg.assoc)
+ data->assoc = true;
+}
+
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+ struct iwl_sta_iter_data data = {
+ .assoc = false,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_sta_iface_iterator,
+ &data);
+ return data.assoc;
+}
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ unsigned int default_timeout =
+ mvm->trans->mac_cfg->base->wd_timeout;
+
+ /*
+ * We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+ vif->type == NL80211_IFTYPE_AP)
+ return IWL_WATCHDOG_DISABLED;
+ return default_timeout;
+}
+
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_MLME);
+ if (!trig)
+ goto out;
+
+ trig_mlme = (void *)trig->data;
+
+ if (trig_mlme->stop_connection_loss &&
+ --trig_mlme->stop_connection_loss)
+ goto out;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
+
+out:
+ ieee80211_connection_loss(vif);
+}
+
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
+ FW_DBG_TRIGGER_BA);
+ if (!trig)
+ return;
+
+ ba_trig = (void *)trig->data;
+
+ if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Frame from %pM timed out, tid %d",
+ sta->addr, tid);
+}
+
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+ if (!elapsed)
+ return 0;
+
+ return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
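+/*
+ * Worked example: 50000 us of airtime over a 1000 ms window gives
+ * (100 * 50000 / 1000) / USEC_PER_MSEC == 5, i.e. a 5% load.
+ */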
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+ u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+ if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+ return IWL_MVM_TRAFFIC_HIGH;
+ if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+ return IWL_MVM_TRAFFIC_MEDIUM;
+
+ return IWL_MVM_TRAFFIC_LOW;
+}
+
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+ if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+ return;
+
+ low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+ if (!mvm->tcm.result.change[mvmvif->id] &&
+ prev == low_latency) {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ return;
+ }
+
+ if (prev != low_latency) {
+ /* this sends traffic load and updates quota as well */
+ iwl_mvm_update_low_latency(mvm, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+ } else {
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+}
+
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+ guard(mvm)(mvm);
+
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iter, mvm);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ iwl_mvm_config_scan(mvm);
+}
+
+static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif,
+ uapsd_nonagg_detected_wk.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+ mvm = mvmvif->mvm;
+
+ if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
+ return;
+
+ /* remember that this AP is broken */
+ memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
+ vif->bss_conf.bssid, ETH_ALEN);
+ mvm->uapsd_noagg_bssid_write_idx++;
+ if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
+ mvm->uapsd_noagg_bssid_write_idx = 0;
+
+ iwl_mvm_connection_loss(mvm, vif,
+ "AP isn't using AMPDU with uAPSD enabled");
+}
+
+static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!vif->cfg.assoc)
+ return;
+
+ if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
+ !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
+ return;
+
+ if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
+ return;
+
+ mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
+ IWL_INFO(mvm,
+ "detected AP should do aggregation but isn't, likely due to U-APSD\n");
+ schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
+ 15 * HZ);
+}
+
+static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
+ unsigned int elapsed,
+ int mac)
+{
+ u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
+ u64 tpt;
+ unsigned long rate;
+ struct ieee80211_vif *vif;
+
+ rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
+
+ if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
+ mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
+ return;
+
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ tpt = 8 * bytes; /* kbps */
+ do_div(tpt, elapsed);
+ rate *= 1000; /* kbps */
+ if (tpt < 22 * rate / 100)
+ return;
+ } else {
+ /*
+ * the rate here is actually the threshold, in 100Kbps units,
+ * so do the needed conversion from bytes to 100Kbps:
+ * 100kb = bits / (100 * 1000),
+ * 100kbps = 100kb / (msecs / 1000) ==
+ * (bits / (100 * 1000)) / (msecs / 1000) ==
+ * bits / (100 * msecs)
+ */
+ tpt = (8 * bytes);
+ do_div(tpt, elapsed * 100);
+ if (tpt < rate)
+ return;
+ }
+
+ rcu_read_lock();
+ vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
+ if (vif)
+ iwl_mvm_uapsd_agg_disconnect(mvm, vif);
+ rcu_read_unlock();
+}
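+/*
+ * Worked numbers for the new-RX-API branch above (illustrative): with
+ * rx_bytes == 2750000 over elapsed == 1000 ms, tpt == 22000 kbps; at an
+ * ewma rate of 100 Mbps (100000 kbps) that is exactly the 22% threshold,
+ * so the detection proceeds.
+ */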
+
+static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 *band = _data;
+
+ if (!mvmvif->deflink.phy_ctxt)
+ return;
+
+ band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
+}
+
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+ unsigned long ts,
+ bool handle_uapsd)
+{
+ unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+ unsigned int uapsd_elapsed =
+ jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
+ u32 total_airtime = 0;
+ u32 band_airtime[NUM_NL80211_BANDS] = {0};
+ u32 band[NUM_MAC_INDEX_DRIVER] = {0};
+ int ac, mac, i;
+ bool low_latency = false;
+ enum iwl_mvm_traffic_load load, band_load;
+ bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+ if (handle_ll)
+ mvm->tcm.ll_ts = ts;
+ if (handle_uapsd)
+ mvm->tcm.uapsd_nonagg_ts = ts;
+
+ mvm->tcm.result.elapsed = elapsed;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_tcm_iterator,
+ &band);
+
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+ u32 vo_vi_pkts = 0;
+ u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+ total_airtime += airtime;
+ band_airtime[band[mac]] += airtime;
+
+ load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+ mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+ mvm->tcm.result.load[mac] = load;
+ mvm->tcm.result.airtime[mac] = airtime;
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+ vo_vi_pkts += mdata->rx.pkts[ac] +
+ mdata->tx.pkts[ac];
+
+ /* enable immediately with enough packets but defer disabling */
+ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+ mvm->tcm.result.low_latency[mac] = true;
+ else if (handle_ll)
+ mvm->tcm.result.low_latency[mac] = false;
+
+ if (handle_ll) {
+ /* clear old data */
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ }
+ low_latency |= mvm->tcm.result.low_latency[mac];
+
+ if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
+ iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
+ mac);
+ /* clear old data */
+ if (handle_uapsd)
+ mdata->uapsd_nonagg_detect.rx_bytes = 0;
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+ }
+
+ load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+ mvm->tcm.result.global_load = load;
+
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
+ mvm->tcm.result.band_load[i] = band_load;
+ }
+
+ /*
+ * If the current load isn't low we need to force re-evaluation
+ * in the TCM period, so that we can return to low load if there
+ * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+ * triggered by traffic).
+ */
+ if (load != IWL_MVM_TRAFFIC_LOW)
+ return MVM_TCM_PERIOD;
+ /*
+ * If low-latency is active we need to force re-evaluation after
+ * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+ * when there's no traffic at all.
+ */
+ if (low_latency)
+ return MVM_LL_PERIOD;
+ /*
+ * Otherwise, we don't need to run the work struct because we're
+ * in the default "idle" state - traffic indication is low (which
+ * also covers the "no traffic" case) and low-latency is disabled
+ * so there's no state that may need to be disabled when there's
+ * no traffic at all.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever one of the
+ * two timeouts expire (if there's traffic at all.)
+ */
+ return 0;
+}
+
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+ unsigned long ts = jiffies;
+ bool handle_uapsd =
+ time_after(ts, mvm->tcm.uapsd_nonagg_ts +
+ msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
+
+ spin_lock(&mvm->tcm.lock);
+ if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ spin_unlock(&mvm->tcm.lock);
+ return;
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
+ guard(mvm)(mvm);
+ if (iwl_mvm_request_statistics(mvm, true))
+ handle_uapsd = false;
+ }
+
+ spin_lock(&mvm->tcm.lock);
+ /* re-check if somebody else won the recheck race */
+ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+ /* calculate statistics */
+ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
+ handle_uapsd);
+
+ /* the memset needs to be visible before the timestamp */
+ smp_mb();
+ mvm->tcm.ts = ts;
+ if (work_delay)
+ schedule_delayed_work(&mvm->tcm.work, work_delay);
+ }
+ spin_unlock(&mvm->tcm.lock);
+
+ iwl_mvm_tcm_results(mvm);
+}
+
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ tcm.work);
+
+ iwl_mvm_recalc_tcm(mvm);
+}
+
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.paused = true;
+ spin_unlock_bh(&mvm->tcm.lock);
+ if (with_cancel)
+ cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+ int mac;
+ bool low_latency = false;
+
+ spin_lock_bh(&mvm->tcm.lock);
+ mvm->tcm.ts = jiffies;
+ mvm->tcm.ll_ts = jiffies;
+ for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+ memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+ memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+ memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+
+ if (mvm->tcm.result.low_latency[mac])
+ low_latency = true;
+ }
+	/* The TCM data needs to be reset before the "paused" flag changes */
+ smp_mb();
+ mvm->tcm.paused = false;
+
+ /*
+ * if the current load is not low or low latency is active, force
+ * re-evaluation to cover the case of no traffic.
+ */
+ if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
+ schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
+ else if (low_latency)
+ schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
+
+ spin_unlock_bh(&mvm->tcm.lock);
+}
+
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
+ iwl_mvm_tcm_uapsd_nonagg_detected_wk);
+}
+
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
+}
+
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
+{
+ u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
+
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->mac_cfg->base->gp2_reg_addr)
+ reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr;
+
+ return iwl_read_prph(mvm->trans, reg_addr);
+}
+
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
+ u32 *gp2, u64 *boottime, ktime_t *realtime)
+{
+ bool ps_disabled;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Disable power save when reading GP2 */
+ ps_disabled = mvm->ps_disabled;
+ if (!ps_disabled) {
+ mvm->ps_disabled = true;
+ iwl_mvm_power_update_device(mvm);
+ }
+
+ *gp2 = iwl_mvm_get_systime(mvm);
+
+ if (clock_type == CLOCK_BOOTTIME && boottime)
+ *boottime = ktime_get_boottime_ns();
+ else if (clock_type == CLOCK_REALTIME && realtime)
+ *realtime = ktime_get_real();
+
+ if (!ps_disabled) {
+ mvm->ps_disabled = ps_disabled;
+ iwl_mvm_power_update_device(mvm);
+ }
+}
+
+/* Find if at least two links from different vifs use the same channel.
+ * FIXME: consider having a refcount array in struct iwl_mvm_vif for
+ * used phy_ctxt ids.
+ */
+bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
+ struct iwl_mvm_vif *vif2)
+{
+ unsigned int i, j;
+
+ for_each_mvm_vif_valid_link(vif1, i) {
+ for_each_mvm_vif_valid_link(vif2, j) {
+ if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver)
+{
+ u32 rate_v3 = 0, rate_v1;
+ u32 dup = 0;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3);
+
+ rate_v1 = le32_to_cpu(rate);
+ if (rate_v1 == 0)
+ return rate_v1;
+ /* convert rate */
+ if (rate_v1 & RATE_MCS_HT_MSK_V1) {
+ u32 nss;
+
+ rate_v3 |= RATE_MCS_MOD_TYPE_HT;
+ rate_v3 |=
+ rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
+ nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK);
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+ } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
+ rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK);
+
+ rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
+
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+ if (rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
+ u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
+ RATE_MCS_HE_106T_POS_V1;
+ u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
+ RATE_MCS_HE_GI_LTF_POS;
+
+ if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
+ he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
+ he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
+				/* the new rate format has an additional bit to
+				 * represent the value 4, rather than reusing
+				 * the SGI bit for this purpose as the old rate
+				 * format did
+				 */
+ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
+ RATE_MCS_SGI_POS_V1;
+
+ rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
+ rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS;
+ rate_v3 |= he_106t << RATE_MCS_HE_106T_POS;
+ rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
+ rate_v3 |= RATE_MCS_MOD_TYPE_HE;
+ } else {
+ rate_v3 |= RATE_MCS_MOD_TYPE_VHT;
+ }
+ /* if legacy format */
+ } else {
+ u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
+
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
+ rate_v3 |= legacy_rate;
+ if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
+ rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ }
+
+ /* convert flags */
+ if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
+ rate_v3 |= RATE_MCS_LDPC_MSK;
+ rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate_v1 & RATE_MCS_ANT_AB_MSK) |
+ (rate_v1 & RATE_MCS_STBC_MSK) |
+ (rate_v1 & RATE_MCS_BF_MSK);
+
+ dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
+ if (dup) {
+ rate_v3 |= RATE_MCS_DUP_MSK;
+ rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS;
+ }
+
+ if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
+ (rate_v1 & RATE_MCS_SGI_MSK_V1))
+ rate_v3 |= RATE_MCS_SGI_MSK;
+
+ return rate_v3;
+}
+
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver)
+{
+ u32 result = 0;
+ int rate_idx;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2);
+
+ switch (rate & RATE_MCS_MOD_TYPE_MSK) {
+ case RATE_MCS_MOD_TYPE_CCK:
+ result = RATE_MCS_CCK_MSK_V1;
+ fallthrough;
+ case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
+ rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK);
+ if (!(result & RATE_MCS_CCK_MSK_V1))
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx),
+ RATE_LEGACY_RATE_MSK_V1);
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
+ result = RATE_MCS_HT_MSK_V1;
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_HT_MCS_CODE_MSK),
+ RATE_HT_MCS_RATE_CODE_MSK_V1);
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_MCS_NSS_MSK),
+ RATE_HT_MCS_MIMO2_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ result = RATE_MCS_VHT_MSK_V1;
+		result |= u32_encode_bits(u32_get_bits(rate,
+						       RATE_MCS_CODE_MSK),
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+ result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
+ RATE_VHT_MCS_NSS_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_HE: /* not generated */
+ default:
+ WARN_ONCE(1, "bad modulation type %d\n",
+ u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK));
+ return 0;
+ }
+
+ if (rate & RATE_MCS_LDPC_MSK)
+ result |= RATE_MCS_LDPC_MSK_V1;
+ WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) >
+ RATE_MCS_CHAN_WIDTH_160_VAL);
+ result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate & RATE_MCS_ANT_AB_MSK) |
+ (rate & RATE_MCS_STBC_MSK) |
+ (rate & RATE_MCS_BF_MSK);
+
+ /* not handling DUP since we don't use it */
+ WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK);
+
+ if (rate & RATE_MCS_SGI_MSK)
+ result |= RATE_MCS_SGI_MSK_V1;
+
+ return cpu_to_le32(result);
+}
+
+bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
+{
+ unsigned int i;
+
+ /* FIXME: can it fail when phy_ctxt is assigned? */
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ if (mvmvif->link[i]->phy_ctxt &&
+ mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
+ return true;
+ }
+
+ return false;
+}